mozilla-sccache-40c3d6b — source tree at commit e3d1ed560a28d114bd26299b0f717b372e7fc3d2

==> mozilla-sccache-40c3d6b/.github/actions/artifact_failure/action.yml <==
name: "Upload failure Artifacts"
description: "Upload failure Artifacts"
inputs:
  name:
    description: ""
    required: true
runs:
  using: "composite"
  steps:
    - name: pack failure artifacts
      shell: bash
      run: |
        echo "Current running processes"
        ps uax
        echo "Processes that access current dir"
        lsof +D `pwd` || true
        killall sccache || true
        killall sccache-dist || true
        # possible temp dirs for either linux or windows
        cp "${TMP:-${TEMP:-${TMPDIR:-/tmp}}}"/sccache_*.txt . 2>/dev/null || true
        tar --exclude='target' \
            --exclude='docs' \
            --exclude='bins' \
            --exclude='.git' \
            -zcf target/failure-${{ inputs.name }}.tar.gz .
    - uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.name }}
        path: target/failure-${{ inputs.name }}.tar.gz

==> mozilla-sccache-40c3d6b/.github/actions/nvcc-toolchain/action.yml <==
name: nvcc-toolchain
inputs:
  cuda-version:
    description: CUDA Toolkit version
    required: true
runs:
  using: composite
  steps:
    - if: runner.os == 'Linux'
      shell: bash
      run: .github/actions/nvcc-toolchain/install-cuda.sh ${{ inputs.cuda-version }}
    - if: runner.os == 'Windows'
      shell: powershell
      run: .\.github\actions\nvcc-toolchain\install-cuda.ps1 -cudaVersion ${{ inputs.cuda-version }}

==> mozilla-sccache-40c3d6b/.github/actions/nvcc-toolchain/install-cuda.ps1 <==
Param(
    [Parameter(Mandatory=$false)]
    [string]
    $cudaVersion="12.8.0"
)

# Use System.Version to tokenize version
$version = [Version]$cudaVersion

$major = $version.Major
$minor = $version.Minor
$build = $version.Build

# [Version] reports a missing build component as -1 (e.g. when "12.5" is
# passed), so clamp the minimum build to 0.
if ($build -lt 0) {
    $build = 0
}

# mmb == major minor build
$mmbVersionTag = "${major}.${minor}.${build}"
# mm == major minor
$mmVersionTag = "${major}.${minor}"

$cudaVersionUrl = "https://developer.download.nvidia.com/compute/cuda/${mmbVersionTag}/network_installers/cuda_${mmbVersionTag}_windows_network.exe"

###
# `cuda_${mmbVersionTag}_windows_network.exe` name only valid back to CUDA v11.5.1.
# Before that it was named `cuda_${mmbVersionTag}_win10_network.exe`:
# * https://developer.download.nvidia.com/compute/cuda/11.5.1/network_installers/cuda_11.5.1_windows_network.exe
# * https://developer.download.nvidia.com/compute/cuda/11.5.0/network_installers/cuda_11.5.0_win10_network.exe
###
if ([version]$mmbVersionTag -le "11.5.0") {
    $cudaVersionUrl = "https://developer.download.nvidia.com/compute/cuda/${mmbVersionTag}/network_installers/cuda_${mmbVersionTag}_win10_network.exe"
}

$cudaComponents =
    "nvcc_$mmVersionTag",
    "curand_$mmVersionTag",
    "curand_dev_$mmVersionTag",
    "cudart_$mmVersionTag",
    "cupti_$mmVersionTag",
    "nvrtc_$mmVersionTag",
    "nvrtc_dev_$mmVersionTag",
    "nvml_dev_$mmVersionTag",
    "nvtx_$mmVersionTag"

Invoke-WebRequest -Uri "$cudaVersionUrl" -OutFile "./cuda_network.exe" -UseBasicParsing
Start-Process -Wait -PassThru -FilePath .\cuda_network.exe -ArgumentList "-s $cudaComponents"

$ENV:PATH="$ENV:PATH;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v$mmVersionTag\bin"
$ENV:CUDA_PATH="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v$mmVersionTag"

$PATH_STR="PATH=$ENV:PATH"
$PATH_STR | Out-File -Append $ENV:GITHUB_ENV

$CUDA_PATH_STR="CUDA_PATH=$ENV:CUDA_PATH"
$CUDA_PATH_STR | Out-File -Append $ENV:GITHUB_ENV

Remove-Item .\cuda_network.exe

==> mozilla-sccache-40c3d6b/.github/actions/nvcc-toolchain/install-cuda.sh <==
#! /usr/bin/env bash
set -eu

export DEBIAN_FRONTEND=noninteractive

get_cuda_deb() {
    local deb="$( \
        wget --no-hsts -q -O- "${1}/Packages" \
      | grep -P "^Filename: \./${2}(.*)\.deb$" \
      | sort -Vr | head -n1 | cut -d' ' -f2 \
    )";
    if [ -z "$deb" ]; then
        echo "Error: No matching .deb found for '${1}' and '${2}'" >&2
        return 1
    fi
    wget --no-hsts -q -O "/tmp/${deb#./}" "${1}/${deb#./}";
    echo -n "/tmp/${deb#./}";
}

VERSION="$1";

NVARCH="$(uname -p)";
if test "$NVARCH" = aarch64; then NVARCH="sbsa"; fi

OSNAME="$(
    . /etc/os-release;
    major="$(cut -d'.' -f1 <<< "${VERSION_ID}")";
    minor="$(cut -d'.' -f2 <<< "${VERSION_ID}")";
    echo "$ID$((major - (major % 2)))${minor}";
)";

CUDA_HOME="/usr/local/cuda";

cuda_repo_base="https://developer.download.nvidia.com/compute/cuda/repos";
cuda_repo="${cuda_repo_base}/${OSNAME}/${NVARCH}";

cuda_ver="$VERSION";
cuda_ver="$(grep -Po '^[0-9]+\.[0-9]+' <<< "${cuda_ver}")";
cuda_ver="${cuda_ver/./-}";

if ! dpkg -s cuda-keyring; then
    sudo apt-get install -y --no-install-recommends \
        "$(get_cuda_deb "${cuda_repo}" cuda-keyring)" \
        ;
fi

PKGS=();
PKGS+=("cuda-toolkit-${cuda_ver}");

sudo apt-get update;
sudo apt-get install -y --no-install-recommends "${PKGS[@]}";
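# (Illustrative note, not part of the upstream script.) The transforms above
# map the requested version onto NVIDIA's apt naming scheme, e.g.:
#   VERSION=12.8.0 on Ubuntu 22.04 x86_64
#     -> OSNAME=ubuntu2204 (odd majors round down: 23.04 also yields ubuntu2204)
#     -> cuda_ver=12-8, so the package installed is "cuda-toolkit-12-8"
#     -> fetched from ${cuda_repo_base}/ubuntu2204/x86_64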
test -L "${CUDA_HOME}"; then # Create /usr/local/cuda symlink sudo ln -s "${CUDA_HOME}-${cuda_ver}" "${CUDA_HOME}"; fi export PATH="$PATH:$CUDA_HOME/bin" which -a nvcc nvcc --version cat </dev/null 2>&1; then sudo apt remove -y gcc-14 g++-14 sudo apt autoremove -y fi # Ubuntu20.04's clang-10 is too old for CTK 11+, so install clang-12 instead if test "${{ matrix.os }}" = "ubuntu-20.04" && test -n "${{ matrix.cuda }}"; then sudo apt install -y --no-install-recommends gcc clang-12 sudo ln -sf $(which clang-12) /usr/bin/clang sudo ln -sf $(which clang++-12) /usr/bin/clang++ else sudo apt install -y --no-install-recommends gcc clang fi echo 'gcc version:' gcc --version echo 'clang version:' clang --version - if: matrix.cuda != '' && contains(fromJSON('["Linux", "Windows"]'), runner.os) name: Install nvcc uses: ./.github/actions/nvcc-toolchain with: cuda-version: ${{ matrix.cuda }} - name: Build tests run: cargo test --no-run --locked --all-targets ${{ matrix.extra_args }} - name: Run tests run: cargo test --locked --all-targets ${{ matrix.extra_args }} - name: Upload failure if: failure() uses: ./.github/actions/artifact_failure with: name: test-${{ matrix.os }}-${{ matrix.rustc || 'stable' }}-${{ matrix.extra_desc }} build: name: build ${{ matrix.binary || 'sccache' }} ${{ matrix.target }} runs-on: ${{ matrix.os }} container: ${{ fromJson(matrix.container || '{"image":null}') }} timeout-minutes: 30 strategy: fail-fast: false matrix: include: - os: ubuntu-20.04 target: x86_64-unknown-linux-musl container: '{"image": "messense/rust-musl-cross:x86_64-musl"}' - os: ubuntu-20.04 binary: sccache-dist extra_args: --no-default-features --features="dist-server" target: x86_64-unknown-linux-musl container: '{"image": "messense/rust-musl-cross:x86_64-musl"}' - os: ubuntu-20.04 target: aarch64-unknown-linux-musl container: '{"image": "messense/rust-musl-cross:aarch64-musl"}' - os: ubuntu-20.04 target: armv7-unknown-linux-musleabi container: '{"image": "messense/rust-musl-cross:armv7-musleabi"}' - os: ubuntu-20.04 target: i686-unknown-linux-musl container: '{"image": "messense/rust-musl-cross:i686-musl"}' - os: macos-13 target: x86_64-apple-darwin macosx_deployment_target: 10.13 - os: macos-14 target: aarch64-apple-darwin macosx_deployment_target: 11.0 - os: windows-2019 target: x86_64-pc-windows-msvc rustflags: -Ctarget-feature=+crt-static - os: windows-2019 target: aarch64-pc-windows-msvc rustflags: -Ctarget-feature=+crt-static steps: - name: Clone repository uses: actions/checkout@v4 - name: Install rust uses: ./.github/actions/rust-toolchain with: toolchain: ${{ matrix.target == 'aarch64-apple-darwin' && 'beta' || 'stable' }} target: ${{ matrix.target }} if: ${{ !matrix.container }} - name: Build run: cargo build --locked --release --bin ${{ matrix.binary || 'sccache' }} --target ${{ matrix.target }} --features=openssl/vendored ${{ matrix.extra_args }} env: MACOSX_DEPLOYMENT_TARGET: ${{ matrix.macosx_deployment_target }} DEVELOPER_DIR: ${{ matrix.developer_dir }} SDKROOT: ${{ matrix.sdkroot }} RUSTFLAGS: ${{ matrix.rustflags }} # Workaround for the lack of substring() function in github actions expressions. 
      - name: Id
        id: id
        shell: bash
        run: echo "id=${ID#refs/tags/}" >> $GITHUB_OUTPUT
        env:
          ID: ${{ startsWith(github.ref, 'refs/tags/') && github.ref || github.sha }}
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.binary || 'sccache' }}-${{ steps.id.outputs.id }}-${{ matrix.target }}
          path: target/${{ matrix.target }}/release/${{ matrix.binary || 'sccache' }}${{ endsWith(matrix.target, '-msvc') && '.exe' || '' }}
          if-no-files-found: error

  coverage:
    name: coverage ${{ matrix.os }} rust ${{ matrix.rustc || 'stable' }} ${{ matrix.extra_desc }}
    runs-on: ${{ matrix.os }}
    continue-on-error: ${{ matrix.allow_failure || false }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: ubuntu-22.04
            cuda: "11.8"
            rustc: nightly
            allow_failure: true
            extra_args: --features=unstable
          - os: macos-13
            rustc: nightly
          # Disable on Windows for now as it fails with:
          # found invalid metadata files for crate `vte_generate_state_changes`
          # - os: windows-2019
          #   rustc: nightly
    env:
      RUST_BACKTRACE: 1
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: ${{ matrix.rustc }}
      - name: Install gcc & clang for tests
        run: sudo apt-get install -y clang gcc
        if: ${{ matrix.os == 'ubuntu-20.04' }}
      - if: matrix.cuda != '' && contains(fromJSON('["Linux", "Windows"]'), runner.os)
        name: Install nvcc
        uses: ./.github/actions/nvcc-toolchain
        with:
          cuda-version: ${{ matrix.cuda }}
      - name: "`grcov` ~ install"
        run: cargo install grcov
      - name: Execute tests
        run: cargo test --no-fail-fast --locked --all-targets ${{ matrix.extra_args }}
        env:
          CARGO_INCREMENTAL: "0"
          RUSTC_WRAPPER: ""
          RUSTFLAGS: "-Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Cprofile-generate=target/debug"
      - name: Generate coverage data (via `grcov`)
        id: coverage
        shell: bash
        run: |
          ## Generate coverage data
          COVERAGE_REPORT_DIR="target/debug"
          COVERAGE_REPORT_FILE="${COVERAGE_REPORT_DIR}/lcov.info"
          # GRCOV_IGNORE_OPTION='--ignore build.rs --ignore "/*" --ignore "[a-zA-Z]:/*"' ## `grcov` ignores these params when passed as an environment variable (why?)
          # GRCOV_EXCLUDE_OPTION='--excl-br-line "^\s*((debug_)?assert(_eq|_ne)?!|#\[derive\()"' ## `grcov` ignores these params when passed as an environment variable (why?)
          mkdir -p "${COVERAGE_REPORT_DIR}"
          # display coverage files
          grcov . --output-type files --ignore build.rs --ignore "/*" --ignore "[a-zA-Z]:/*" --excl-br-line "^\s*((debug_)?assert(_eq|_ne)?!|#\[derive\()" | sort --unique
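          # (Illustrative note, not part of the upstream workflow.) The same
          # lcov report can be produced locally, assuming grcov is installed
          # and the tests were run with the RUSTFLAGS shown above:
          #   grcov . --output-type lcov --output-path lcov.info --branch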
          # generate coverage report
          grcov . --output-type lcov --output-path "${COVERAGE_REPORT_FILE}" --branch --ignore build.rs --ignore "/*" --ignore "[a-zA-Z]:/*" --excl-br-line "^\s*((debug_)?assert(_eq|_ne)?!|#\[derive\()"
          echo "report=${COVERAGE_REPORT_FILE}" >> $GITHUB_OUTPUT
      - name: Upload coverage results (to Codecov.io)
        uses: codecov/codecov-action@v5
        with:
          file: ${{ steps.coverage.outputs.report }}
          ## flags: IntegrationTests, UnitTests, ${{ steps.vars.outputs.CODECOV_FLAGS }}
          flags: ${{ steps.vars.outputs.CODECOV_FLAGS }}
          name: codecov-umbrella
          fail_ci_if_error: false

  test_freebsd:
    name: test freebsd-14.1 rust stable
    runs-on: ${{ matrix.job.os }}
    timeout-minutes: 70
    strategy:
      fail-fast: false
      matrix:
        job:
          - { os: ubuntu-22.04 }
    steps:
      - uses: actions/checkout@v4
      - name: Prepare, build and test
        uses: vmactions/freebsd-vm@v1
        with:
          mem: 8192
          usesh: true
          sync: rsync
          copyback: false
          prepare: pkg install -y ca_root_nss curl gmake gtar pot sudo
          run: |
            #####################################################################################
            ### Prepare, build, and test
            #####################################################################################
            ### based on ref:
            ### and on ref:
            ### * NOTE: All steps need to be run in this block, otherwise, we are operating back
            ### on the mac host.
            set -exo pipefail
            #
            ### Basic user setup ################################################################
            TEST_USER=tester
            TEST_USER_HOME="/opt/$TEST_USER"
            REPO_NAME=${GITHUB_WORKSPACE##*/}
            WORKSPACE_PARENT="/home/runner/work/${REPO_NAME}"
            WORKSPACE="${WORKSPACE_PARENT}/${REPO_NAME}"
            export WORKSPACE
            #
            mkdir -p "$TEST_USER_HOME"
            pw adduser -n "$TEST_USER" -d "$TEST_USER_HOME" -c "Tester" -h -
            chown -R "$TEST_USER":"$TEST_USER" "$TEST_USER_HOME"
            chown -R "$TEST_USER":"$TEST_USER" "/$WORKSPACE_PARENT"/
            cat > /usr/local/etc/sudoers.d/wheel<
[...]

            echo -n "$(shasum -ba 256 "$d.tar.gz" | cut -d " " -f 1)" > "$d.tar.gz.sha256"
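            # (Illustrative comment, not part of the upstream workflow.) The
            # .sha256 files hold a bare hex digest, so a consumer can verify a
            # download with e.g.:
            #   echo "$(cat "$d.tar.gz.sha256")  $d.tar.gz" | shasum -a 256 -c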
            if [[ $d =~ (sccache-)(.*)?(x86_64-pc-windows)(.*)? ]]; then
              zip -r "$d.zip" "$d"
              echo -n "$(shasum -ba 256 "$d.zip" | cut -d " " -f 1)" > "$d.zip.sha256"
            fi
          done
      - name: Create release
        run: |
          sudo apt-get update && sudo apt-get install -y hub
          tag_name=${GITHUB_REF#refs/tags/}
          for f in sccache-*.tar.gz* sccache-*.zip*; do
            if [[ -f "$f" ]]; then files="$files -a $f"; fi
          done
          hub release create -m $tag_name $tag_name $files
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

==> mozilla-sccache-40c3d6b/.github/workflows/close-snap.yml <==
name: Close Snaps

on:
  pull_request:
    types: [closed]

jobs:
  close:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Close obsolete channels
        uses: canonical/actions/close-snap@release
        continue-on-error: true
        with:
          channel: edge/pr${{ github.event.number }}
          snapcraft-token: ${{ secrets.SNAPCRAFT_TOKEN }}

==> mozilla-sccache-40c3d6b/.github/workflows/integration-tests.yml <==
name: integration-tests

on: [ push, pull_request ]

env:
  RUST_BACKTRACE: full
  RUST_LOG: debug
  SCCACHE_PATH: /home/runner/.cargo/bin/sccache

jobs:
  build:
    runs-on: ubuntu-24.04
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - name: Build
        run: |
          cargo build --all-features
      - uses: actions/upload-artifact@v4
        with:
          name: integration-tests
          path: ./target/debug/sccache

  redis-deprecated:
    runs-on: ubuntu-24.04
    needs: build
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    env:
      SCCACHE_REDIS: redis://127.0.0.1
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep redis
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  redis:
    runs-on: ubuntu-24.04
    needs: build
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    env:
      SCCACHE_REDIS_ENDPOINT: tcp://127.0.0.1
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep redis
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  s3_minio:
    runs-on: ubuntu-24.04
    needs: build
    # Setup minio server
    services:
      minio:
        image: wktk/minio-server
        ports:
          - 9000:9000
        env:
          MINIO_ACCESS_KEY: "minioadmin"
          MINIO_SECRET_KEY: "minioadmin"
    env:
      SCCACHE_BUCKET: test
      SCCACHE_ENDPOINT: http://127.0.0.1:9000/
      SCCACHE_REGION: us-east-1
      AWS_ACCESS_KEY_ID: "minioadmin"
      AWS_SECRET_ACCESS_KEY: "minioadmin"
      AWS_EC2_METADATA_DISABLED: "true"
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
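    # (Clarifying comment, not in the upstream workflow.) Every backend job
    # below follows the same pattern: build once to populate the cache, then
    # "Test Twice for Cache Read" rebuilds and asserts a non-zero hit count:
    #   sccache --show-stats | grep -e "Cache hits\s*[1-9]"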
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Setup test bucket
        run: aws --endpoint-url http://127.0.0.1:9000/ s3 mb s3://test
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep s3
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  azblob_azurite:
    runs-on: ubuntu-24.04
    needs: build
    # Setup azurite server
    services:
      azurite:
        image: mcr.microsoft.com/azure-storage/azurite
        ports:
          - 10000:10000
    env:
      SCCACHE_AZURE_BLOB_CONTAINER: "test"
      SCCACHE_AZURE_CONNECTION_STRING: "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Setup test bucket
        run: |
          az storage container create \
            --name test \
            --connection-string ${SCCACHE_AZURE_CONNECTION_STRING}
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep azblob
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  gha:
    runs-on: ubuntu-24.04
    needs: build
    env:
      SCCACHE_GHA_ENABLED: "on"
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Configure Cache Env
        uses: actions/github-script@v7
        with:
          script: |
            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep gha
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  memcached-deprecated:
    runs-on: ubuntu-24.04
    needs: build
    # Setup memcached server
    services:
      memcached:
        image: bitnami/memcached
        env:
          # memcached's max item size is 1 MiB by default, but our tests
          # will produce larger files.
          #
          # Raise the limit here to make the tests happy.
          MEMCACHED_MAX_ITEM_SIZE: 16777216
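          # (Clarifying comment, not in the upstream workflow.) 16777216 bytes
          # is 16 MiB; the bitnami image forwards this setting to memcached's
          # item-size limit, the equivalent of starting memcached with "-I 16m".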
        ports:
          - 11211:11211
    env:
      SCCACHE_MEMCACHED: "tcp://127.0.0.1:11211"
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep memcached
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  memcached:
    runs-on: ubuntu-24.04
    needs: build
    # Setup memcached server
    services:
      memcached:
        image: bitnami/memcached
        env:
          # memcached's max item size is 1 MiB by default, but our tests
          # will produce larger files.
          #
          # Raise the limit here to make the tests happy.
          MEMCACHED_MAX_ITEM_SIZE: 16777216
        ports:
          - 11211:11211
    env:
      SCCACHE_MEMCACHED_ENDPOINT: "tcp://127.0.0.1:11211"
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep memcached
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  webdav:
    runs-on: ubuntu-24.04
    needs: build
    env:
      SCCACHE_WEBDAV_ENDPOINT: "http://127.0.0.1:8080"
      SCCACHE_WEBDAV_USERNAME: "bar"
      SCCACHE_WEBDAV_PASSWORD: "baz"
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
    steps:
      - uses: actions/checkout@v4
      - name: Start nginx
        shell: bash
        run: |
          sudo apt install -y nginx-full
          mkdir /tmp/static
          cp `pwd`/tests/htpasswd /tmp/htpasswd
          nginx -c `pwd`/tests/nginx_http_cache.conf
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep webdav
      - name: Test Twice for Cache Read
        run: cargo clean && cargo build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  test-mock-msvc:
    runs-on: windows-2019
    env:
      TARGET: x86_64-pc-windows-msvc
      SCCACHE_EXE: ${{ github.workspace }}\\target\\x86_64-pc-windows-msvc\\debug\\sccache.exe
      SCCACHE_LOG: "debug"
      SCCACHE_ERROR_LOG: "${{ github.workspace }}\\server_log.txt"
    steps:
      - uses: ilammy/msvc-dev-cmd@v1
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
          target: $TARGET
      - name: Build
        run: cargo build --bin sccache --target $env:TARGET --features=vendored-openssl
      - name: Compile MSVC (no cache)
        shell: bash
        working-directory: ./tests/msvc
        run: |
          cl "@args.rsp"
          test -e ./foo.o || { echo "No compiler output found"; exit -1; }
      - name: Start Server
        shell: bash
        run: $SCCACHE_EXE --start-server
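      # (Clarifying comment, not in the upstream workflow.) "@args.rsp" is an
      # MSVC response file: cl.exe reads additional command-line options from
      # the named file, so the real compile flags live in tests/msvc/args.rsp.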
      - name: Compile - Cache Miss
        shell: bash
        working-directory: ./tests/msvc
        run: |
          rm ./foo.o || true
          $SCCACHE_EXE "$(where cl.exe)" -c "@args.rsp"
          $SCCACHE_EXE --show-stats
          $SCCACHE_EXE --show-stats | grep -e "Cache misses\s*[1-9]"
          test -e ./foo.o || { echo "No compiler output found"; exit -1; }
          test -e ./foo.o.json || { echo "No dependency list found"; exit -1; }
      - name: Compile - Cache Hit
        shell: bash
        working-directory: ./tests/msvc
        run: |
          rm ./foo.o || true
          $SCCACHE_EXE "$(where cl.exe)" -c "@args.rsp"
          $SCCACHE_EXE --show-stats
          $SCCACHE_EXE --show-stats | grep -e "Cache hits\s*[1-9]"
          test -e ./foo.o || { echo "No compiler output found"; exit -1; }
          test -e ./foo.o.json || { echo "No dependency list found"; exit -1; }
      - name: Compile - Preprocessing Compiler Bug
        shell: bash
        working-directory: ./tests/msvc-preprocessing
        run: |
          $SCCACHE_EXE "$(where cl.exe)" -c "@args.rsp"
          $SCCACHE_EXE --show-stats
      - name: Stop Server
        if: success() || failure()
        shell: bash
        run: $SCCACHE_EXE --stop-server
      - name: Show Server Log
        if: success() || failure()
        shell: bash
        run: cat "$SCCACHE_ERROR_LOG"

  clang:
    runs-on: ubuntu-24.04
    needs: build
    env:
      LLVM_VERSION: "19"
      SCCACHE_GHA_ENABLED: "on"
      SCCACHE_SERVER_UDS: "\\x00sccache.socket"
    steps:
      - uses: actions/checkout@v4
      - name: Configure Cache Env
        uses: actions/github-script@v7
        with:
          script: |
            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '')
      - name: Install clang
        run: |
          wget https://apt.llvm.org/llvm.sh
          chmod +x llvm.sh
          sudo ./llvm.sh "${LLVM_VERSION}"
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: |
          export CXX="${SCCACHE_PATH} clang++"
          $CXX -c `pwd`/tests/test_clang_multicall.c
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
      - name: Test Twice for Cache Read
        run: |
          export CXX="${SCCACHE_PATH} clang++"
          $CXX -c `pwd`/tests/test_clang_multicall.c
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  hip:
    # Probably wouldn't matter anyway since we run in a container, but staying
    # close to the version is better than not.
    runs-on: ubuntu-24.04
    needs: build
    container:
      image: rocm/dev-ubuntu-24.04:6.3
    env:
      # SCCACHE_GHA_ENABLED: "on"
      ROCM_PATH: "/opt/rocm"
    steps:
      - uses: actions/checkout@v4
      # I don't want to break the cache during testing. Will turn on after I
      # make sure it's working.
      #
      # - name: Configure Cache Env
      #   uses: actions/github-script@v7
      #   with:
      #     script: |
      #       core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
      #       core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '')
      # - name: Configure ROCm Env
      #   uses: actions/github-script@v7
      #   with:
      #     script: |
      #       core.exportVariable('ROCM_PATH', process.env.ROCM_PATH || '');
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Install dependencies
        shell: bash
        run: |
          ## Install dependencies
          sudo apt-get update
          sudo apt-get install -y cmake
      # Ensure that HIPCC isn't already broken
      - name: Sanity Check
        run: |
          hipcc -o vectoradd_hip --offload-arch=gfx900 tests/cmake-hip/vectoradd_hip.cpp
      - name: Test
        run: |
          cmake -B build -S tests/cmake-hip -DCMAKE_HIP_COMPILER_LAUNCHER=${SCCACHE_PATH} -DCMAKE_HIP_ARCHITECTURES=gfx900
          cmake --build build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
      - name: Test Twice for Cache Read
        run: |
          rm -rf build
          cmake -B build -S tests/cmake-hip -DCMAKE_HIP_COMPILER_LAUNCHER=${SCCACHE_PATH} -DCMAKE_HIP_ARCHITECTURES=gfx900
          cmake --build build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  gcc:
    runs-on: ubuntu-24.04
    needs: build
    env:
      SCCACHE_GHA_ENABLED: "on"
      SCCACHE_SERVER_UDS: "/home/runner/sccache.socket"
    steps:
      - uses: actions/checkout@v4
      - name: Configure Cache Env
        uses: actions/github-script@v7
        with:
          script: |
            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '')
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Test
        run: |
          export CXX="${SCCACHE_PATH} g++"
          $CXX -c `pwd`/tests/test_clang_multicall.c
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
      - name: Test Twice for Cache Read
        run: |
          export CXX="${SCCACHE_PATH} g++"
          $CXX -c `pwd`/tests/test_clang_multicall.c
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  autotools:
    runs-on: ubuntu-24.04
    needs: build
    env:
      SCCACHE_GHA_ENABLED: "on"
    steps:
      - uses: actions/checkout@v4
      - name: Configure Cache Env
        uses: actions/github-script@v7
        with:
          script: |
            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '')
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Install dependencies
        shell: bash
        run: |
          ## Install dependencies
          sudo apt-get update
          sudo apt-get install autoconf automake libtool
      - name: Test
        run: |
          cd `pwd`/tests/autotools/
          autoreconf||true
          automake --add-missing
          ./configure CXX="${SCCACHE_PATH} g++"
          make
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
      - name: Test Twice for Cache Read
        run: |
          cd `pwd`/tests/autotools/
          make distclean
          ./configure CXX="${SCCACHE_PATH} g++"
          make
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  cmake:
    runs-on: ubuntu-24.04
    needs: build
    env:
      SCCACHE_GHA_ENABLED: "on"
    steps:
      - uses: actions/checkout@v4
      - name: Configure Cache Env
        uses: actions/github-script@v7
        with:
          script: |
            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '')
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Install dependencies
        shell: bash
        run: |
          ## Install dependencies
          sudo apt-get update
          sudo apt-get install cmake
      - name: Test
        run: |
          cd `pwd`/tests/cmake/
          mkdir build
          cd build
          cmake -DCMAKE_C_COMPILER_LAUNCHER=${SCCACHE_PATH} -DCMAKE_CXX_COMPILER_LAUNCHER=${SCCACHE_PATH} ..
          make
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
      - name: Test Twice for Cache Read
        run: |
          cd `pwd`/tests/cmake/
          rm -rf build
          mkdir build
          cd build
          cmake -DCMAKE_C_COMPILER_LAUNCHER=${SCCACHE_PATH} -DCMAKE_CXX_COMPILER_LAUNCHER=${SCCACHE_PATH} ..
          make
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  # Test "cargo build -Zprofile"
  rust-test-Z-profile:
    runs-on: ubuntu-24.04
    needs: build
    env:
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
      CARGO_INCREMENTAL: "0"
      RUSTFLAGS: "-Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort -Zprofile"
      RUSTDOCFLAGS: "-Cpanic=abort"
      # The last nightly rust that still supports "-Zprofile"
      #
      # See https://github.com/rust-lang/rust/pull/131829
      RUST_TEST_TOOLCHAIN: nightly-2024-11-01
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: ${{ env.RUST_TEST_TOOLCHAIN }}
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: "Coverage test #1"
        run: cargo +${{ env.RUST_TEST_TOOLCHAIN }} clean && cargo +${{ env.RUST_TEST_TOOLCHAIN }} build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
      - name: "Coverage test #2"
        run: cargo +${{ env.RUST_TEST_TOOLCHAIN }} clean && cargo +${{ env.RUST_TEST_TOOLCHAIN }} build
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

  zstd-compression-level:
    runs-on: ubuntu-24.04
    needs: build
    env:
      RUSTC_WRAPPER: /home/runner/.cargo/bin/sccache
      SCCACHE_DIR:
      CARGO_INCREMENTAL: "0"
    steps:
      - uses: actions/download-artifact@v4
        with:
          name: integration-tests
          path: /home/runner/.cargo/bin/
      - name: Chmod for binary
        run: chmod +x ${SCCACHE_PATH}
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - name: default-test-save
        run: |
          export SCCACHE_DIR=${PWD}/temp-test/zstd-level/default
          cargo build
      - name: default-stats-save
        run: ${SCCACHE_PATH} --show-stats
      - name: default-test-use
        run: |
          cargo clean && cargo build
      - name: default-stats-use
        run: ${SCCACHE_PATH} --show-stats
      - name: lv10-test-save
        run: |
          export SCCACHE_DIR=${PWD}/temp-test/zstd-level/10
          export SCCACHE_CACHE_ZSTD_LEVEL=10
          ${SCCACHE_PATH} --stop-server > /dev/null
          cargo clean
          cargo build
      - name: lv10-stats-save
        run: ${SCCACHE_PATH} --show-stats
      - name: lv10-test-use
        run: |
          cargo clean && cargo build
      - name: lv10-stats-use
        run: ${SCCACHE_PATH} --show-stats

  xcode:
    runs-on: macos-latest
    env:
      SCCACHE_PATH: target/debug/sccache
    steps:
      - name: Clone repository
        uses: actions/checkout@v4
      - name: Install rust
        uses: ./.github/actions/rust-toolchain
        with:
          toolchain: "stable"
      - name: Build sccache
        run: |
          cargo build
      - name: Start server
        run: ${SCCACHE_PATH} --start-server
      - name: Test compile xcode
        working-directory: tests/xcode
        run: |
          xcodebuild -version
          xcodebuild -xcconfig sccache.xcconfig
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
      - name: Test compile xcode cached
        working-directory: tests/xcode
        run: |
          xcodebuild clean
          xcodebuild -xcconfig sccache.xcconfig
      - name: Output
        run: |
          ${SCCACHE_PATH} --show-stats
          ${SCCACHE_PATH} --show-stats | grep -e "Cache hits\s*[1-9]"

==> mozilla-sccache-40c3d6b/.github/workflows/snap.yml <==
name: Snap

on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]

jobs:
  Snap:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # needed for version determination
      - name: Build and publish the snap
        uses: canonical/actions/build-snap@release
        with:
          snapcraft-channel: edge
          review-opts: --allow-classic
          snapcraft-token: ${{ secrets.SNAPCRAFT_TOKEN }}
          publish: ${{ github.event_name == 'pull_request' && github.repository == github.event.pull_request.head.repo.full_name }}
          publish-channel: edge/pr${{ github.event.number }}

==> mozilla-sccache-40c3d6b/.gitignore <==
target
*~
*.log
.cargo
*.pyc

# snapcraft artifacts
/parts
/prime
/snap/.snapcraft
/stage
/*.snap

# Ignore lockfiles of test crates used in the integration tests
tests/**/Cargo.lock

# VSCode Workspace files
**.code-workspace

==> mozilla-sccache-40c3d6b/.pre-commit-config.yaml <==
repos:
  - repo: local
    hooks:
      - id: rust-linting
        name: Rust linting
        description: Run cargo fmt on files included in the commit.
        entry: cargo +nightly fmt --
        pass_filenames: true
        types: [file, rust]
        language: system
      - id: rust-clippy
        name: Rust clippy
        description: Run cargo clippy on files included in the commit.
        entry: cargo +nightly clippy --workspace --all-targets --all-features --
        pass_filenames: false
        types: [file, rust]
        language: system

==> mozilla-sccache-40c3d6b/.taplo.toml <==
include = ["Cargo.toml", "**/*.toml"]

[formatting]
# Align consecutive entries vertically.
align_entries = false
# Append trailing commas for multi-line arrays.
array_trailing_comma = true
# Expand arrays to multiple lines that exceed the maximum column width.
array_auto_expand = true
# Collapse arrays that don't exceed the maximum column width and don't contain comments.
array_auto_collapse = true
# Omit white space padding from single-line arrays
compact_arrays = true
# Omit white space padding from the start and end of inline tables.
compact_inline_tables = false
# Maximum column width in characters, affects array expansion and collapse, this doesn't take whitespace into account.
# Note that this is not set in stone, and works on a best-effort basis.
column_width = 80
# Indent based on tables and arrays of tables and their subtables, subtables out of order are not indented.
indent_tables = false
# The substring that is used for indentation, should be tabs or spaces (but technically can be anything).
indent_string = ' '
# Add trailing newline at the end of the file if not present.
trailing_newline = true
# Alphabetically reorder keys that are not separated by empty lines.
reorder_keys = true
# Maximum amount of allowed consecutive blank lines. This does not affect the whitespace at the end of the document, as it is always stripped.
allowed_blank_lines = 1
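# (Illustrative example, not part of the upstream config.) With the options
# above, taplo would rewrite
#   deps = [ "serde", "anyhow", ]
# into the compact single-line form
#   deps = ["serde", "anyhow"]
# and alphabetically sort keys within each uninterrupted block (reorder_keys).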
# Use CRLF for line endings.
crlf = false

==> mozilla-sccache-40c3d6b/CODE_OF_CONDUCT.md <==
# Community Participation Guidelines

This repository is governed by Mozilla's code of conduct and etiquette guidelines.
For more details, please read the
[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/).

## How to Report

For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page.

==> mozilla-sccache-40c3d6b/Cargo.lock <==
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "addr2line"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
 "gimli",
]

[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

[[package]]
name = "aes"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
dependencies = [
 "cfg-if 1.0.0",
 "cipher",
 "cpufeatures",
]

[[package]]
name = "aho-corasick"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
dependencies = [
 "memchr",
]

[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"

[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
 "libc",
]

[[package]]
name = "anstream"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5"
dependencies = [
 "anstyle",
 "anstyle-parse",
 "anstyle-query",
 "anstyle-wincon",
 "colorchoice",
 "utf8parse",
]

[[package]]
name = "anstyle"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea"

[[package]]
name = "anstyle-parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
dependencies = [
 "utf8parse",
]

[[package]]
name = "anstyle-query"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
 "windows-sys 0.48.0",
]

[[package]]
name = "anstyle-wincon"
version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
dependencies = [
 "anstyle",
 "windows-sys 0.52.0",
]

[[package]]
name = "anyhow"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
"b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" dependencies = [ "backtrace", ] [[package]] name = "ar" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d67af77d68a931ecd5cbd8a3b5987d63a1d1d1278f7f6a60ae33db485cdebb69" [[package]] name = "arc-swap" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arrayref" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "ascii" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" [[package]] name = "assert_cmd" version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00ad3f3a942eee60335ab4342358c161ee296829e0d16ff42fc1d6cb07815467" dependencies = [ "anstyle", "bstr", "doc-comment", "predicates", "predicates-core", "predicates-tree", "wait-timeout", ] [[package]] name = "async-trait" version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "atomic-waker" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backon" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113" dependencies = [ "fastrand", "gloo-timers", "tokio", ] [[package]] name = "backtrace" version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "base64" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bb8" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98b4b0f25f18bcdc3ac72bdb486ed0acf7e185221fd4dc985bc15db5800b0ba2" dependencies = [ "async-trait", "futures-channel", "futures-util", 
"parking_lot", "tokio", ] [[package]] name = "bincode" version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ "serde", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "blake3" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if 1.0.0", "constant_time_eq", ] [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "block-padding" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" dependencies = [ "generic-array", ] [[package]] name = "bstr" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" dependencies = [ "memchr", "regex-automata", "serde", ] [[package]] name = "buf_redux" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" dependencies = [ "memchr", "safemem", ] [[package]] name = "bumpalo" version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cbc" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" dependencies = [ "cipher", ] [[package]] name = "cc" version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" dependencies = [ "jobserver", "libc", ] [[package]] name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "serde", "wasm-bindgen", "windows-targets 0.52.0", ] [[package]] name = "chunked_transfer" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" [[package]] name = "cipher" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", ] [[package]] name = "clap" version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" dependencies = [ "clap_builder", "clap_derive", ] [[package]] name = "clap_builder" version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", "terminal_size", ] [[package]] name = "clap_derive" version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "clap_lex" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "colorchoice" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "combine" version = "4.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" dependencies = [ "bytes", "futures-core", "memchr", "pin-project-lite", "tokio", "tokio-util", ] [[package]] name = "const-oid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" dependencies = [ "const-random-macro", ] [[package]] name = "const-random-macro" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ "getrandom", "once_cell", "tiny-keccak", ] [[package]] name = "constant_time_eq" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" [[package]] name = "core-foundation" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "core_affinity" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"622892f5635ce1fc38c8f16dfc938553ed64af482edb5e150bf4caedbfcb2304" dependencies = [ "libc", "num_cpus", "winapi", ] [[package]] name = "cpufeatures" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] [[package]] name = "crc16" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "338089f42c427b86394a5ee60ff321da23a5c89c9d89514c829687b26359fcff" [[package]] name = "crc32c" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" dependencies = [ "rustc_version", ] [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "crossbeam-utils" version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "crunchy" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "daemonize" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab8bfdaacb3c887a54d41bdf48d3af8873b3f5566469f8ba21b92057509f116e" dependencies = [ "libc", ] [[package]] name = "der" version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "pem-rfc7468", "zeroize", ] [[package]] name = "deranged" version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] [[package]] name = "derive_more" version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "difflib" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "const-oid", "crypto-common", "subtle", ] [[package]] name = "directories" version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", "option-ext", "redox_users", "windows-sys 0.48.0", ] [[package]] name = 
"displaydoc" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc2ab4d5a16117f9029e9a6b5e4e79f4c67f6519bc134210d4d4a04ba31f41b" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "dlv-list" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" dependencies = [ "const-random", ] [[package]] name = "doc-comment" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "either" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encoding_rs" version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "env_logger" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", "log", "regex", "termcolor", ] [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "error-chain" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" dependencies = [ "version_check", ] [[package]] name = "fastrand" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "filetime" version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall", "windows-sys 0.52.0", ] [[package]] name = "flate2" version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "float-cmp" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" dependencies = [ "num-traits", ] [[package]] name = "flume" version = "0.10.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" dependencies = [ "futures-core", "futures-sink", "nanorand", "pin-project", "spin 0.9.8", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foreign-types" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ "foreign-types-shared", ] [[package]] name = "foreign-types-shared" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "fs-err" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", ] [[package]] name = "futures" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "futures-sink" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", "wasi", "wasm-bindgen", ] [[package]] name = "ghac" 
version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a10bd5b898cac1a4de4a882a754b2ccaafead449348cfb420b48cd5c00ffd08b" dependencies = [ "prost", ] [[package]] name = "gimli" version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "gloo-timers" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", "js-sys", "wasm-bindgen", ] [[package]] name = "gzp" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c65d1899521a11810501b50b898464d133e1afc96703cff57726964cfa7baf" dependencies = [ "byteorder", "bytes", "core_affinity", "flate2", "flume", "num_cpus", "thiserror", ] [[package]] name = "h2" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.11", "indexmap", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "h2" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", "http 1.1.0", "indexmap", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hmac" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ "digest", ] [[package]] name = "home" version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "hostname" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", "winapi", ] [[package]] name = "http" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" 
version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http 0.2.11", "pin-project-lite", ] [[package]] name = "http-body" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", "http 1.1.0", ] [[package]] name = "http-body-util" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", "futures-core", "http 1.1.0", "http-body 1.0.0", "pin-project-lite", ] [[package]] name = "httparse" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2 0.3.26", "http 0.2.11", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", "socket2", "tokio", "tower-service", "tracing", "want", ] [[package]] name = "hyper" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" dependencies = [ "bytes", "futures-channel", "futures-util", "h2 0.4.5", "http 1.1.0", "http-body 1.0.0", "httparse", "httpdate", "itoa", "pin-project-lite", "tokio", "want", ] [[package]] name = "hyper-rustls" version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.1.0", "hyper-util", "rustls", "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", "webpki-roots", ] [[package]] name = "hyper-tls" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", ] [[package]] name = "hyper-tls" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", "hyper 1.1.0", "hyper-util", "native-tls", "tokio", "tokio-native-tls", "tower-service", ] [[package]] name = "hyper-util" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", "hyper 1.1.0", "pin-project-lite", "socket2", "tokio", "tower", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" version = "0.1.59" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", "windows-core", ] [[package]] name = "iana-time-zone-haiku" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ "cc", ] [[package]] name = "idna" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown", ] [[package]] name = "inout" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ "block-padding", "generic-array", ] [[package]] name = "ipnet" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ "hermit-abi", "libc", "windows-sys 0.52.0", ] [[package]] name = "itertools" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jobserver" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] [[package]] name = "jsonwebtoken" version = "9.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" dependencies = [ "base64 0.21.7", "js-sys", "pem", "ring", "serde", "serde_json", "simple_asn1", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ "spin 0.5.2", ] [[package]] name = "libc" version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libm" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libmount" version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"23c4c2ad2d5cbd2f5a05620c3daf45930add53ec207fa99ce5eec971089dc35f" dependencies = [ "libc", "nix 0.14.1", "quick-error", ] [[package]] name = "libredox" version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ "bitflags 2.4.1", "libc", "redox_syscall", ] [[package]] name = "linked-hash-map" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "lock_api" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "match_cfg" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "md-5" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ "cfg-if 1.0.0", "digest", ] [[package]] name = "memchr" version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memmap2" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" dependencies = [ "libc", ] [[package]] name = "mime" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", ] [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi", "libc", "wasi", "windows-sys 0.52.0", ] [[package]] name = "multipart" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" dependencies = [ "buf_redux", "httparse", "log", "mime", "mime_guess", "quick-error", "rand", "safemem", "tempfile", "twoway", ] [[package]] name = "nanorand" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ "getrandom", ] [[package]] name = "native-tls" version = "0.2.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", "log", "openssl", "openssl-probe", "openssl-sys", "schannel", "security-framework", "security-framework-sys", "tempfile", ] [[package]] name = "nix" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce" dependencies = [ "bitflags 1.3.2", "cc", "cfg-if 0.1.10", "libc", "void", ] [[package]] name = "nix" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.4.1", "cfg-if 1.0.0", "cfg_aliases", "libc", ] [[package]] name = "normalize-line-endings" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] name = "num-bigint" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", ] [[package]] name = "num-bigint-dig" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" dependencies = [ "byteorder", "lazy_static", "libm", "num-integer", "num-iter", "num-traits", "rand", "smallvec", "zeroize", ] [[package]] name = "num-conv" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-integer" version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ "num-traits", ] [[package]] name = "num-iter" version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", ] [[package]] name = "num_cpus" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] [[package]] name = "num_threads" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] [[package]] name = "number_prefix" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "flate2", "memchr", "ruzstd", ] [[package]] name = "once_cell" version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opendal" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a55c840b5a6ad96106d6c0612fabb8f35a5ace826e0474fc55ebda33042b8d33" dependencies = [ "anyhow", "async-trait", "backon", "base64 0.22.1", "bb8", "bytes", "chrono", "crc32c", "futures", "getrandom", "ghac", "http 1.1.0", "log", "md-5", "once_cell", "percent-encoding", "prost", "quick-xml 0.36.1", "redis", "reqsign", "reqwest 0.12.5", "serde", "serde_json", "sha2", "tokio", "uuid", ] [[package]] name = "openssl" version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.4.1", "cfg-if 1.0.0", "foreign-types", "libc", "once_cell", "openssl-macros", "openssl-sys", ] [[package]] name = "openssl-macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" version = "300.2.1+3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" dependencies = [ "cc", ] [[package]] name = "openssl-sys" version = "0.9.101" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", "openssl-src", "pkg-config", "vcpkg", ] [[package]] name = "option-ext" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-multimap" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04d84ee66570a6460dc6143a5c4835f3a4179201ce91c25fc717d4eb3dc31db9" dependencies = [ "dlv-list", "hashbrown", ] [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall", "smallvec", "windows-targets 0.48.5", ] [[package]] name = "pbkdf2" version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest", "hmac", ] [[package]] name = "pem" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ "base64 0.21.7", "serde", ] [[package]] name = "pem-rfc7468" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" dependencies = [ "base64ct", ] [[package]] name = "percent-encoding" version = "2.3.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "pin-project-lite" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs1" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ "der", "pkcs8", "spki", ] [[package]] name = "pkcs5" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e847e2c91a18bfa887dd028ec33f2fe6f25db77db3619024764914affe8b69a6" dependencies = [ "aes", "cbc", "der", "pbkdf2", "scrypt", "sha2", "spki", ] [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der", "pkcs5", "rand_core", "spki", ] [[package]] name = "pkg-config" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" [[package]] name = "powerfmt" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b87bfd4605926cdfefc1c3b5f8fe560e3feca9d5552cf68c466d3d8236c7e8" dependencies = [ "anstyle", "difflib", "float-cmp", "normalize-line-endings", "predicates-core", "regex", ] [[package]] name = "predicates-core" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" [[package]] name = "predicates-tree" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" dependencies = [ "predicates-core", "termtree", ] [[package]] name = "proc-macro2" version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" dependencies = [ "unicode-ident", ] [[package]] name = "prost" version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", 
"prost-derive", ] [[package]] name = "prost-derive" version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "quick-error" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86e446ed58cef1bbfe847bc2fda0e2e4ea9f0e57b90c507d4781292590d72a4e" dependencies = [ "memchr", "serde", ] [[package]] name = "quick-xml" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96a05e2e8efddfa51a84ca47cec303fac86c8541b686d37cac5efc0e094417bc" dependencies = [ "memchr", "serde", ] [[package]] name = "quinn" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 1.1.0", "rustls", "thiserror", "tokio", "tracing", ] [[package]] name = "quinn-proto" version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", "ring", "rustc-hash 2.0.0", "rustls", "slab", "thiserror", "tinyvec", "tracing", ] [[package]] name = "quinn-udp" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9096629c45860fc7fb143e125eb826b5e721e10be3263160c7d60ca832cf8c46" dependencies = [ "libc", "once_cell", "socket2", "tracing", "windows-sys 0.52.0", ] [[package]] name = "quote" version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "redis" version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cccf17a692ce51b86564334614d72dcae1def0fd5ecebc9f02956da74352b5" dependencies = [ "arc-swap", "async-trait", "bytes", "combine", "crc16", "futures", "futures-util", "itoa", "log", "num-bigint", "percent-encoding", "pin-project-lite", "rand", "rustls", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types", "ryu", "sha1_smol", "socket2", "tokio", "tokio-retry2", "tokio-rustls", "tokio-util", "url", ] [[package]] name = "redox_syscall" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 
1.3.2", ] [[package]] name = "redox_users" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom", "libredox", "thiserror", ] [[package]] name = "regex" version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqsign" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb0075a66c8bfbf4cc8b70dca166e722e1f55a3ea9250ecbb85f4d92a5f64149" dependencies = [ "anyhow", "async-trait", "base64 0.22.1", "chrono", "form_urlencoded", "getrandom", "hex", "hmac", "home", "http 1.1.0", "jsonwebtoken", "log", "once_cell", "percent-encoding", "quick-xml 0.35.0", "rand", "reqwest 0.12.5", "rsa", "rust-ini", "serde", "serde_json", "sha1", "sha2", "toml", ] [[package]] name = "reqwest" version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", "h2 0.3.26", "http 0.2.11", "http-body 0.4.6", "hyper 0.14.28", "hyper-tls 0.5.0", "ipnet", "js-sys", "log", "mime", "native-tls", "once_cell", "percent-encoding", "pin-project-lite", "serde", "serde_json", "serde_urlencoded", "system-configuration", "tokio", "tokio-native-tls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", "winreg 0.50.0", ] [[package]] name = "reqwest" version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-channel", "futures-core", "futures-util", "h2 0.4.5", "http 1.1.0", "http-body 1.0.0", "http-body-util", "hyper 1.1.0", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", "ipnet", "js-sys", "log", "mime", "native-tls", "once_cell", "percent-encoding", "pin-project-lite", "quinn", "rustls", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls", "tokio-util", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", "webpki-roots", "winreg 0.52.0", ] [[package]] name = "retry" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9166d72162de3575f950507683fac47e30f6f2c3836b71b7fbc61aa517c9c5f4" dependencies = [ "rand", ] [[package]] name = "ring" version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", "getrandom", "libc", "spin 0.9.8", "untrusted", "windows-sys 0.48.0", ] [[package]] name = "rouille" version = 
"3.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3716fbf57fc1084d7a706adf4e445298d123e4a44294c4e8213caf1b85fcc921" dependencies = [ "base64 0.13.1", "chrono", "filetime", "multipart", "percent-encoding", "rand", "serde", "serde_derive", "serde_json", "sha1_smol", "threadpool", "time", "tiny_http", "url", ] [[package]] name = "rsa" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" dependencies = [ "const-oid", "digest", "num-bigint-dig", "num-integer", "num-traits", "pkcs1", "pkcs8", "rand_core", "sha2", "signature", "spki", "subtle", "zeroize", ] [[package]] name = "rust-ini" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d625ed57d8f49af6cfa514c42e1a71fadcff60eb0b1c517ff82fe41aa025b41" dependencies = [ "cfg-if 1.0.0", "ordered-multimap", "trim-in-place", ] [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] name = "rustix" version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "rustls" version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ "once_cell", "ring", "rustls-pki-types", "rustls-webpki", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring", "rustls-pki-types", "untrusted", ] [[package]] name = "ruzstd" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" 
dependencies = [ "byteorder", "derive_more", "twox-hash", ] [[package]] name = "ryu" version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "safemem" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" [[package]] name = "salsa20" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" dependencies = [ "cipher", ] [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "scc" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" dependencies = [ "sdd", ] [[package]] name = "sccache" version = "0.10.0" dependencies = [ "anyhow", "ar", "assert_cmd", "async-trait", "base64 0.21.7", "bincode", "blake3", "byteorder", "bytes", "cc", "chrono", "clap", "daemonize", "directories", "encoding_rs", "env_logger", "filetime", "flate2", "fs-err", "futures", "gzp", "http 1.1.0", "http-body-util", "hyper 1.1.0", "hyper-util", "itertools", "jobserver", "jsonwebtoken", "libc", "libmount", "linked-hash-map", "log", "memchr", "memmap2", "mime", "nix 0.28.0", "num_cpus", "number_prefix", "object", "once_cell", "opendal", "openssl", "predicates", "rand", "regex", "reqsign", "reqwest 0.12.5", "retry", "rouille", "semver", "serde", "serde_json", "serial_test", "sha2", "shlex", "strip-ansi-escapes", "syslog", "tar", "temp-env", "tempfile", "test-case", "thirtyfour_sync", "tokio", "tokio-serde", "tokio-util", "toml", "tower", "url", "uuid", "version-compare", "walkdir", "which", "windows-sys 0.52.0", "zip", "zstd", ] [[package]] name = "schannel" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scrypt" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" dependencies = [ "pbkdf2", "salsa20", "sha2", ] [[package]] name = "sdd" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" [[package]] name = "security-framework" version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", ] [[package]] name = "security-framework-sys" version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "semver" version = "1.0.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "serde_json" version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "indexmap", "itoa", "ryu", "serde", ] [[package]] name = "serde_repr" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "serde_spanned" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] [[package]] name = "serde_urlencoded" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", "serde", ] [[package]] name = "serial_test" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb86f9315df5df6a70eae0cc22395a44e544a0d8897586820770a35ede74449" dependencies = [ "futures", "log", "once_cell", "parking_lot", "scc", "serial_test_derive", ] [[package]] name = "serial_test_derive" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9bb72430492e9549b0c4596725c0f82729bff861c45aa8099c0a8e67fc3b721" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest", ] [[package]] name = "sha1_smol" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "signature" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", "rand_core", ] [[package]] name = "simple_asn1" version = "0.6.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", "thiserror", "time", ] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "socket2" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", ] [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" dependencies = [ "lock_api", ] [[package]] name = "spki" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", ] [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stringmatch" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8c0faab770316c3838f895fc2dfc3a8707ef4da48676f1014e1061ebd583b40" dependencies = [ "regex", ] [[package]] name = "strip-ansi-escapes" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55ff8ef943b384c414f54aefa961dd2bd853add74ec75e7ac74cf91dba62bcfa" dependencies = [ "vte", ] [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" [[package]] name = "syslog" version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7434e95bcccce1215d30f4bf84fe8c00e8de1b9be4fb736d747ca53d36e7f96f" dependencies = [ "error-chain", "hostname", "libc", "log", "time", ] [[package]] name = "system-configuration" version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "tar" version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" dependencies = [ "filetime", "libc", "xattr", ] [[package]] name = "temp-env" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96374855068f47402c3121c6eed88d29cb1de8f3ab27090e273e420bdabcf050" dependencies = [ "parking_lot", ] [[package]] name = "tempfile" version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if 1.0.0", "fastrand", "rustix", "windows-sys 0.52.0", ] [[package]] name = "termcolor" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" dependencies = [ "winapi-util", ] [[package]] name = "terminal_size" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ "rustix", "windows-sys 0.48.0", ] [[package]] name = "termtree" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-case" version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" dependencies = [ "test-case-macros", ] [[package]] name = "test-case-core" version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "test-case-macros" version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", "test-case-core", ] [[package]] name = "thirtyfour" version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ac2540aff94b9f8d89a94bb5d8cc5e71560f78ee8f6c953cd31469083c61f6d" dependencies = [ "async-trait", "base64 0.13.1", "chrono", "displaydoc", "futures", "log", "reqwest 0.11.23", "serde", "serde_json", "serde_repr", "stringmatch", "thiserror", "tokio", "urlparse", ] [[package]] name = "thirtyfour_sync" version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab1e47e6c2fed609d851c6f6171a559ecffb1d121f2d6e02dd390e90ea2c3d38" dependencies = [ "base64 0.13.1", "log", "reqwest 0.11.23", "serde", "serde_json", "stringmatch", "thirtyfour", ] [[package]] name = "thiserror" version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" 
dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "threadpool" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" dependencies = [ "num_cpus", ] [[package]] name = "time" version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", "libc", "num-conv", "num_threads", "powerfmt", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", ] [[package]] name = "tiny-keccak" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ "crunchy", ] [[package]] name = "tiny_http" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82" dependencies = [ "ascii", "chunked_transfer", "httpdate", "log", "openssl", "zeroize", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" dependencies = [ "backtrace", "bytes", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "tokio-native-tls" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", ] [[package]] name = "tokio-retry2" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "903934dba1c4c2f2e9cb460ef10b5695e0b0ecad3bf9ee7c8675e540c5e8b2d1" dependencies = [ "pin-project", "rand", "tokio", ] [[package]] name = "tokio-rustls" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ "rustls", "rustls-pki-types", "tokio", ] [[package]] name = 
"tokio-serde" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project", ] [[package]] name = "tokio-util" version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", "tracing", ] [[package]] name = "toml" version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" dependencies = [ "serde", "serde_spanned", "toml_datetime", "toml_edit", ] [[package]] name = "toml_datetime" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] [[package]] name = "toml_edit" version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" dependencies = [ "indexmap", "serde", "serde_spanned", "toml_datetime", "winnow", ] [[package]] name = "tower" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", "pin-project", "pin-project-lite", "tokio", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-layer" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", ] [[package]] name = "tracing-core" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] [[package]] name = "trim-in-place" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc" [[package]] name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "twoway" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" dependencies = [ "memchr", ] [[package]] name = "twox-hash" version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if 1.0.0", "static_assertions", ] [[package]] name = "typenum" version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicase" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "untrusted" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "urlparse" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "110352d4e9076c67839003c7788d8604e24dcded13e0b375af3efaa8cf468517" [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ea73390fe27785838dcbf75b91b1d84799e28f1ce71e6f372a5dc2200c80de5" dependencies = [ "getrandom", "serde", ] [[package]] name = "vcpkg" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version-compare" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "579a42fc0b8e0c63b76519a339be31bed574929511fa53c1a3acae26eb258f29" [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vte" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5022b5fbf9407086c180e9557be968742d839e68346af7792b8592489732197" dependencies = [ "utf8parse", "vte_generate_state_changes", ] [[package]] name = "vte_generate_state_changes" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ "proc-macro2", "quote", ] [[package]] name = "wait-timeout" version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" dependencies = [ "libc", ] [[package]] name = "walkdir" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" dependencies = [ "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "wasm-streams" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", ] [[package]] name = "web-sys" version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki-roots" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c452ad30530b54a4d8e71952716a212b08efd0f3562baa66c29a618b07da7c3" dependencies = [ "rustls-pki-types", ] [[package]] name = "which" version = "6.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f" dependencies = [ "either", "home", "rustix", "winsafe", ] [[package]] name = "winapi" version = "0.3.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ "windows-targets 0.52.0", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.5", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.0", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm 0.48.5", "windows_aarch64_msvc 0.48.5", "windows_i686_gnu 0.48.5", "windows_i686_msvc 0.48.5", "windows_x86_64_gnu 0.48.5", "windows_x86_64_gnullvm 0.48.5", "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows-targets" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ "windows_aarch64_gnullvm 0.52.0", "windows_aarch64_msvc 0.52.0", "windows_i686_gnu 0.52.0", "windows_i686_msvc 0.52.0", "windows_x86_64_gnu 0.52.0", "windows_x86_64_gnullvm 0.52.0", "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" dependencies = [ "memchr", ] [[package]] name = "winreg" version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if 1.0.0", "windows-sys 0.48.0", ] [[package]] name = "winreg" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ "cfg-if 1.0.0", "windows-sys 0.48.0", ] [[package]] name = "winsafe" version = "0.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" [[package]] name = "xattr" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "914566e6413e7fa959cc394fb30e563ba80f3541fbd40816d4c05a0fc3f2a0f1" dependencies = [ "libc", "linux-raw-sys", "rustix", ] [[package]] name = "zeroize" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" [[package]] name = "zip" version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ "byteorder", "crc32fast", "crossbeam-utils", ] [[package]] name = "zstd" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" dependencies = [ 
"zstd-safe", ] [[package]] name = "zstd-safe" version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", ] mozilla-sccache-40c3d6b/Cargo.toml000066400000000000000000000120301475712407500171170ustar00rootroot00000000000000[package] edition = "2021" name = "sccache" rust-version = "1.75.0" version = "0.10.0" categories = ["command-line-utilities", "development-tools::build-utils"] description = "Sccache is a ccache-like tool. It is used as a compiler wrapper and avoids compilation when possible. Sccache has the capability to utilize caching in remote storage environments, including various cloud storage options, or alternatively, in local storage." keywords = ["ccache"] license = "Apache-2.0" readme = "README.md" repository = "https://github.com/mozilla/sccache/" [[bin]] name = "sccache" [[bin]] name = "sccache-dist" required-features = ["dist-server"] [profile.release] codegen-units = 1 lto = true strip = true [dependencies] anyhow = { version = "1.0", features = ["backtrace"] } ar = "0.9" async-trait = "0.1" base64 = "0.21" bincode = "1" blake3 = "1" byteorder = "1.5" bytes = "1" chrono = "0.4" clap = { version = "4.4.18", features = ["derive", "env", "wrap_help"] } directories = "5.0.1" encoding_rs = "0.8" env_logger = "0.10" filetime = "0.2" flate2 = { version = "1.0", optional = true, default-features = false, features = [ "rust_backend", ] } fs-err = "2.11" futures = "0.3" gzp = { version = "0.11.3", default-features = false, features = [ "deflate_rust", ] } http = "1.0" http-body-util = { version = "0.1", optional = true } hyper = { version = "1.1", optional = true, features = ["server", "http1"] } hyper-util = { version = "0.1.3", optional = true, features = [ "tokio", "server", ] } itertools = "0.12" jobserver = "0.1" jwt = { package = "jsonwebtoken", version = "9", optional = true } libc = "0.2.153" linked-hash-map = "0.5" log = "0.4" memchr = "2" memmap2 = "0.9.4" mime = "0.3" num_cpus = "1.16" number_prefix = "0.4" object = "0.32" once_cell = "1.19" opendal = { version = "0.52.0", optional = true, default-features = false } openssl = { version = "0.10.64", optional = true } rand = "0.8.4" regex = "1.10.3" reqsign = { version = "0.16.0", optional = true } reqwest = { version = "0.12", features = [ "json", "blocking", "stream", "rustls-tls", "rustls-tls-native-roots", "trust-dns", ], optional = true } retry = "2" semver = "1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" sha2 = { version = "0.10.8", optional = true } shlex = "1.3.0" strip-ansi-escapes = "0.2" tar = "0.4.40" tempfile = "3" tokio = { version = "1", features = [ "rt-multi-thread", "io-util", "time", "net", "process", "macros", ] } tokio-serde = "0.8" tokio-util = { version = "0.7", features = ["codec", "io"] } toml = "0.8" tower = "0.4" url = { version = "2", optional = true } uuid = { version = "1.9", features = ["v4"] } walkdir = "2" # by default which pulls in an outdated failure version which = { version = "6", default-features = false } zip = { version = "0.6", default-features = false } zstd = "0.13" # dist-server only nix = { version = "0.28.0", optional = true, features = [ "mount", "user", "sched", "signal", 
"process", ] } rouille = { version = "3.6", optional = true, default-features = false, features = [ "ssl", ] } syslog = { version = "6", optional = true } version-compare = { version = "0.1.1", optional = true } [dev-dependencies] assert_cmd = "2.0.13" cc = "1.0" chrono = "0.4.33" filetime = "0.2" itertools = "0.12" predicates = "=3.1.0" serial_test = "3.1" temp-env = "0.3.6" test-case = "3.3.1" thirtyfour_sync = "0.27" [target.'cfg(unix)'.dependencies] daemonize = "0.5" [target.'cfg(not(target_os = "freebsd"))'.dependencies.libmount] optional = true version = "0.1.15" [target.'cfg(windows)'.dependencies.windows-sys] features = [ "Win32_Foundation", "Win32_Globalization", "Win32_Storage_FileSystem", "Win32_System_Threading", "Win32_System_Console", ] version = "0.52" [features] all = [ "dist-client", "redis", "s3", "memcached", "gcs", "azure", "gha", "webdav", "oss", ] azure = ["opendal/services-azblob", "reqsign", "reqwest"] default = ["all"] gcs = ["opendal/services-gcs", "reqsign", "url", "reqwest"] gha = ["opendal/services-ghac", "reqwest"] memcached = ["opendal/services-memcached"] native-zlib = [] oss = ["opendal/services-oss", "reqsign", "reqwest"] redis = ["url", "opendal/services-redis"] s3 = ["opendal/services-s3", "reqsign", "reqwest"] webdav = ["opendal/services-webdav", "reqwest"] # Enable features that will build a vendored version of openssl and # statically linked with it, instead of linking against the system-wide openssl # dynamically or statically. vendored-openssl = ["openssl?/vendored", "reqwest?/native-tls-vendored"] # Enable features that require unstable features of Nightly Rust. unstable = [] # Enables distributed support in the sccache client dist-client = [ "flate2", "hyper", "http-body-util", "hyper-util", "reqwest", "url", "sha2", ] # Enables the sccache-dist binary dist-server = [ "jwt", "flate2", "libmount", "nix", "openssl", "reqwest", "rouille", "syslog", "version-compare", ] # Enables dist tests with external requirements dist-tests = ["dist-client", "dist-server"] [workspace] exclude = ["tests/test-crate"] mozilla-sccache-40c3d6b/LICENSE000066400000000000000000000261361475712407500162100ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mozilla-sccache-40c3d6b/README.md000066400000000000000000000356441475712407500164660ustar00rootroot00000000000000[![Build Status](https://github.com/mozilla/sccache/workflows/ci/badge.svg)](https://github.com/mozilla/sccache/actions?query=workflow%3Aci) [![Crates.io](https://img.shields.io/crates/v/sccache.svg)](https://crates.io/crates/sccache) [![Matrix](https://img.shields.io/matrix/sccache:mozilla.org)](https://chat.mozilla.org/#/room/#sccache:mozilla.org) ![Crates.io](https://img.shields.io/crates/l/sccache) [![dependency status](https://deps.rs/repo/github/mozilla/sccache/status.svg)](https://deps.rs/repo/github/mozilla/sccache) [![CodeCov](https://codecov.io/gh/mozilla/sccache/branch/master/graph/badge.svg)](https://codecov.io/gh/mozilla/sccache) sccache - Shared Compilation Cache ================================== sccache is a [ccache](https://ccache.dev/)-like compiler caching tool. It is used as a compiler wrapper and avoids compilation when possible, storing cached results either on [local disk](docs/Local.md) or in one of [several cloud storage backends](#storage-options). 

sccache includes support for caching the compilation of C/C++ code, [Rust](docs/Rust.md), as well as NVIDIA's CUDA using [nvcc](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html), and [clang](https://llvm.org/docs/CompileCudaWithLLVM.html).

sccache also provides [icecream](https://github.com/icecc/icecream)-style distributed compilation (automatic packaging of local toolchains) for all supported compilers (including Rust). The distributed compilation system includes several security features that icecream lacks, such as authentication, transport layer encryption, and sandboxed compiler execution on build servers. See [the distributed quickstart](docs/DistributedQuickstart.md) guide for more information.

sccache is also available as a [GitHub Action](https://github.com/marketplace/actions/sccache-action) to facilitate deployment using the GitHub Actions cache.

---

Table of Contents (ToC)
=======================

* [Installation](#installation)
* [Usage](#usage)
* [Build Requirements](#build-requirements)
* [Build](#build)
* [Separating caches between invocations](#separating-caches-between-invocations)
* [Overwriting the cache](#overwriting-the-cache)
* [Debugging](#debugging)
* [Interaction with GNU `make` jobserver](#interaction-with-gnu-make-jobserver)
* [Known Caveats](#known-caveats)
* [Storage Options](#storage-options)
  * [Local](docs/Local.md)
  * [S3](docs/S3.md)
  * [R2](docs/S3.md#R2)
  * [Redis](docs/Redis.md)
  * [Memcached](docs/Memcached.md)
  * [Google Cloud Storage](docs/Gcs.md)
  * [Azure](docs/Azure.md)
  * [GitHub Actions](docs/GHA.md)
  * [WebDAV (Ccache/Bazel/Gradle compatible)](docs/Webdav.md)
  * [Alibaba OSS](docs/OSS.md)

---

## Installation

There are prebuilt x86-64 binaries available for Windows, Linux (a portable binary compiled against musl), and macOS [on the releases page](https://github.com/mozilla/sccache/releases/latest). Several package managers also include sccache packages; you can also install the latest release from source using cargo, or build directly from a source checkout.

### macOS

On macOS sccache can be installed via [Homebrew](https://brew.sh/):

```bash
brew install sccache
```

or via [MacPorts](https://www.macports.org/):

```bash
sudo port install sccache
```

### Windows

On Windows, sccache can be installed via [scoop](https://scoop.sh/):

```
scoop install sccache
```

### Via cargo

If you have a Rust toolchain installed you can install sccache using cargo. **Note that this will compile sccache from source, which is fairly resource-intensive. For CI purposes you should use prebuilt binary packages.**

```bash
cargo install sccache --locked
```

---

Usage
-----

Running sccache is like running ccache: prefix your compilation commands with it, like so:

```bash
sccache gcc -o foo.o -c foo.c
```

If you want to use sccache for caching Rust builds you can define `build.rustc-wrapper` in the [cargo configuration file](https://doc.rust-lang.org/cargo/reference/config.html). For example, you can set it globally in `$HOME/.cargo/config.toml` by adding:

```toml
[build]
rustc-wrapper = "/path/to/sccache"
```

Note that you need to use cargo 1.40 or newer for this to work.

Alternatively you can use the environment variable `RUSTC_WRAPPER`:

```bash
export RUSTC_WRAPPER=/path/to/sccache
cargo build
```

sccache supports gcc, clang, MSVC, rustc, [NVCC](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html), [NVC++](https://docs.nvidia.com/hpc-sdk//compilers/hpc-compilers-user-guide/index.html), and [Wind River's diab compiler](https://www.windriver.com/products/development-tools/#diab_compiler). Both gcc and msvc support Response Files; read more about their implementation [here](docs/ResponseFiles.md).

If you don't [specify otherwise](#storage-options), sccache will use a local disk cache.

sccache works using a client-server model, where the server runs locally on the same machine as the client. The client-server model allows the server to be more efficient by keeping some state in memory. The sccache command will spawn a server process if one is not already running, or you can run `sccache --start-server` to start the background server process without performing any compilation.

By default the sccache server will listen on `127.0.0.1:4226`; you can set the environment variable `SCCACHE_SERVER_PORT` to use a different port, or `SCCACHE_SERVER_UDS` to listen on a unix domain socket. Abstract unix sockets are also supported as long as the path is escaped following the [format](https://doc.rust-lang.org/std/ascii/fn.escape_default.html). For example:

```
% env SCCACHE_SERVER_UDS=$HOME/sccache.sock sccache --start-server # unix socket
% env SCCACHE_SERVER_UDS=\\x00sccache.sock sccache --start-server # abstract unix socket
```

You can run `sccache --stop-server` to terminate the server. It will also terminate after (by default) 10 minutes of inactivity.

Running `sccache --show-stats` will print a summary of cache statistics.

Some notes about using `sccache` with [Jenkins](https://jenkins.io) are [here](docs/Jenkins.md).

To use sccache with cmake, provide the following command line arguments to cmake 3.4 or newer:

```
-DCMAKE_C_COMPILER_LAUNCHER=sccache
-DCMAKE_CXX_COMPILER_LAUNCHER=sccache
```

The process for using sccache with MSVC and cmake depends on which version of cmake you're using.

**For versions of cmake 3.24 and earlier**, to generate PDB files for debugging with MSVC, you can use the [`/Z7` option](https://docs.microsoft.com/en-us/cpp/build/reference/z7-zi-zi-debug-information-format?view=msvc-160). Alternatively, the `/Zi` option together with `/Fd` can work if `/Fd` names a different PDB file for each object file created. Note that CMake sets `/Zi` by default, so if you use CMake, you can use `/Z7` by adding code like this in your CMakeLists.txt:

```cmake
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
  string(REPLACE "/Zi" "/Z7" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}")
  string(REPLACE "/Zi" "/Z7" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
  string(REPLACE "/Zi" "/Z7" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}")
  string(REPLACE "/Zi" "/Z7" CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}")
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
  string(REPLACE "/Zi" "/Z7" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
  string(REPLACE "/Zi" "/Z7" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}")
endif()
```

By default, sccache will fail your build if it fails to successfully communicate with its associated server. To have sccache instead gracefully failover to the local compiler without stopping, set the environment variable `SCCACHE_IGNORE_SERVER_IO_ERROR=1`.
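
For example, a minimal sketch of enabling this failover behavior in a shell (the compile invocation is just illustrative):

```bash
# Fall back to invoking the compiler directly if the sccache server
# cannot be reached, instead of failing the build.
export SCCACHE_IGNORE_SERVER_IO_ERROR=1
sccache gcc -o foo.o -c foo.c
```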

**For versions of cmake 3.25 and later**, to compile with MSVC, you have to use the new `CMAKE_MSVC_DEBUG_INFORMATION_FORMAT` option, meant to configure the `/Z7` flag. Additionally, you must set CMake policy CMP0141 to the NEW setting:

```cmake
set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT Embedded)
cmake_policy(SET CMP0141 NEW)
```

Example configuration where we automatically look for `sccache` in the `PATH`:

```cmake
find_program(SCCACHE sccache REQUIRED)
set(CMAKE_C_COMPILER_LAUNCHER ${SCCACHE})
set(CMAKE_CXX_COMPILER_LAUNCHER ${SCCACHE})
set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT Embedded)
cmake_policy(SET CMP0141 NEW)
```

Alternatively, if configuring cmake with MSVC on the command line, assuming that sccache is on the default search path:

```
cmake -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_MSVC_DEBUG_INFORMATION_FORMAT=Embedded -DCMAKE_POLICY_CMP0141=NEW [...]
```

And you can then build code as usual, without any additional flags on the command line, which is useful for IDEs.

---

Build Requirements
------------------

sccache is a [Rust](https://www.rust-lang.org/) program. Building it requires `cargo` (and thus `rustc`). sccache currently requires **Rust 1.75.0**. We recommend you install Rust via [Rustup](https://rustup.rs/).

Build
-----

If you are building sccache for non-development purposes make sure you use `cargo build --release` to get optimized binaries:

```bash
cargo build --release [--no-default-features --features=s3|redis|gcs|memcached|azure|gha|webdav|oss]
```

The list of features can be found in the `Cargo.toml` file, in the `[features]` section. By default, `sccache` builds with support for all storage backends; an individual backend may be disabled by resetting the list of features and enabling all the other backends. Refer to the [Cargo Documentation](http://doc.crates.io/manifest.html#the-features-section) for details on how to select features with Cargo.

### Building portable binaries

When building with the `dist-server` feature, `sccache` will depend on OpenSSL, which can be an annoyance if you want to distribute portable binaries. It is possible to statically link against OpenSSL using the `vendored-openssl` feature.

#### Linux

Build with `cargo` and use `ldd` to check that the resulting binary does not depend on OpenSSL anymore.

#### macOS

Build with `cargo` and use `otool -L` to check that the resulting binary does not depend on OpenSSL anymore.

#### Windows

On Windows, the binary might also depend on a few MSVC CRT DLLs that are not available on older Windows versions. It is possible to statically link against the CRT using a `.cargo/config.toml` file with the following contents.

```toml
[target.x86_64-pc-windows-msvc]
rustflags = ["-Ctarget-feature=+crt-static"]
```

Build with `cargo` and use `dumpbin /dependents` to check that the resulting binary does not depend on MSVC CRT DLLs anymore.

When statically linking with OpenSSL, you will need Perl available in your `$PATH`.

---

Separating caches between invocations
-------------------------------------

In situations where several different compilation invocations should not reuse the cached results from each other, one can set `SCCACHE_C_CUSTOM_CACHE_BUSTER` to a unique value that will be mixed into the hash, as sketched below. The `MACOSX_DEPLOYMENT_TARGET` and `IPHONEOS_DEPLOYMENT_TARGET` variables already exhibit such reuse-suppression behaviour. There are currently no such variables for compiling Rust.
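
As a sketch of how this can be used, two builds of the same sources that must never share cache entries can each set a distinct buster value (the values below are arbitrary placeholders):

```bash
# Two compilations of the same file that should not reuse each
# other's cached results; the buster values are hypothetical.
SCCACHE_C_CUSTOM_CACHE_BUSTER=variant-a sccache gcc -o foo.o -c foo.c
SCCACHE_C_CUSTOM_CACHE_BUSTER=variant-b sccache gcc -o foo.o -c foo.c
```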

---

Overwriting the cache
---------------------

In situations where the cache contains broken build artifacts, it can be necessary to overwrite the contents in the cache. That can be achieved by setting the `SCCACHE_RECACHE` environment variable.

---

Debugging
---------

You can set the `SCCACHE_ERROR_LOG` environment variable to a path and set `SCCACHE_LOG` to get the server process to redirect its logging there (including the output of unhandled panics, since the server sets `RUST_BACKTRACE=1` internally).

    SCCACHE_ERROR_LOG=/tmp/sccache_log.txt SCCACHE_LOG=debug sccache

You can also set these environment variables for your build system, for example

    SCCACHE_ERROR_LOG=/tmp/sccache_log.txt SCCACHE_LOG=debug cmake --build /path/to/cmake/build/directory

Alternatively, if you are compiling locally, you can run the server manually in foreground mode by running `SCCACHE_START_SERVER=1 SCCACHE_NO_DAEMON=1 sccache`, and send logging to stderr by setting the [`SCCACHE_LOG` environment variable](https://docs.rs/env_logger/0.7.1/env_logger/#enabling-logging), for example. This method is not suitable for CI services, because you need to run the actual compilation from another shell at the same time.

    SCCACHE_LOG=debug SCCACHE_START_SERVER=1 SCCACHE_NO_DAEMON=1 sccache

---

Interaction with GNU `make` jobserver
-------------------------------------

sccache provides support for a [GNU make jobserver](https://www.gnu.org/software/make/manual/html_node/Job-Slots.html). When the server is started from a process that provides a jobserver, sccache will use that jobserver and provide it to any processes it spawns. (If you are running sccache from a GNU make recipe, you will need to prefix the command with `+` to get this behavior.) If the sccache server is started without a jobserver present it will create its own, with the number of slots equal to the number of available CPU cores.

This is most useful when using sccache for Rust compilation, as rustc supports using a jobserver for parallel codegen, so this ensures that rustc will not overwhelm the system with codegen tasks. Cargo implements its own jobserver ([see the information on `NUM_JOBS` in the cargo documentation](https://doc.rust-lang.org/stable/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts)) for rustc to use, so using sccache for Rust compilation in cargo via `RUSTC_WRAPPER` should do the right thing automatically.

---

Known Caveats
-------------

### General

* Absolute paths to files must match to get a cache hit. This means that even if you are using a shared cache, everyone will have to build at the same absolute path (i.e. not in `$HOME`) in order to benefit each other. In Rust this includes the source for third party crates, which are stored in `$HOME/.cargo/registry/cache` by default.

### Rust

* Crates that invoke the system linker cannot be cached. This includes `bin`, `dylib`, `cdylib`, and `proc-macro` crates. You may be able to improve compilation time of large `bin` crates by converting them to a `lib` crate with a thin `bin` wrapper.
* Incrementally compiled crates cannot be cached. By default, in the debug profile Cargo will use incremental compilation for workspace members and path dependencies. [You can disable incremental compilation.](https://doc.rust-lang.org/cargo/reference/profiles.html#incremental)

[More details on Rust caveats](/docs/Rust.md)

### Symbolic links

* Symbolic links to sccache won't work.
  Use hardlinks: `ln sccache /usr/local/bin/cc`

### User Agent

* Requests sent to your storage option of choice will have a user agent header indicating the current sccache version, e.g. `sccache/0.8.2`.

Storage Options
---------------

* [Local](docs/Local.md)
* [S3](docs/S3.md)
* [R2](docs/S3.md#R2)
* [Redis](docs/Redis.md)
* [Memcached](docs/Memcached.md)
* [Google Cloud Storage](docs/Gcs.md)
* [Azure](docs/Azure.md)
* [GitHub Actions](docs/GHA.md)
* [WebDAV (Ccache/Bazel/Gradle compatible)](docs/Webdav.md)
* [Alibaba OSS](docs/OSS.md)

mozilla-sccache-40c3d6b/docs/000077500000000000000000000000001475712407500161235ustar00rootroot00000000000000mozilla-sccache-40c3d6b/docs/Architecture.md000066400000000000000000000010341475712407500210650ustar00rootroot00000000000000# Sccache high level architecture

This diagram shows at a high level how sccache works.

```mermaid
flowchart LR
    id1[[Environment variables]] --> hash
    id2[[Compiler binary]] --> hash
    id3[[Compiler arguments]] --> hash
    id5[[Files]] --> | | hash
    Compile --> Upload
    Storage[(Storage)] --> | yes | Download
    hash([hash]) --> | exists? | Storage
    Storage --> | no | Compile
    Upload --> Storage
```

For more details about how hash generation works, see [the caching documentation](Caching.md).

mozilla-sccache-40c3d6b/docs/Azure.md000066400000000000000000000014271475712407500175370ustar00rootroot00000000000000# Azure

To use Azure Blob Storage, you'll need your Azure connection string and an _existing_ Blob Storage container name.

Set the `SCCACHE_AZURE_CONNECTION_STRING` environment variable to your connection string, and `SCCACHE_AZURE_BLOB_CONTAINER` to the name of the container to use. Note that sccache will not create the container for you - you'll need to do that yourself.

You can also define a prefix that will be prepended to the keys of all cache objects created and read within the container, effectively creating a scope. To do that use the `SCCACHE_AZURE_KEY_PREFIX` environment variable. This can be useful when sharing a container with another application.

**Important:** The environment variables are only taken into account when the server starts, i.e. only on the first run.

mozilla-sccache-40c3d6b/docs/Caching.md000066400000000000000000000043741475712407500200110ustar00rootroot00000000000000# How caching works

To know whether the storage contains the artifact we need, we compute hashes to make sure the input is the same. Because the configuration and environment matter, the hash computation takes a few parameters into account.

## How hash keys are computed

### Rust

We generate a blake3 digest for each file compiled. In addition, the hash takes into account:

* Path to the rustc executable
* Host triple for this rustc
* Path to the rustc sysroot
* digests of all the shared libraries in rustc's $sysroot/lib
* A shared, caching reader for rlib dependencies (for dist-client)
* Parsed arguments from the rustc invocation

See https://github.com/mozilla/sccache/blob/8567bbe2ba493153e76177c1f9a6f98cc7ba419f/src/compiler/rust.rs#L122 for the full list

### C/C++ compiler

For C/C++, the hash is generated with a blake3 digest of the preprocessed file (-E with gcc/clang). For compilations that specify multiple `-arch` flags, these flags are rewritten to their corresponding preprocessor defines to allow pre-processing the file (e.g. `-arch x86_64` is rewritten to `-D__X86_64__=1`). This can be enabled by setting the environment variable `SCCACHE_CACHE_MULTIARCH`, but it is disabled by default as it may not work in all cases.
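
A minimal sketch of opting in (assuming a clang invocation; the file names are placeholders):

```bash
# Opt in to caching multi-architecture compilations; this is off by
# default because the -arch flag rewriting may not work in all cases.
SCCACHE_CACHE_MULTIARCH=1 sccache clang -arch x86_64 -arch arm64 -c foo.c -o foo.o
```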

The hash also takes into account:

* Hash of the compiler binary
* Programming language
* Flag required to compile for the given language
* File in which to generate dependencies
* Commandline arguments for dependency generation
* Commandline arguments for the preprocessor
* Commandline arguments specifying the architecture to compile for
* Extra files that need to have their contents hashed
* Whether the compilation is generating profiling or coverage data
* Color mode
* Environment variables

See https://github.com/mozilla/sccache/blob/8567bbe2ba493153e76177c1f9a6f98cc7ba419f/src/compiler/c.rs#L84

### C/C++ preprocessor

In "preprocessor cache mode", [explained in the local doc](Local.md), an extra key is computed to cache the preprocessor output itself. It is very close to the C/C++ compiler one, but with additional elements:

* The path of the input file
* The hash of the input file

Note that some compiler options can disable preprocessor cache mode. As of this writing, only `-Xpreprocessor` and `-Wp,*` do.

mozilla-sccache-40c3d6b/docs/Configuration.md000066400000000000000000000177711475712407500212710ustar00rootroot00000000000000# Available Configuration Options

## file

```toml
# If specified, wait this long for the server to start up.
server_startup_timeout_ms = 10000

[dist]
# where to find the scheduler
scheduler_url = "http://1.2.3.4:10600"
# a set of prepackaged toolchains
toolchains = []
# the maximum size of the toolchain cache in bytes
toolchain_cache_size = 5368709120
cache_dir = "/home/user/.cache/sccache-dist-client"

[dist.auth]
type = "token"
token = "secrettoken"

#[cache.azure]
# does not work as it appears

[cache.disk]
dir = "/tmp/.cache/sccache"
size = 7516192768 # 7 GiBytes

# See the local docs for more explanations about this mode
[cache.disk.preprocessor_cache_mode]
# Whether to use the preprocessor cache mode
use_preprocessor_cache_mode = true
# Whether to use file times to check for changes
file_stat_matches = true
# Whether to also use ctime (file status change) time to check for changes
use_ctime_for_stat = true
# Whether to ignore `__TIME__` when caching
ignore_time_macros = false
# Whether to skip (meaning not cache, only hash) system headers
skip_system_headers = false
# Whether to hash the current working directory
hash_working_directory = true

[cache.gcs]
# optional oauth url
oauth_url = "..."
# optional deprecated url
deprecated_url = "..."
rw_mode = "READ_ONLY"
# rw_mode = "READ_WRITE"
cred_path = "/psst/secret/cred"
bucket = "bucket"
key_prefix = "prefix"

[cache.gha]
url = "http://localhost"
token = "secret"
cache_to = "sccache-latest"
cache_from = "sccache-"

[cache.memcached]
# Deprecated alias for `endpoint`
# url = "127.0.0.1:11211"
endpoint = "tcp://127.0.0.1:11211"
# Username and password for authentication
username = "user"
password = "passwd"
# Entry expiration time in seconds. Default is 86400 (24 hours)
expiration = 3600
key_prefix = "/custom/prefix/if/need"

[cache.redis]
# Deprecated, use `endpoint` instead
url = "redis://user:passwd@1.2.3.4:6379/?db=1"
## Refer to the `opendal` documentation for more information about Redis endpoint
# Single-node endpoint. Mutually exclusive with `cluster_endpoints`
endpoint = "redis://127.0.0.1:6379"
# Multiple-node list of endpoints (cluster mode). Mutually exclusive with `endpoint`
cluster_endpoints = "redis://10.0.0.1:6379,redis://10.0.0.2:6379"
username = "user"
password = "passwd"
# Database number to use. Default is 0
db = 1
# Entry expiration time in seconds. Default is 0 (never expire)
expiration = 3600
key_prefix = "/custom/prefix/if/need"

[cache.s3]
bucket = "name"
endpoint = "s3-us-east-1.amazonaws.com"
use_ssl = true
key_prefix = "s3prefix"
server_side_encryption = false

[cache.webdav]
endpoint = "http://192.168.10.42:80/some/webdav.php"
key_prefix = "/custom/webdav/subfolder/if/need"
# Basic HTTP authentication credentials.
username = "alice"
password = "secret12"
# Mutually exclusive with username & password. Bearer token value
token = "token123"

[cache.oss]
bucket = "name"
endpoint = "oss-us-east-1.aliyuncs.com"
key_prefix = "ossprefix"
no_credentials = true
```

sccache looks for its configuration file at the path indicated by the env variable `SCCACHE_CONF`. If no such env variable is set, sccache looks at the default locations below:

- Linux: `~/.config/sccache/config`
- macOS: `~/Library/Application Support/Mozilla.sccache/config`
- Windows: `%APPDATA%\Mozilla\sccache\config\config`

The latest `cache.XXX` entries may be found here: https://github.com/mozilla/sccache/blob/ffe3070f77ef3301c8ff718316e4ab017ec83042/src/config.rs#L300.

## env

Whatever is set in the file-based configuration is overruled by the corresponding env configuration variables.

### misc

* `SCCACHE_ALLOW_CORE_DUMPS` to enable core dumps by the server
* `SCCACHE_CONF` configuration file path
* `SCCACHE_CACHED_CONF`
* `SCCACHE_IDLE_TIMEOUT` how long the local daemon process waits for more client requests before exiting, in seconds. Set to `0` to run sccache permanently
* `SCCACHE_STARTUP_NOTIFY` specify a path to a socket which will be used for server completion notification
* `SCCACHE_MAX_FRAME_LENGTH` how much data can be transferred between client and server
* `SCCACHE_NO_DAEMON` set to `1` to disable putting the server to the background
* `SCCACHE_CACHE_MULTIARCH` to enable caching of multi-architecture builds (disabled by default)
* `SCCACHE_CACHE_ZSTD_LEVEL` to set the zstd compression level of the cache. The range is `1-22` and the default is `3`.
  - For example, level `10` gives a cache about 0.9x the size of the default level `3`, at about 1.6x the compression time (tested by compiling the sccache code).
  - This option only applies to newly compressed cache entries and doesn't affect the existing cache.
  - If you want it applied to the whole cache, you should reset the cache and let it be regenerated.

### cache configs

#### disk (local)

* `SCCACHE_DIR` local on disk artifact cache directory
* `SCCACHE_CACHE_SIZE` maximum size of the local on disk cache, e.g. `2G` - default is 10G
* `SCCACHE_DIRECT` enable/disable preprocessor caching (see [the local doc](Local.md))
* `SCCACHE_LOCAL_RW_MODE` the mode that the cache will operate in (`READ_ONLY` or `READ_WRITE`)

#### s3 compatible

* `SCCACHE_BUCKET` s3 bucket to be used
* `SCCACHE_ENDPOINT` s3 endpoint
* `SCCACHE_REGION` s3 region, required if using AWS S3
* `SCCACHE_S3_USE_SSL` set this to `true` if the s3 endpoint requires TLS
* `SCCACHE_S3_KEY_PREFIX` s3 key prefix (optional)

The endpoint used then becomes `${SCCACHE_BUCKET}.s3-{SCCACHE_REGION}.amazonaws.com`. If you are not using the default endpoint and `SCCACHE_REGION` is undefined, it will default to `us-east-1`.

#### cloudflare r2

* `SCCACHE_BUCKET` is the name of your R2 bucket.
* `SCCACHE_ENDPOINT` must follow the format of `https://<ACCOUNT_ID>.r2.cloudflarestorage.com`. Note that the `https://` must be included. Your account ID can be found [here](https://developers.cloudflare.com/fundamentals/get-started/basic-tasks/find-account-and-zone-ids/).
* `SCCACHE_REGION` should be set to `auto`.
* `SCCACHE_S3_KEY_PREFIX` s3 key prefix (optional). A minimal example follows.
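
As an illustrative sketch, a Cloudflare R2 setup might look like this (the bucket name and `<ACCOUNT_ID>` are placeholders to replace with real values):

```bash
# Hypothetical R2 backend configuration for the sccache server.
export SCCACHE_BUCKET=my-sccache-bucket
export SCCACHE_ENDPOINT=https://<ACCOUNT_ID>.r2.cloudflarestorage.com
export SCCACHE_REGION=auto
```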

#### redis

* `SCCACHE_REDIS` full redis url, including auth and access token/passwd (deprecated).
* `SCCACHE_REDIS_ENDPOINT` redis url without auth and access token/passwd - single-node configuration.
* `SCCACHE_REDIS_CLUSTER_ENDPOINTS` redis cluster urls, separated by commas - shared cluster configuration.
* `SCCACHE_REDIS_USERNAME` redis username (optional).
* `SCCACHE_REDIS_PASSWORD` redis password (optional).
* `SCCACHE_REDIS_DB` redis database (optional, default is 0).
* `SCCACHE_REDIS_EXPIRATION` / `SCCACHE_REDIS_TTL` ttl for redis cache, don't set for default behavior.
* `SCCACHE_REDIS_KEY_PREFIX` key prefix (optional).

The full url then appears as `redis://user:passwd@1.2.3.4:6379/?db=1`.

#### memcached

* `SCCACHE_MEMCACHED` is a deprecated alias for `SCCACHE_MEMCACHED_ENDPOINT`.
* `SCCACHE_MEMCACHED_ENDPOINT` memcached url.
* `SCCACHE_MEMCACHED_USERNAME` memcached username (optional).
* `SCCACHE_MEMCACHED_PASSWORD` memcached password (optional).
* `SCCACHE_MEMCACHED_EXPIRATION` ttl for memcached cache, don't set for default behavior.
* `SCCACHE_MEMCACHED_KEY_PREFIX` key prefix (optional).

#### gcs

* `SCCACHE_GCS_BUCKET`
* `SCCACHE_GCS_CREDENTIALS_URL`
* `SCCACHE_GCS_KEY_PATH`
* `SCCACHE_GCS_RW_MODE`

#### azure

* `SCCACHE_AZURE_CONNECTION_STRING`

#### gha

* `SCCACHE_GHA_CACHE_URL` / `ACTIONS_CACHE_URL` GitHub Actions cache API URL
* `SCCACHE_GHA_RUNTIME_TOKEN` / `ACTIONS_RUNTIME_TOKEN` GitHub Actions access token
* `SCCACHE_GHA_CACHE_TO` cache key to write
* `SCCACHE_GHA_CACHE_FROM` comma separated list of cache keys to read from

#### webdav

* `SCCACHE_WEBDAV_ENDPOINT` a webdav service endpoint to store cache, such as `http://127.0.0.1:8080/my/webdav.php`.
* `SCCACHE_WEBDAV_KEY_PREFIX` specify the key prefix (subfolder) of cache (optional).
* `SCCACHE_WEBDAV_USERNAME` a username to authenticate with the webdav service (optional).
* `SCCACHE_WEBDAV_PASSWORD` a password to authenticate with the webdav service (optional).
* `SCCACHE_WEBDAV_TOKEN` a token to authenticate with the webdav service (optional) - may be used instead of login & password.

#### OSS

* `SCCACHE_OSS_BUCKET`
* `SCCACHE_OSS_ENDPOINT`
* `SCCACHE_OSS_KEY_PREFIX`
* `ALIBABA_CLOUD_ACCESS_KEY_ID`
* `ALIBABA_CLOUD_ACCESS_KEY_SECRET`
* `SCCACHE_OSS_NO_CREDENTIALS`

mozilla-sccache-40c3d6b/docs/Distributed.md000066400000000000000000000331221475712407500207300ustar00rootroot00000000000000# Distributed sccache

Background:

- You should read about JSON Web Tokens - https://jwt.io/.
  - HS256 in short: you can sign a piece of (typically unencrypted) data with a key. Verification involves signing the data again with the same key and comparing the result. As a result, if you want two parties to verify each other's messages, the key must be shared beforehand.
- Secure tokens referenced below should be generated with a CSPRNG (your OS random number generator should suffice). For example, on Linux this is accessible with: `openssl rand -hex 64`.
- When relying on random number generators (for generating keys or tokens), be aware that a lack of entropy is possible in cloud or virtualized environments in some scenarios.

## Overview

Distributed sccache consists of three parts:

- the client, an sccache binary that wishes to perform a compilation on remote machines
- the scheduler (`sccache-dist` binary), responsible for deciding where a compilation job should run
- the server (`sccache-dist` binary), responsible for actually executing a build

All servers are required to be 64-bit Linux or FreeBSD installs.
Clients may request compilation from Linux, Windows or macOS. Linux compilations will attempt to automatically package the compiler in use, while Windows and macOS users will need to specify a toolchain for cross-compilation ahead of time.

## Communication

The HTTP implementation of sccache has the following API, where all HTTP body content is encoded using [`bincode`](http://docs.rs/bincode):

- scheduler
  - `POST /api/v1/scheduler/alloc_job`
    - Called by a client to submit a compilation request.
    - Returns information on where the job has been allocated and should run.
  - `GET /api/v1/scheduler/server_certificate`
    - Called by a client to retrieve the (dynamically created) HTTPS certificate for a server, for use in communication with that server.
    - Returns a digest and PEM for the temporary server HTTPS certificate.
  - `POST /api/v1/scheduler/heartbeat_server`
    - Called (repeatedly) by servers to register as available for jobs.
  - `POST /api/v1/scheduler/job_state`
    - Called by servers to inform the scheduler of the state of the job.
  - `GET /api/v1/scheduler/status`
    - Returns information about the scheduler.
- server
  - `POST /api/v1/distserver/assign_job`
    - Called by the scheduler to inform of a new job being assigned to this server.
    - Returns whether the toolchain is already on the server or needs submitting.
  - `POST /api/v1/distserver/submit_toolchain`
    - Called by the client to submit a toolchain.
  - `POST /api/v1/distserver/run_job`
    - Called by the client to run a job.
    - Returns the compilation stdout along with files created.

There are three axes of security in this setup:

1. Can the scheduler trust the servers?
2. Is the client permitted to submit and run jobs?
3. Can third parties see and/or modify traffic?

### Server Trust

If a server is malicious, it can return malicious compilation output to a user. To protect against this, servers must be authenticated to the scheduler. You have three means for doing this, and the scheduler and all servers must use the same mechanism.

Once a server has registered itself using the selected authentication, the scheduler will trust the registered server address and use it for builds.

#### JWT HS256 (preferred)

This method uses a secret key to create a per-IP-and-port token for each server. Acquiring a token will only allow participation as a server if the attacker can additionally impersonate the IP and port the token was generated for. You *must* keep the secret key safe.

*To use it*:

Create a scheduler key with `sccache-dist auth generate-jwt-hs256-key` (which will use your OS random number generator) and put it in your scheduler config file as follows:

```
server_auth = { type = "jwt_hs256", secret_key = "YOUR_KEY_HERE" }
```

Now generate a token for the server, giving the IP and port the scheduler and clients can connect to the server on (address `192.168.1.10:10501` here):

```
sccache-dist auth generate-jwt-hs256-server-token \
    --secret-key YOUR_KEY_HERE \
    --server 192.168.1.10:10501
```

*or:*

```
sccache-dist auth generate-jwt-hs256-server-token \
    --config /path/to/scheduler-config.toml \
    --server 192.168.1.10:10501
```

This will output a token (you can examine it with https://jwt.io if you're curious) that you should add to your server config file as follows:

```
scheduler_auth = { type = "jwt_token", token = "YOUR_TOKEN_HERE" }
```

Done!

#### Token

This method simply shares a token between the scheduler and all servers. A token leak from anywhere allows any attacker to participate as a server.
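As the background section at the top of this document notes, such a token should be generated with a CSPRNG; on Linux, for example:

```sh
# Generates 64 random bytes, hex-encoded, suitable as a shared token.
openssl rand -hex 64
```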
*To use it*:

Choose a 'secure token' you can share between your scheduler and all servers.

Put the following in your scheduler config file:

```
server_auth = { type = "token", token = "YOUR_TOKEN_HERE" }
```

Put the following in your server config file:

```
scheduler_auth = { type = "token", token = "YOUR_TOKEN_HERE" }
```

Done!

#### Insecure (bad idea)

*This route is not recommended*

This method uses a hardcoded token that effectively disables authentication and provides no security at all.

*To use it*:

Put the following in your scheduler config file:

```
server_auth = { type = "DANGEROUSLY_INSECURE" }
```

Put the following in your server config file:

```
scheduler_auth = { type = "DANGEROUSLY_INSECURE" }
```

Done!

### Client Trust

If a client is malicious, it can cause a DoS of distributed sccache servers or explore ways to escape the build sandbox. To protect against this, clients must be authenticated.

Each client will use an authentication token for the initial job allocation request to the scheduler. A successful allocation will return a job token that is used to authorise requests to the appropriate server for that specific job.

This job token is a JWT HS256 token of the job id, signed with a server key. The key for each server is randomly generated on server startup and given to the scheduler during registration. This means that the server can verify users without either a) adding client authentication to every server or b) needing secret transfer between scheduler and server on every job allocation.

#### OAuth2

This is a group of similar methods for achieving the same thing - the client retrieves a token from an OAuth2 service, and then submits it to the scheduler, which has a few different options for performing validation on that token.

*To use it*:

Put one of the following settings in your scheduler config file to determine how the scheduler will validate tokens from the client:

```
# Use the known settings for Mozilla OAuth2 token validation
client_auth = { type = "mozilla" }

# Will forward the valid JWT token onto another URL in the `Bearer` header, with a
# success response indicating the token is valid. Optional `cache_secs` how long
# to cache successful authentication for.
client_auth = { type = "proxy_token", url = "...", cache_secs = 60 }
```

Additionally, each client should set up an OAuth2 configuration with one of the following settings (as appropriate for your OAuth service):

```
# Use the known settings for Mozilla OAuth2 authentication
auth = { type = "mozilla" }

# Use the Authorization Code with PKCE flow. This requires a client id,
# an initial authorize URL (which may have parameters like 'audience' depending
# on your service) and the URL for retrieving a token after the browser flow.
auth = { type = "oauth2_code_grant_pkce", client_id = "...", auth_url = "...", token_url = "..." }

# Use the Implicit flow (typically not recommended due to security issues). This requires
# a client id and an authorize URL (which may have parameters like 'audience' depending
# on your service).
auth = { type = "oauth2_implicit", client_id = "...", auth_url = "..." }
```

The client should then run `sccache --dist-auth` and follow the instructions to retrieve a token. This will be automatically cached locally for the token expiry period (manual revalidation will be necessary after expiry).

#### Token

This method simply shares a token between the scheduler and all clients. A token leak from anywhere allows any attacker to participate as a client.
*To use it*:

Choose a 'secure token' you can share between your scheduler and all clients.

Put the following in your scheduler config file:

```
client_auth = { type = "token", token = "YOUR_TOKEN_HERE" }
```

Put the following in your client config file:

```
auth = { type = "token", token = "YOUR_TOKEN_HERE" }
```

Done!

#### Insecure (bad idea)

*This route is not recommended*

This method uses a hardcoded token that effectively disables authentication and provides no security at all.

*To use it*:

Put the following in your scheduler config file:

```
client_auth = { type = "DANGEROUSLY_INSECURE" }
```

Remove any `auth =` setting under the `[dist]` heading in your client config file (it will default to this insecure mode).

Done!

### Eavesdropping and Tampering Protection

If third parties can see traffic to the servers, source code can be leaked. If third parties can modify traffic to and from the servers or the scheduler, they can cause the client to receive malicious compiled objects.

Securing communication with the scheduler is the responsibility of the sccache cluster administrator - it is recommended to put a webserver with an HTTPS certificate in front of the scheduler and instruct clients to configure their `scheduler_url` with the appropriate `https://` address. In this configuration the scheduler will verify the server's IP by inspecting the `X-Real-IP` header's value, if present. The webserver used in this case should be configured to set this header to the appropriate value.

Securing communication with the server is performed automatically - HTTPS certificates are generated dynamically on server startup and communicated to the scheduler during the heartbeat. If a client does not have the appropriate certificate for communicating securely with a server (after receiving a job allocation from the scheduler), the certificate will be requested from the scheduler.

## Configuration

Use the `--config` argument to pass the path of its configuration file to `sccache-dist`.

### scheduler.toml

```toml
# The socket address the scheduler will listen on. It's strongly recommended
# to listen on localhost and put a HTTPS server in front of it.
public_addr = "127.0.0.1:10600"

[client_auth]
type = "token"
token = "my client token"

[server_auth]
type = "jwt_hs256"
secret_key = "my secret key"
```

#### [client_auth]

The `[client_auth]` section can be one of (sorted by authentication method):

```toml
# OAuth2
[client_auth]
type = "mozilla"

client_auth = { type = "proxy_token", url = "...", cache_secs = 60 }

# JWT
[client_auth]
type = "jwt_validate"
audience = "audience"
issuer = "issuer"
jwks_url = "..."

# Token
[client_auth]
type = "token"
token = "preshared token"

# None
[client_auth]
type = "DANGEROUSLY_INSECURE"
```

#### [server_auth]

The `[server_auth]` section can be one of:

```toml
[server_auth]
type = "jwt_hs256"
secret_key = "my secret key"

[server_auth]
type = "token"
token = "preshared token"

[server_auth]
type = "DANGEROUSLY_INSECURE"
```

### server.toml

```toml
# This is where client toolchains will be stored.
cache_dir = "/tmp/toolchains"
# The maximum size of the toolchain cache, in bytes.
# If unspecified the default is 10GB.
#toolchain_cache_size = 10737418240
# A public IP address and port that clients will use to connect to this builder.
public_addr = "192.168.1.1:10501"
# The socket address the builder will listen on. Falls back to public_addr.
#bind_address = "0.0.0.0:10501"
# The URL used to connect to the scheduler (should use https, given an ideal
# setup of a HTTPS server in front of the scheduler)
scheduler_url = "https://192.168.1.1"

[builder]
type = "overlay"
# The directory under which a sandboxed filesystem will be created for builds.
build_dir = "/tmp/build"
# The path to the bubblewrap version 0.3.0+ `bwrap` binary.
bwrap_path = "/usr/bin/bwrap"

[scheduler_auth]
type = "jwt_token"
# This will be generated by the `generate-jwt-hs256-server-token` command or
# provided by an administrator of the sccache cluster.
token = "my server's token"
```

#### [builder]

The `[builder]` section can be one of:

```toml
[builder]
type = "docker"

[builder]
type = "overlay"
# The directory under which a sandboxed filesystem will be created for builds.
build_dir = "/tmp/build"
# The path to the bubblewrap version 0.3.0+ `bwrap` binary.
bwrap_path = "/usr/bin/bwrap"

[builder]
type = "pot"
# Pot filesystem root
#pot_fs_root = "/opt/pot"
# Reference pot cloned when creating containers
#clone_from = "sccache-template"
# Command to invoke when calling pot
#pot_cmd = "pot"
# Arguments passed to `pot clone` command
#pot_clone_args = ["-i", "lo0|127.0.0.2"]
```

#### [scheduler_auth]

The `[scheduler_auth]` section can be one of:

```toml
[scheduler_auth]
type = "jwt_token"
token = "my server's token"

[scheduler_auth]
type = "token"
token = "preshared token"

[scheduler_auth]
type = "DANGEROUSLY_INSECURE"
```

# Building the Distributed Server Binaries

Until these binaries [are included in releases](https://github.com/mozilla/sccache/issues/393) I've put together a Docker container that can be used to easily build a release binary:

```
docker run -ti --rm -v $PWD:/sccache luser/sccache-musl-build:0.1 /bin/bash -c "cd /sccache; cargo build --release --target x86_64-unknown-linux-musl --features=dist-server && strip target/x86_64-unknown-linux-musl/release/sccache-dist && cd target/x86_64-unknown-linux-musl/release/ && tar czf sccache-dist.tar.gz sccache-dist"
```

mozilla-sccache-40c3d6b/docs/DistributedFreeBSD.md000066400000000000000000000077171475712407500220740ustar00rootroot00000000000000Distributed sccache on FreeBSD
==============================

Please read [the distributed quickstart](DistributedQuickstart.md) guide first.

Build and install from source
-----------------------------

```
cargo install --features="dist-client,dist-server" --path=.
```

Configure a FreeBSD build server
--------------------------------

On FreeBSD, the build server requires [pot](https://github.com/bsdpot/pot) to sandbox execution:

```sh
pkg install pot
```

It's up to the user to create the reference pot that serves as a template to clone from when instantiating image and build containers, e.g.:

```sh
pot create -p sccache-template -N alias -i "lo0|127.0.0.2" -t single -b 14.1
pot set-cmd -p sccache-template -c /usr/bin/true
pot set-attr -p sccache-template -A no-rc-script -V YES
pot snapshot -p sccache-template
```

Then, a server.conf like the one below is created, making use of the `pot` builder type (commented-out options show the defaults):

```toml
# This is where client toolchains will be stored.
cache_dir = "/tmp/toolchains"
# The maximum size of the toolchain cache, in bytes.
# If unspecified the default is 10GB.
# toolchain_cache_size = 10737418240
# A public IP address and port that clients will use to connect to this builder.
public_addr = "192.168.1.1:10501"
# The URL used to connect to the scheduler (should use https, given an ideal
# setup of a HTTPS server in front of the scheduler)
scheduler_url = "https://192.168.1.1"

[builder]
type = "pot"
# Pot filesystem root
#pot_fs_root = "/opt/pot"
# Reference pot cloned when creating containers
#clone_from = "sccache-template"
# Command to invoke when calling pot
#pot_cmd = "pot"
# Arguments passed to `pot clone` command
#pot_clone_args = ["-i", "lo0|127.0.0.2"]

[scheduler_auth]
type = "jwt_token"
# This will be generated by calling
# `sccache-dist auth generate-jwt-hs256-server-token` or
# provided by an administrator of the sccache cluster.
token = "my server's token"
```

FreeBSD as a build client
-------------------------

On a FreeBSD client, make sure to add the right toolchains to `~/.config/sccache/config`:

```toml
[dist]
# The URL used to connect to the scheduler (should use https, given an ideal
# setup of a HTTPS server in front of the scheduler)
scheduler_url = "http://127.0.0.1:10600"
# Used for mapping local toolchains to remote cross-compile toolchains. Empty in
# this example where the client and build server are both Linux.
#toolchains = []
# Size of the local toolchain cache, in bytes (5GB here, 10GB if unspecified).
toolchain_cache_size = 5368709120

cache_dir = "/home/user/.cache/sccache-dist-client"

[dist.auth]
type = "token"
# This should match the `client_auth` section of the scheduler config
# and was generated by calling `sccache-dist auth generate-jwt-hs256-key`
token = "my client token"

[[dist.toolchains]]
type = "path_override"
compiler_executable = "/usr/bin/cc"
archive = "/path/to/empty.tar.gz"
archive_compiler_executable = "/usr/bin/cc"

[[dist.toolchains]]
type = "path_override"
compiler_executable = "/usr/local/bin/rustc"
archive = "/path/to/rust-toolchain.tgz"
archive_compiler_executable = "/usr/local/bin/rustc"
```

Creating toolchain archives
---------------------------

The toolchain files from the examples above can be created like this:

```sh
pkg install gtar
gtar cvf - --files-from /dev/null | gzip >empty.tar.gz
pkg info -lq rust | gtar -cf - -T - | gzip >rust-toolchain.tgz
```

This just creates an empty file for the system compiler (as it is included in the pot image anyway); the toolchain for rustc is created from the rust package installed on the system. See [the distributed quickstart](DistributedQuickstart.md) guide for instructions on how to create other C toolchains using icecc-create-env.

Note: We use `gtar` (GNU tar) here, as the [flate2](https://github.com/rust-lang/flate2-rs) crate has issues processing sparse files created with `bsdtar`.

Cargo invocation example
------------------------

```sh
RUSTC_WRAPPER=~/.cargo/bin/sccache \
cargo build
```

mozilla-sccache-40c3d6b/docs/DistributedQuickstart.md000066400000000000000000000257511475712407500230120ustar00rootroot00000000000000sccache distributed compilation quickstart
==========================================

This is a quick start guide to getting distributed compilation working with sccache. This guide primarily covers Linux clients. macOS and Windows clients are supported but have seen significantly less testing.
Get sccache binaries
--------------------

Either [install pre-built sccache binaries](https://github.com/mozilla/sccache#installation), or build sccache locally with the `dist-client` and `dist-server` features enabled:

```
cargo build --release --features="dist-client dist-server"
```

The `target/release/sccache` binary will be used on the client, and the `target/release/sccache-dist` binary will be used on the scheduler and build server.

If you're only planning to use the client, it is enabled by default, so just `cargo install sccache` should do the trick.

Configure a scheduler
---------------------

If you're adding a server to a cluster that has already been set up, skip ahead to [configuring a build server](#configure-a-build-server).

The scheduler is a daemon that manages compile requests from clients and parcels them out to build servers. You only need one of these per sccache setup. Currently only Linux is supported for running the scheduler.

Create a scheduler.conf file to configure client/server authentication. A minimal example looks like:

```toml
# The socket address the scheduler will listen on. It's strongly recommended
# to listen on localhost and put a HTTPS server in front of it.
public_addr = "127.0.0.1:10600"

[client_auth]
type = "token"
token = "my client token"

[server_auth]
type = "jwt_hs256"
secret_key = "my secret key"
```

Mozilla build servers will typically require clients to be authenticated with the [Mozilla identity system](https://github.com/mozilla-iam/mozilla-iam). To configure the scheduler for this, the `client_auth` section should be as follows, so that any client tokens are validated with the Mozilla service:

```
[client_auth]
type = "mozilla"
required_groups = ["group_name"]
```

Where `group_name` is a Mozilla LDAP group. Users will be required to belong to this group to successfully authenticate with the scheduler.

Start the scheduler by running:

```
sccache-dist scheduler --config scheduler.conf
```

Like the local server, the scheduler process will daemonize itself unless `SCCACHE_NO_DAEMON=1` is set. If the scheduler fails to start you may need to set `SCCACHE_LOG=trace` when starting it to get useful diagnostics.

Configure a build server
------------------------

A build server communicates with the scheduler and executes compiles requested by clients. Only Linux is supported for running a build server, but executing cross-compile requests from macOS/Windows clients is supported.

You can also run a build server on FreeBSD, please see [distributed sccache on FreeBSD](DistributedFreeBSD.md).

The build server requires [bubblewrap](https://github.com/projectatomic/bubblewrap) to sandbox execution, at least version 0.3.0. Verify your version of bubblewrap *before* attempting to run the server. On Ubuntu 18.10+ you can `apt install bubblewrap` to install it. If you build from source you will need to first install your distro's equivalent of the `libcap-dev` package.

Create a server.conf file to configure authentication, storage locations, network addresses and the path to bubblewrap. A minimal example looks like:

```toml
# This is where client toolchains will be stored.
cache_dir = "/tmp/toolchains"
# The maximum size of the toolchain cache, in bytes.
# If unspecified the default is 10GB.
# toolchain_cache_size = 10737418240
# A public IP address and port that clients will use to connect to this builder.
public_addr = "192.168.1.1:10501" # The URL used to connect to the scheduler (should use https, given an ideal # setup of a HTTPS server in front of the scheduler) scheduler_url = "https://192.168.1.1" [builder] type = "overlay" # The directory under which a sandboxed filesystem will be created for builds. build_dir = "/tmp/build" # The path to the bubblewrap version 0.3.0+ `bwrap` binary. bwrap_path = "/usr/bin/bwrap" [scheduler_auth] type = "jwt_token" # This will be generated by the `generate-jwt-hs256-server-token` command or # provided by an administrator of the sccache cluster. token = "my server's token" ``` Due to bubblewrap requirements currently the build server *must* be run as root. Start the build server by running: ``` sudo sccache-dist server --config server.conf ``` As with the scheduler, if the build server fails to start you may need to set `SCCACHE_LOG=trace` to get useful diagnostics. Configure a client ------------------ A client uses `sccache` to wrap compile commands, communicates with the scheduler to find available build servers, and communicates with build servers to execute the compiles and receive the results. Clients that are not targeting linux64 require the `icecc-create-env` script or should be provided with an archive. `icecc-create-env` is part of `icecream` for packaging toolchains. You can install icecream to get this script (`apt install icecc` on Ubuntu), or download it from the git repository and place it in your `PATH`: `curl https://raw.githubusercontent.com/icecc/icecream/master/client/icecc-create-env.in > icecc-create-env && chmod +x icecc-create-env`. See [using custom toolchains](#using-custom-toolchains). Create a client config file in `~/.config/sccache/config` (on Linux), `~/Library/Application Support/Mozilla.sccache/config` (on macOS), or `%APPDATA%\Mozilla\sccache\config\config` (on Windows). A minimal example looks like: ```toml [dist] # The URL used to connect to the scheduler (should use https, given an ideal # setup of a HTTPS server in front of the scheduler) scheduler_url = "https://192.168.1.1" # Used for mapping local toolchains to remote cross-compile toolchains. Empty in # this example where the client and build server are both Linux. toolchains = [] # Size of the local toolchain cache, in bytes (5GB here, 10GB if unspecified). toolchain_cache_size = 5368709120 [dist.auth] type = "token" # This should match the `client_auth` section of the scheduler config. token = "my client token" ``` Clients using Mozilla build servers should configure their `dist.auth` section as follows: ``` [dist.auth] type = "mozilla" ``` And retrieve a token from the Mozilla identity service by running `sccache --dist-auth` and following the instructions. Completing this process will retrieve and cache a token valid for 7 days. Make sure to run `sccache --stop-server` and `sccache --start-server` if sccache was running before changing the configuration. You can check the status with `sccache --dist-status`, it should say something like: ``` $ sccache --dist-status {"SchedulerStatus":["https://sccache1.corpdmz.ber3.mozilla.com/",{"num_servers":3,"num_cpus":56,"in_progress":24}]} ``` Using custom toolchains ----------------------- Since Windows and macOS cannot automatically package toolchains, it is important to be able to manually specify toolchains for distribution. This functionality is also available on Linux. 
Using custom toolchains involves adding a `dist.toolchains` section to your client config file (you can add it multiple times to specify multiple toolchains). On Linux and macOS: ``` [[dist.toolchains]] type = "path_override" compiler_executable = "/home/me/.mozbuild/clang/bin/clang" archive = "/home/me/.mozbuild/toolchains/33d92fcd79ffef6e-clang-dist-toolchain.tar.xz" archive_compiler_executable = "/builds/worker/toolchains/clang/bin/clang" ``` On Windows: ``` [[dist.toolchains]] type = "path_override" compiler_executable = "C:/clang/bin\\clang-cl.exe" archive = "C:/toolchains/33d92fcd79ffef6e-clang-dist-toolchain.tar.xz" archive_compiler_executable = "/builds/worker/toolchains/clang/bin/clang" ``` Where: - `compiler_executable` identifies the path that sccache will match against to activate this configuration (you need to be careful on Windows - paths can have slashes in both directions, and you may need to escape backslashes, as in the example) - `archive` is the compressed tar archive containing the compiler toolchain to distribute when `compiler_executable` is matched - `archive_compiler_executable` is the path within the archive the distributed compilation should invoke A toolchain archive should be a Gzip compressed TAR archive, containing a filesystem sufficient to run the compiler without relying on any external files. If you have archives compatible with icecream (created with `icecc-create-env`, like [these ones](https://github.com/jyavenard/mozilla-icecream) for macOS), they should also work with sccache. To create a Windows toolchain, it is recommended that you download the [Clang binaries for Ubuntu 16.04](http://releases.llvm.org/download.html) and extract them, package up the toolchain using the extracted `bin/clang` file (requires [PR #321](https://github.com/mozilla/sccache/pull/321)) and then insert `bin/clang-cl` at the appropriate path as a symlink to the `bin/clang` binary. Considerations when distributing from macOS ------------------------------------------- When distributing from a macOS client, additional flags and configuration may be required: - An explicit target should be passed to the compiler, for instance by adding `--target=x86_64-apple-darwin16.0.0` to your build system's `CFLAGS`. - An explicit toolchain archive will need to be configured, as described above. In case rust is being cached, the same version of `rustc` will need to be used for local compiles as is found in the distributed archive. - The client config will be read from `~/Library/Application Support/Mozilla.sccache/config`, not `~/.config/sccache/config`. - Some cross compilers may not understand some intrinsics used in more recent macOS SDKs. The 10.11 SDK is known to work. Making a build server start at boot time ---------------------------------------- It is very easy with a systemd service to spawn the server on boot. 
You can create a service file like `/etc/systemd/system/sccache-server.service` with the following contents:

```ini
[Unit]
Description=sccache-dist server
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/path/to/sccache-dist server --config /path/to/server.conf

[Install]
WantedBy=multi-user.target
```

**Note** that if the `sccache-dist` binary is in a user's home directory, and you're in a distro with SELinux enabled (like Fedora), you may need to use an `ExecStart` line like:

```ini
ExecStart=/bin/bash -c "/home/<user>/path/to/sccache-dist server --config /home/<user>/path/to/server.conf"
```

This is because SELinux by default prevents services from running binaries in home directories, for some reason. Using a shell works around that. An alternative would be to move the `sccache-dist` binary to somewhere like `/usr/local/bin`, but then you need to remember to update it manually.

After creating that file, you can ensure it's working and enable it by default like:

```
# systemctl daemon-reload
# systemctl start sccache-server
# systemctl status  # And check it's fine.
# systemctl enable sccache-server  # This enables the service on boot
```

mozilla-sccache-40c3d6b/docs/GHA.md000066400000000000000000000015471475712407500170530ustar00rootroot00000000000000# GitHub Actions

To use the [GitHub Actions cache](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows), you need to set `SCCACHE_GHA_ENABLED` to `on` to enable it. By changing `SCCACHE_GHA_VERSION`, we can purge all of the cache.

This cache type needs tokens like `ACTIONS_CACHE_URL` and `ACTIONS_RUNTIME_TOKEN` to work. You can set these environment variables using the following step in a GitHub Actions workflow.

```yaml
- name: Configure sccache
  uses: actions/github-script@v7
  with:
    script: |
      core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
      core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
```

## Behavior

If sccache reaches the rate limit of the service, the build will continue, but cache writes might not be performed.

mozilla-sccache-40c3d6b/docs/Gcs.md000066400000000000000000000060661475712407500171670ustar00rootroot00000000000000# Google Cloud Storage

To use [Google Cloud Storage](https://cloud.google.com/storage/), you need to set the `SCCACHE_GCS_BUCKET` environment variable to the name of the GCS bucket.

By default, sccache on GCS will be read-only. To change this, set `SCCACHE_GCS_RW_MODE` to either `READ_ONLY` or `READ_WRITE`.

You can also define a prefix that will be prepended to the keys of all cache objects created and read within the GCS bucket, effectively creating a scope. To do that use the `SCCACHE_GCS_KEY_PREFIX` environment variable. This can be useful when sharing a bucket with another application.

## Credentials

Sccache is able to load credentials from various sources, including:

- User Input: If `SCCACHE_GCS_KEY_PATH` has been set, we will load from this file first.
  - Service account JSONs
  - External account JSONs
- [Task Cluster](https://taskcluster.net/): If `SCCACHE_GCS_CREDENTIALS_URL` has been set, we will load the token from this url first.
- Static: `GOOGLE_APPLICATION_CREDENTIALS`
- Well-known locations:
  - Windows: `%APPDATA%\gcloud\application_default_credentials.json`
  - macOS/Linux:
    - `$XDG_CONFIG_HOME/gcloud/application_default_credentials.json`
    - `$HOME/.config/gcloud/application_default_credentials.json`
- VM Metadata: Fetch a token with the specified service account.
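Putting the first of these sources to use, a write-enabled setup with a service-account key might look like the following sketch; the bucket name and key path are placeholders, and the sections below describe how to obtain the key file:

```sh
# Placeholder bucket and key path; see the service/external account
# sections below for how to obtain the JSON key file.
export SCCACHE_GCS_BUCKET=my-sccache-bucket
export SCCACHE_GCS_RW_MODE=READ_WRITE
export SCCACHE_GCS_KEY_PATH=/path/to/service-account.json
```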
### Service accounts

To create such an account, in GCP, go to `APIs and Services` => `Cloud Storage` => `Create credentials` => `Service account`. Then, once created, click on the account, then `Keys` => `Add key` => `Create new key`. Select the JSON format and download it. This JSON file is what `SCCACHE_GCS_KEY_PATH` expects.

The service account needs `Storage Object Admin` permissions on the bucket (otherwise, sccache will fail with a simple `Permission denied`).

### External accounts

Such accounts require creating a [Workload Identity Pool and Workload Identity Provider]. This approach allows the environment (Azure, AWS, or other OIDC providers like GitHub) to create a temporary service account grant without having to share a service account JSON, which can be pretty powerful. An example of how to create such accounts is [Google's guide on how to use it with Github].

After generating the external account JSON file, you may pass its path to `SCCACHE_GCS_KEY_PATH`.

Service accounts used by the pool must have `Storage Object Admin` permissions on the bucket as well.

## Verifying it works

To verify that it works, run:

```
export SCCACHE_GCS_BUCKET=<bucket name>
export SCCACHE_GCS_KEY_PATH=secret-gcp-storage.json
./sccache --show-stats
# you should see
[...]
Cache location                  GCS, bucket: Bucket(name=<bucket name>), key_prefix: (none)
```

## Deprecation

`SCCACHE_GCS_OAUTH_URL` has been deprecated and is no longer supported; please use `SCCACHE_GCS_SERVICE_ACCOUNT` instead.

[Workload Identity Pool and Workload Identity Provider]: https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers
[Google's guide on how to use it with Github]: https://cloud.google.com/blog/products/identity-security/enabling-keyless-authentication-from-github-actions

mozilla-sccache-40c3d6b/docs/Jenkins.md000066400000000000000000000027541475712407500200550ustar00rootroot00000000000000sccache on Jenkins
==================

When using `sccache` on [Jenkins](https://jenkins.io), one has to know how to deal with the sccache server process.

Unless specified otherwise, sccache uses port `4226`. On invocation, sccache tries to connect to a sccache server instance on this port. If no server is running, a new instance is spawned. Jenkins tries to kill *all* spawned processes once a job is finished. This results in broken builds when two jobs run in parallel and the first one, which spawned the server, finishes and the server is killed. The other job may still be in contact with the server (e.g. waiting for a cache response) and fail.

One option to solve this problem is to spawn an always-running sccache server process by setting `SCCACHE_IDLE_TIMEOUT` to `0` and starting the server beside Jenkins as a system service. This implies that all jobs use the same sccache configuration and share the statistics.

If a per-job sccache configuration is needed or preferred (e.g. placing a local disk cache in `$WORKSPACE`), the [Port allocator plugin](https://wiki.jenkins.io/display/JENKINS/Port+Allocator+Plugin) does a good job. It assigns a free and unique port number to a job by exporting a variable. Naming this variable `SCCACHE_SERVER_PORT` is enough to make the job spawn its own sccache server that is safe to terminate upon job termination. This approach has the advantage that each job (with a dedicated server instance) maintains its own statistics that might be interesting upon job finalization.

mozilla-sccache-40c3d6b/docs/Local.md000066400000000000000000000077311475712407500175070ustar00rootroot00000000000000# Local

sccache defaults to using local disk storage.
You can set the `SCCACHE_DIR` environment variable to change the disk cache location. By default it will use a sensible location for the current platform: `~/.cache/sccache` on Linux, `%LOCALAPPDATA%\Mozilla\sccache` on Windows, and `~/Library/Caches/Mozilla.sccache` on macOS.

The default cache size is 10 gigabytes. To change this, set `SCCACHE_CACHE_SIZE`, for example `SCCACHE_CACHE_SIZE="1G"`.

The local storage only supports a single sccache server at a time. Multiple concurrent servers will race and cause spurious build failures.

## Preprocessor cache mode

This is inspired by [ccache's direct mode](https://ccache.dev/manual/3.7.9.html#_the_direct_mode) and works roughly the same. It can be enabled by setting the variable:

```
SCCACHE_DIRECT=true
```

In preprocessor cache mode, sccache caches the preprocessor step for C/C++ whenever possible. This can make the compilation a lot faster, since the preprocessor accounts for a non-negligible amount of time in the entire compile chain.

In order to cache the preprocessor step sccache needs to remember, among other things, all files included by the given input file. To quote ccache's documentation:

> There is a catch with the [preprocessor cache] mode: header files that were used by the compiler are recorded, but header files that were not used, but would have been used if they existed, are not. So, when [sccache] checks if a result can be taken from the cache, it currently can’t check if the existence of a new header file should invalidate the result. In practice, the [preprocessor cache] mode is safe to use in the absolute majority of cases.

Preprocessor cache mode will be disabled if any of the following holds:

- the configuration setting `use_preprocessor_cache_mode` is false
- a modification time of one of the include files is too new (needed to avoid a race condition)
- a compiler option not supported by the preprocessor cache mode is used. Currently, this is only `-Xpreprocessor` and `-Wp,*`, but more options could be added to this list if/when sccache grows to handle them.
- the string `__TIME__` is present in the source code

Configuration options and their default values:

- `use_preprocessor_cache_mode`: `true`. Whether to use preprocessor cache mode entirely.
- `file_stat_matches`: `false`. If false, only compare header files by hashing their contents. If true, will use size + ctime + mtime to check whether a file has changed. See other flags below for more control over this behavior.
- `use_ctime_for_stat`: `true`. If true, uses the ctime (file status change on UNIX, creation time on Windows) to check that a file has/hasn't changed. Can be useful to disable when backdating modification times in a controlled manner.
- `ignore_time_macros`: `false`. If true, ignore `__DATE__`, `__TIME__` and `__TIMESTAMP__` being present in the source code. Will speed up preprocessor cache mode, but can result in false positives.
- `skip_system_headers`: `false`. If true, preprocessor cache mode will not cache system headers, only add them to the hash.
- `hash_working_directory`: `true`. If true, will add the current working directory to the hash to distinguish two compilations from different directories.

See where to write the config in [the configuration doc](Configuration.md).

*Note that preprocessor caching is currently only implemented for GCC and Clang and when using local storage.*

## Read-only cache mode

By default, the local cache operates in read/write mode.
The `SCCACHE_LOCAL_RW_MODE` environment variable can be set to `READ_ONLY` (or `READ_WRITE`) to modify this behavior.

You can use read-only mode to prevent sccache from writing new cache items to the disk. This can be useful, for example, if you want to use items that have already been cached, but not add new ones to the cache.

Note that this feature is only effective if you already have items in your cache. Using this option on an empty cache will cause sccache to simply do nothing but add overhead.

mozilla-sccache-40c3d6b/docs/Memcached.md000066400000000000000000000015451475712407500203200ustar00rootroot00000000000000# Memcached

Set `SCCACHE_MEMCACHED_ENDPOINT` to a [Memcached](https://memcached.org/) url in the format `tcp://<hostname>:<port> ...` to store the cache in a Memcached instance.

`SCCACHE_MEMCACHED` is a deprecated alias for `SCCACHE_MEMCACHED_ENDPOINT`, renamed to unify the variable name with the other remote storages.

Set `SCCACHE_MEMCACHED_USERNAME` and `SCCACHE_MEMCACHED_PASSWORD` if you want to authenticate to Memcached.

Set `SCCACHE_MEMCACHED_EXPIRATION` to the expiration time of memcached entries, in seconds. The default value is `86400` (1 day) and it can be up to `2592000` (30 days). Setting this value to `0` disables the expiration; memcached will still purge cache entries once they exceed 30 days or according to its LRU rules.

Set `SCCACHE_MEMCACHED_KEY_PREFIX` if you want to prefix all cache keys. This can be useful when sharing a Memcached instance with another application or cache.

mozilla-sccache-40c3d6b/docs/OSS.md000066400000000000000000000023151475712407500171120ustar00rootroot00000000000000# OSS

If you want to use _Object Storage Service_ (aka OSS) by Alibaba for the sccache cache, you need to set the `SCCACHE_OSS_BUCKET` environment variable to the name of the OSS bucket to use.

You **must** specify the endpoint URL using the `SCCACHE_OSS_ENDPOINT` environment variable. More details about [OSS endpoints](https://www.alibabacloud.com/help/en/oss/user-guide/regions-and-endpoints).

You can also define a prefix that will be prepended to the keys of all cache objects created and read within the OSS bucket, effectively creating a scope. To do that use the `SCCACHE_OSS_KEY_PREFIX` environment variable. This can be useful when sharing a bucket with another application.

## Credentials

Sccache is able to load credentials from the environment variables `ALIBABA_CLOUD_ACCESS_KEY_ID` and `ALIBABA_CLOUD_ACCESS_KEY_SECRET`.

Alternatively, the `SCCACHE_OSS_NO_CREDENTIALS` environment variable can be set to use public readonly access to the OSS bucket, without the need for credentials. Valid values for this environment variable are `true`, `1`, `false`, and `0`. This can be useful for implementing a readonly cache for pull requests, which typically cannot be given access to credentials for security reasons.

mozilla-sccache-40c3d6b/docs/Redis.md000066400000000000000000000053561475712407500175220ustar00rootroot00000000000000# Redis

If you want to use [Redis](https://redis.io/) storage for the sccache cache, you need to set `SCCACHE_REDIS_ENDPOINT` to the single-node redis URL. If you want to use a Redis cluster, set `SCCACHE_REDIS_CLUSTER_ENDPOINTS` instead of `SCCACHE_REDIS_ENDPOINT` to the comma-separated list of redis node URLs.

The Redis endpoint URL format can be found in the [OpenDAL source code](https://github.com/apache/opendal/blob/5f1d5d1d61ed28f63d4955538b33a4d582feebef/core/src/services/redis/backend.rs#L268-L307).
Some valid examples:

* `redis://127.0.0.1:6379` or `tcp://127.0.0.1:6379` or `127.0.0.1:6379` - TCP-based Redis connection (non-secure)
* `rediss://@1.2.3.4:6379` - TLS-based Redis connection over TCP (secure)
* `unix:///tmp/redis.sock` or `redis+unix:///tmp/redis.sock` - Unix socket-based Redis connection

Redis can be configured as an LRU (least recently used) cache with a fixed maximum cache size. Set `maxmemory` and `maxmemory-policy` according to the [Redis documentation](https://redis.io/topics/lru-cache). The `allkeys-lru` policy, which discards the *least recently accessed or modified* key, fits the sccache use case well.

Redis over TLS is supported. Use the [`rediss://`](https://www.iana.org/assignments/uri-schemes/prov/rediss) url scheme (note `rediss` vs `redis`). Append `#insecure` to the url to disable hostname verification and accept self-signed certificates (dangerous!). Note that this also disables [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication).

If you want to authenticate to Redis, set `SCCACHE_REDIS_USERNAME` and `SCCACHE_REDIS_PASSWORD` to the username and password accordingly.

`SCCACHE_REDIS_DB` is the database number to use. Default is 0.

Set `SCCACHE_REDIS_EXPIRATION` in seconds if you don't want your cache to live forever. This will override the default behavior of redis. `SCCACHE_REDIS_TTL` is a deprecated synonym for `SCCACHE_REDIS_EXPIRATION`.

Set `SCCACHE_REDIS_KEY_PREFIX` if you want to prefix all cache keys. This can be useful when sharing a Redis instance with another application or cache.

`SCCACHE_REDIS` is deprecated for security reasons; use `SCCACHE_REDIS_ENDPOINT` instead. See mozilla/sccache#2083 for details. If you really want to use `SCCACHE_REDIS`, you should use a URL in the format `redis://[[<username>]:<passwd>@]<hostname>[:port][/?db=<db>]`.

## Deprecated API Examples

Use the local Redis instance with no password:

```sh
SCCACHE_REDIS=redis://localhost
```

Use the local Redis instance on port `6379` with password `qwerty`:

```sh
SCCACHE_REDIS=redis://:qwerty@localhost:6379
```

Use the `192.168.1.10` Redis instance on port `6380` with username `alice`, password `qwerty123` and database `12` via TLS connection:

```sh
SCCACHE_REDIS=rediss://alice:qwerty123@192.168.1.10:6380/?db=12
```

mozilla-sccache-40c3d6b/docs/Releasing.md000066400000000000000000000021751475712407500203630ustar00rootroot00000000000000# Sccache Release Process

Most of the sccache release process is automated. The [github workflow](https://github.com/mozilla/sccache/actions?query=workflow%3Aci) contains builds for all supported platforms, as well as a release job that is triggered by pushing a new tag to the repository. That job will upload the resulting binary packages to [the GitHub releases page](https://github.com/mozilla/sccache/releases) on the repository.

# Producing a release

We use [`cargo-release`](https://crates.io/crates/cargo-release) to produce releases, since it encapsulates the steps of bumping the version number, creating and pushing a new tag, and releasing to [crates.io](https://crates.io/crates/sccache). You can install it with `cargo install cargo-release`, then simply run `cargo release` in an sccache checkout to do the work. Note that it supports a `--dry-run` option you can use to preview what it will run.

## Things to be aware of

1. You must have authenticated to crates.io using `cargo login` to publish the sccache crate there.
2. cargo will not allow publishing a crate if there are crates in the `[patch]` section in `Cargo.toml`.
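Putting the steps above together, a typical release run might look like the following sketch, assuming a clean sccache checkout and crates.io access:

```sh
cargo install cargo-release   # one-time setup
cargo login                   # authenticate to crates.io (see note 1 above)
cargo release --dry-run       # preview the steps that would run
cargo release                 # bump the version, tag, push, and publish
```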
mozilla-sccache-40c3d6b/docs/ResponseFiles.md000066400000000000000000000071311475712407500212300ustar00rootroot00000000000000# Response Files

Response files are a way for compilers to accept arguments that would otherwise overflow the character limit in the command line. [On Windows in particular](https://learn.microsoft.com/en-us/troubleshoot/windows-client/shell-experience/command-line-string-limitation), the character limit per command is 8191 characters. These files can contain additional options that the compiler will read and process as if they were provided in the original command.

Each compiler that supports response files has different formats/expectations and implementations. Support for response files is also re-implemented per compiler by sccache so it can cache compilations accurately. There is currently support for response files in the gcc and msvc implementations in sccache.

## GCC

As defined by the [gcc docs](https://gcc.gnu.org/onlinedocs/gcc-4.6.3/gcc/Overall-Options.html#Overall-Options):

1. Options in a response file are inserted in-place in the original command line. If the file does not exist or cannot be read, the option will be treated literally, and not removed.
2. Options in a response file are separated by whitespace.
3. Single or double quotes can be used to include whitespace in an option.
4. Any character (including a backslash) may be included by prefixing the character to be included with a backslash (e.g. `\\`, `\?`, `\@`, etc).
5. The response file may itself contain additional @file options; any such options will be processed recursively.

Implementation details:

- The gcc implementation in sccache supports all of these **except** #3. If a response file contains **any** quotations (`"` or `'`), the @file arg is treated literally and not removed (and its content not processed).
- Additionally, sccache will not expand concatenated arguments such as `-include@foo` (see [#150](https://github.com/mozilla/sccache/issues/150#issuecomment-318586953) for more on this).
- Recursive files are processed depth-first; when an @file option is encountered, its contents are read and each option is evaluated in-place before continuing to options following the @file.

## MSVC

Per the [MSVC docs](https://learn.microsoft.com/en-us/cpp/build/reference/cl-command-files?view=msvc-170):

1. The contents of a response file are inserted in-place in the original command.
2. Response files can contain multiple lines of options, but each option must begin and end on the same line.
3. Backslashes (`\`) cannot be used to combine options across multiple lines.
4. The `/link` directive has special treatment:
   1. Entering an @file: if the `/link` option is provided prior to an `@file` in the command line, the `/link` directive does not affect any options within the `@file`.
   2. Newlines: A `/link` directive provided in an `@file` on one line does not affect the next line.
   3. Exiting an @file: A `/link` directive on the final line of a response file does not affect options following the `@file` option in the command line.
5. A response file cannot contain additional `@file` options, they are not recursive. (found in a [separate doc](https://learn.microsoft.com/en-us/cpp/build/reference/at-specify-a-compiler-response-file?view=msvc-170))
6. (implied) options can be wrapped in double-quotes (`"`), which allows whitespace to be preserved within the option

The msvc implementation in sccache supports all of these **except** #4, because sccache doesn't accept the `/link` directive.
Additionally, because `msbuild` generates response files using an encoding other than `utf-8`, all text encodings defined in the [WHATWG encoding standard](https://encoding.spec.whatwg.org/) are supported. This includes both `utf-8` and `utf-16`.

mozilla-sccache-40c3d6b/docs/Rust.md000066400000000000000000000014301475712407500174010ustar00rootroot00000000000000sccache includes support for caching Rust compilation. This includes many caveats, and is primarily focused on caching rustc invocations as produced by cargo. A (possibly-incomplete) list follows:

* `--emit` is required.
* `--crate-name` is required.
* Only `link`, `metadata` and `dep-info` are supported as `--emit` values, and `link` must be present.
* `--out-dir` is required.
* `-o file` is not supported.
* Compilation from stdin is not supported, a source file must be provided.
* Values from `env!` require Rust >= 1.46 to be tracked in caching.
* Procedural macros that read files from the filesystem may not be cached properly.

If you are using Rust 1.18 or later, you can ask cargo to wrap all compilation with sccache by setting `RUSTC_WRAPPER=sccache` in your build environment.

mozilla-sccache-40c3d6b/docs/S3.md000066400000000000000000000061631475712407500167400ustar00rootroot00000000000000# S3

If you want to use S3 storage for the sccache cache, you need to set the following environment variables:

- `SCCACHE_BUCKET` with the name of the S3 bucket to use;
- `SCCACHE_REGION` with the S3 region. If you have set `SCCACHE_ENDPOINT`, you can set `SCCACHE_REGION` to `auto`;
- Optionally, `SCCACHE_ENDPOINT=<hostname>:<port>` with a custom URL of a server you want to use, such as MinIO or DigitalOcean storage.

If your endpoint requires HTTPS/TLS, set `SCCACHE_S3_USE_SSL=true`. If you don't need a secure network layer, HTTP (`SCCACHE_S3_USE_SSL=false`) might be better for performance.

To enable server-side encryption with an S3-managed key (SSE-S3), set `SCCACHE_S3_SERVER_SIDE_ENCRYPTION=true`. More details about encryption [here](https://opendal.apache.org/docs/services/s3/#server-side-encryption) and documentation [here](https://docs.rs/opendal/latest/opendal/services/struct.S3.html#method.server_side_encryption_with_s3_key).

You can also define a prefix that will be prepended to the keys of all cache objects created and read within the S3 bucket, effectively creating a scope. To do that use the `SCCACHE_S3_KEY_PREFIX` environment variable. This can be useful when sharing a bucket with another application.

# R2

Cloudflare R2 is an S3-compatible object storage and works with the same configuration options as above. To use R2, you **must** define `SCCACHE_ENDPOINT`, otherwise sccache will default to AWS as the endpoint to hit. R2 also requires endpoint connections to be secure, therefore `https://` either needs to be included in `SCCACHE_ENDPOINT`, or `SCCACHE_S3_USE_SSL=true` can be used if the protocol is omitted. There are no regions in R2, so `SCCACHE_REGION` must point to `auto`. The below environment variables are recommended.

- `SCCACHE_BUCKET` is the name of your R2 bucket.
- `SCCACHE_ENDPOINT` should follow the format of `https://<ACCOUNT_ID>.r2.cloudflarestorage.com`. It is recommended that `https://` be included in this env var. Your account ID can be found [here](https://developers.cloudflare.com/fundamentals/get-started/basic-tasks/find-account-and-zone-ids/).
- `SCCACHE_REGION` should be set to `auto`.

## Credentials

Sccache is able to load credentials from various sources, including:

- Static: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
- Profile: `~/.aws/credentials` and `~/.aws/config`. The `AWS_PROFILE` environment variable can be used to select a specific profile if multiple profiles are available.
- EC2 Metadata Services: via [IMDSv2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html).
- AssumeRole: assume the role specified by `AWS_ROLE_ARN`.
- AssumeRoleWithWebIdentity: assume the role with the web identity specified by `AWS_ROLE_ARN` and `AWS_WEB_IDENTITY_TOKEN_FILE`.

Alternatively, the `SCCACHE_S3_NO_CREDENTIALS` environment variable can be set to use public readonly access to the S3 bucket, without the need for credentials. Valid values for this environment variable are `true`, `1`, `false`, and `0`. This can be useful for implementing a readonly cache for pull requests, which typically cannot be given access to credentials for security reasons.

mozilla-sccache-40c3d6b/docs/Webdav.md000066400000000000000000000015641475712407500176630ustar00rootroot00000000000000# WebDAV

Users can configure sccache to cache incremental build artifacts in a remote WebDAV service. The following services all expose a WebDAV interface and can be used as a backend:

- [Ccache HTTP storage backend](https://ccache.dev/manual/4.7.4.html#_http_storage_backend)
- [Bazel Remote Caching](https://bazel.build/remote/caching).
- [Gradle Build Cache](https://docs.gradle.org/current/userguide/build_cache.html)

Set `SCCACHE_WEBDAV_ENDPOINT` to an appropriate webdav service endpoint to enable remote caching.
Set `SCCACHE_WEBDAV_KEY_PREFIX` to specify the key prefix of the cache.

## Credentials

Sccache is able to load credentials from the following sources:

- Set `SCCACHE_WEBDAV_USERNAME`/`SCCACHE_WEBDAV_PASSWORD` to specify the username/password pair for basic authentication.
- Set `SCCACHE_WEBDAV_TOKEN` to specify the token value for bearer token authentication.

mozilla-sccache-40c3d6b/docs/Xcode.md000066400000000000000000000062711475712407500175150ustar00rootroot00000000000000# Using `sccache` with Xcode

It is possible to use `sccache` with Xcode with some setup.

### Running the daemon

Before building, you need to run the daemon outside of Xcode. This needs to be done because if the `sccache` invocation happens to implicitly start the server daemon, the Xcode build will hang on the `sccache` invocation, waiting for the process to hit its idle timeout.

You can do this in another terminal window by calling

```sh
SCCACHE_LOG=info SCCACHE_START_SERVER=1 SCCACHE_NO_DAEMON=1 sccache
```

Or by setting it up in a `launchd` configuration, perhaps as `~/Library/LaunchAgents/sccache.plist` (note the paths in the plist):

```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>sccache.server</string>
    <key>ProgramArguments</key>
    <array>
        <string>/path/to/sccache</string>
    </array>
    <key>EnvironmentVariables</key>
    <dict>
        <key>SCCACHE_START_SERVER</key>
        <string>1</string>
        <key>SCCACHE_NO_DAEMON</key>
        <string>1</string>
        <key>SCCACHE_IDLE_TIMEOUT</key>
        <string>0</string>
        <key>SCCACHE_LOG</key>
        <string>info</string>
    </dict>
    <key>StandardOutPath</key>
    <string>/tmp/sccache.log</string>
    <key>StandardErrorPath</key>
    <string>/tmp/sccache.log</string>
</dict>
</plist>
```

### Setting it up for `xcodebuild`

Xcode seems to support the barely documented `C_COMPILER_LAUNCHER` attribute for using a custom launcher program. You can then invoke `xcodebuild` like so

```sh
xcodebuild C_COMPILER_LAUNCHER=sccache CLANG_ENABLE_MODULES=NO COMPILER_INDEX_STORE_ENABLE=NO CLANG_USE_RESPONSE_FILE=NO
```

Where the additional arguments are for disabling some features that `sccache` can't cache currently.
These build settings can also be put in an xcconfig file, like `sccache.xcconfig`

```
C_COMPILER_LAUNCHER=sccache
CLANG_ENABLE_MODULES=NO
COMPILER_INDEX_STORE_ENABLE=NO
CLANG_USE_RESPONSE_FILE=NO
```

Which can then be invoked with

```sh
xcodebuild -xcconfig sccache.xcconfig
```

### Setting it up for `cmake` Xcode generator

While `cmake` has the convenient `CMAKE_<LANG>_COMPILER_LAUNCHER` for prepending tools like `sccache`, it is not supported for the Xcode generator. But you can configure it directly with something like

```cmake
# This bit before the first `project()`, as the COMPILER_LAUNCHER variables are read in then
if(DEFINED CCACHE)
    find_program(CCACHE_EXE ${CCACHE} REQUIRED)
    if(NOT CMAKE_GENERATOR STREQUAL "Xcode")
        # Support for other generators should work with these
        set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_EXE}")
        set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_EXE}")
    else()
        # And this should work for Xcode generator
        set(CMAKE_XCODE_ATTRIBUTE_C_COMPILER_LAUNCHER ${CCACHE_EXE})
        set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_MODULES "NO")
        set(CMAKE_XCODE_ATTRIBUTE_COMPILER_INDEX_STORE_ENABLE "NO")
        set(CMAKE_XCODE_ATTRIBUTE_CLANG_USE_RESPONSE_FILE "NO")
    endif()
endif()
```

Then configuring with `-DCCACHE=sccache` should work on all generators.

mozilla-sccache-40c3d6b/scripts/000077500000000000000000000000001475712407500166625ustar00rootroot00000000000000mozilla-sccache-40c3d6b/scripts/extratest.sh000077500000000000000000000112161475712407500212450ustar00rootroot00000000000000#!/bin/sh

set -o errexit
set -o pipefail
set -o nounset
set -o xtrace

#CARGO="cargo --color=always"
CARGO="cargo"

gnutarget=x86_64-unknown-linux-gnu
wintarget=x86_64-pc-windows-gnu

gnutarget() {
    unset OPENSSL_DIR
    export OPENSSL_STATIC=1
    target=$gnutarget
}
wintarget() {
    export OPENSSL_DIR=$(pwd)/openssl-win
    export OPENSSL_STATIC=1
    target=$wintarget
}

# all-windows doesn't work as redis-rs build.rs has issues (checks for cfg!(unix))

if [ "$1" = checkall ]; then
    $CARGO check --target $target --all-targets --features 'all dist-client dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'all dist-client dist-server'
    $CARGO check --target $target --all-targets --features 'all dist-client dist-tests'
    $CARGO check --target $target --all-targets --features 'all dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'all dist-client'
    $CARGO check --target $target --all-targets --features 'all dist-server'
    $CARGO check --target $target --all-targets --features 'all dist-tests'
    $CARGO check --target $target --all-targets --features 'all'
    $CARGO check --target $target --all-targets --features 'dist-client dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'dist-client dist-server'
    $CARGO check --target $target --all-targets --features 'dist-client dist-tests'
    $CARGO check --target $target --all-targets --features 'dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'dist-client'
    $CARGO check --target $target --all-targets --features 'dist-server'
    $CARGO check --target $target --all-targets --features 'dist-tests'
    $CARGO check --target $target --all-targets --features ''
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-client dist-server dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-client dist-server'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-client dist-tests'
    $CARGO check --target $target --all-targets
mozilla-sccache-40c3d6b/scripts/000077500000000000000000000000001475712407500166625ustar00rootroot00000000000000mozilla-sccache-40c3d6b/scripts/extratest.sh000077500000000000000000000112161475712407500212450ustar00rootroot00000000000000#!/bin/sh
set -o errexit
set -o pipefail
set -o nounset
set -o xtrace

#CARGO="cargo --color=always"
CARGO="cargo"

gnutarget=x86_64-unknown-linux-gnu
wintarget=x86_64-pc-windows-gnu

gnutarget() {
    unset OPENSSL_DIR
    export OPENSSL_STATIC=1
    target=$gnutarget
}
wintarget() {
    export OPENSSL_DIR=$(pwd)/openssl-win
    export OPENSSL_STATIC=1
    target=$wintarget
}

# all-windows doesn't work as redis-rs build.rs has issues (checks for cfg!(unix))

if [ "$1" = checkall ]; then
    $CARGO check --target $target --all-targets --features 'all dist-client dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'all dist-client dist-server'
    $CARGO check --target $target --all-targets --features 'all dist-client dist-tests'
    $CARGO check --target $target --all-targets --features 'all dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'all dist-client'
    $CARGO check --target $target --all-targets --features 'all dist-server'
    $CARGO check --target $target --all-targets --features 'all dist-tests'
    $CARGO check --target $target --all-targets --features 'all'
    $CARGO check --target $target --all-targets --features 'dist-client dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'dist-client dist-server'
    $CARGO check --target $target --all-targets --features 'dist-client dist-tests'
    $CARGO check --target $target --all-targets --features 'dist-server dist-tests'
    $CARGO check --target $target --all-targets --features 'dist-client'
    $CARGO check --target $target --all-targets --features 'dist-server'
    $CARGO check --target $target --all-targets --features 'dist-tests'
    $CARGO check --target $target --all-targets --features ''
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-client dist-server dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-client dist-server'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-client dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-server dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-client'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-server'
    $CARGO check --target $target --all-targets --no-default-features --features 'all dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'all'
    $CARGO check --target $target --all-targets --no-default-features --features 'dist-client dist-server dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'dist-client dist-server'
    $CARGO check --target $target --all-targets --no-default-features --features 'dist-client dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'dist-server dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features 'dist-client'
    $CARGO check --target $target --all-targets --no-default-features --features 'dist-server'
    $CARGO check --target $target --all-targets --no-default-features --features 'dist-tests'
    $CARGO check --target $target --all-targets --no-default-features --features ''
    wintarget
    $CARGO check --target $target --all-targets --features 'dist-client'
    #$CARGO check --target $target --all-targets --features 'all-windows dist-client'
    #$CARGO check --target $target --all-targets --features 'all-windows'
    $CARGO check --target $target --all-targets --features ''
elif [ "$1" = test ]; then
    # Musl tests segfault due to https://github.com/mozilla/sccache/issues/256#issuecomment-399254715
    gnutarget
    VERBOSE=
    NOCAPTURE=
    NORUN=
    TESTTHREADS=
    #VERBOSE="--verbose"
    #NORUN=--no-run
    #NOCAPTURE=--nocapture
    TESTTHREADS="--test-threads 1" # Since integration tests start up the sccache server they must be run sequentially. This only matters
    # if you have multiple test functions in one file.
    set +x
    if ! which docker; then
        printf "WARNING: =====\n\ndocker not present, some tests will fail\n\n=====\n\n\n\n\n"
        sleep 5
    fi
    if ! which icecc-create-env; then
        printf "WARNING: =====\n\nicecc-create-env not present, some tests will fail\n\n=====\n\n\n\n\n"
        sleep 5
    fi
    set -x
    RUST_BACKTRACE=1 $CARGO test $NORUN --target $target --features 'all dist-client dist-server dist-tests' $VERBOSE -- $NOCAPTURE $TESTTHREADS test_dist_nobuilder
else
    echo invalid command
    exit 1
fi
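# Usage: scripts/extratest.sh <checkall|test>
# (any other argument hits the "invalid command" branch above)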
mozilla-sccache-40c3d6b/scripts/freebsd-ci-test.sh000077500000000000000000000223431475712407500222050ustar00rootroot00000000000000#!/bin/sh
# This script contains CI tests for FreeBSD, testing
#
# - cargo build & cargo test
# - configure and start sccache-dist and scheduler
# - test distributed compile
# - test that the cache is used
#
# It creates a temporary test pool backed by a
# file (using mdconfig) and does a full configuration
# of pot.
#
# After running, it copies the sccache log file into
# the repo's root directory. It also does a full
# cleanup (removal of all temporary files, test pool
# etc.) after each run. This can be prevented by
# setting FREEBSD_CI_NOCLEAN in the environment:
#
# FREEBSD_CI_NOCLEAN=1 scripts/freebsd-ci-test.sh
#
# When running in a loop, time and bandwidth can be
# saved by placing FreeBSD distribution files in
# $HOME/.potcache
#
# mkdir $HOME/.potcache
# fetch -o $HOME/.potcache/14.1-RELEASE_base.txz \
#   https://ftp.freebsd.org/pub/FreeBSD/releases/amd64/14.1-RELEASE/base.txz
#
# This script can be run from a github action. When run locally, make
# sure to install the required packages:
#
# pkg install -y ca-root-nss curl gmake gtar pot sudo
#
# shellcheck disable=SC3040
set -eo pipefail

init() {
    base=$(realpath "$(dirname "$0")"/..)
    OS_VERSION="$(freebsd-version | awk -F- '{print $1}')"
    PUB_INTF="$(netstat -4rn | grep default | awk '{ print $4}')"
    TEST_TMPDIR=$(mktemp -d "/tmp/sccache_freebsd.XXXXXXX") || exit 1
    chmod g+r "$TEST_TMPDIR"
    export XDG_CONFIG_HOME="$TEST_TMPDIR/.config"
    mkdir -p "$XDG_CONFIG_HOME"
    export SCCACHE_DIR="$TEST_TMPDIR/.cache"
    killall sccache 2>/dev/null || true
    killall sccache-dist 2>/dev/null || true
    export RUST_LOG_STYLE=never
}

output_env_info() {
    echo "## user"
    whoami
    echo "## environment"
    env | sort
    echo "## network"
    ifconfig
    echo "## tooling info"
    cargo -V
    rustc -V
    curl --version
    # See https://github.com/bsdpot/pot/pull/253
    pot version || true
    gtar --version
    echo "## installed packages"
    pkg info
}

build_and_test_project() {
    echo "#### building sccache (cargo)"
    cd "$base"
    FAULT=0
    export RUSTFLAGS="-C debuginfo=0"
    cargo build --features "dist-client,dist-server" || FAULT=1
    echo "#### testing sccache (cargo)"
    cargo test --features "dist-client,dist-server" -- \
        --test-threads 1 || FAULT=1
    unset RUSTFLAGS
    if [ "$FAULT" -eq 0 ]; then
        # save build time by avoiding "cargo install"
        cp -a target/debug/sccache target/debug/sccache-dist \
            "$HOME/.cargo/bin/."
    fi
    if [ $FAULT -ne 0 ]; then return 1; fi
}

prepare_and_run_sccache_dist() {
    echo "#### preparing sccache-dist"
    SECRET_KEY="$(sccache-dist auth generate-jwt-hs256-key)"
    CLIENT_AUTH_KEY="$(sccache-dist auth generate-jwt-hs256-key)"
    # create scheduler.conf
    cat >"$TEST_TMPDIR"/scheduler.conf <<-EOF
public_addr = "127.0.0.1:10600"
[client_auth]
type = "token"
token = "$CLIENT_AUTH_KEY"
[server_auth]
type = "jwt_hs256"
secret_key = "$SECRET_KEY"
EOF
    SERVER_TOKEN="$(sccache-dist auth generate-jwt-hs256-server-token \
        --config="$TEST_TMPDIR"/scheduler.conf \
        --server="127.0.0.1:10501")"
    # Create server.conf
    cat >"$TEST_TMPDIR"/server.conf <<-EOF
cache_dir = "$TEST_TMPDIR/toolchains"
public_addr = "127.0.0.1:10501"
scheduler_url = "http://127.0.0.1:10600"
[builder]
type = "pot"
pot_fs_root = "$TEST_TMPDIR/pot"
[scheduler_auth]
type = "jwt_token"
token = "$SERVER_TOKEN"
EOF
    # create sccache client config
    TC="$(rustup toolchain list | grep default | awk '{ print $1 }')"
    RUSTC_PATH="$HOME/.rustup/toolchains/$TC/bin/rustc"
    mkdir -p "$XDG_CONFIG_HOME/sccache"
    cat >"$XDG_CONFIG_HOME/sccache/config" <<-EOF
[dist]
scheduler_url = "http://127.0.0.1:10600"
toolchain_cache_size = 5368709120
cache_dir = "$HOME/.cache/sccache-dist-client"
[dist.auth]
type = "token"
token = "$CLIENT_AUTH_KEY"
[[dist.toolchains]]
type = "path_override"
compiler_executable = "/usr/bin/cc"
archive = "$TEST_TMPDIR/empty.tar.gz"
archive_compiler_executable = "/usr/bin/cc"
[[dist.toolchains]]
type = "path_override"
compiler_executable = "$RUSTC_PATH"
archive = "$TEST_TMPDIR/rust-toolchain.tgz"
archive_compiler_executable = "$RUSTC_PATH"
EOF
    echo "Creating toolchain tarballs"
    gtar cvf - --files-from /dev/null | \
        gzip -n >"$TEST_TMPDIR/empty.tar.gz"
    gtar cf - --sort=name --mtime='2022-06-28 17:35Z' "$HOME/.rustup" | \
        gzip -n >"$TEST_TMPDIR/rust-toolchain.tgz"
    echo "Starting scheduler"
    sccache-dist scheduler --config "$TEST_TMPDIR"/scheduler.conf
}

prepare_zpool() {
    echo "#### preparing zpool"
    sudo dd if=/dev/zero of="$TEST_TMPDIR/zfs1" bs=1 count=1 seek=3G
    MDUNIT=$(sudo mdconfig -a -n -t vnode -S 4096 -f "$TEST_TMPDIR/zfs1")
    zdev="/dev/md$MDUNIT"
    sudo zpool create -f potpool "$zdev"
}

prepare_pot() {
    echo "#### preparing pot"
    sudo sysrc -f /usr/local/etc/pot/pot.conf POT_ZFS_ROOT=potpool/pot
    sudo sysrc -f /usr/local/etc/pot/pot.conf POT_EXTIF="$PUB_INTF"
    sudo sysrc -f /usr/local/etc/pot/pot.conf POT_TMP="$TEST_TMPDIR"
    sudo sysrc -f /usr/local/etc/pot/pot.conf \
        POT_FS_ROOT="$TEST_TMPDIR/pot"
    sudo sysrc -f /usr/local/etc/pot/pot.conf POT_GROUP=wheel
    sudo pot init -f ""
    sudo pot version
    sudo cp "$HOME"/.potcache/*.txz /var/cache/pot 2>/dev/null || true
    sudo pot create -p sccache-template -N alias -i "lo0|127.0.0.2" \
        -t single -b "$OS_VERSION"
    sudo pot set-cmd -p sccache-template -c /usr/bin/true
    sudo pot set-attr -p sccache-template -A no-rc-script -V YES
    sudo pot snapshot -p sccache-template
}

start_build_server() {
    echo "#### starting build-server (as root)"
    SCCACHE_DIST_LOG=debug RUST_LOG=info sudo \
        "$HOME"/.cargo/bin/sccache-dist server \
        --config "$TEST_TMPDIR"/server.conf &
}

wait_for_build_server() {
    echo "#### waiting for build server to become available"
    count=0
    while [ "$(sockstat -q4l -p 10501 | wc -l | xargs)" -eq "0" ]; do
        count=$(( count + 1 ))
        if [ $count -gt 60 ]; then
            2>&1 echo "Build server did not become available"
            return 1
        fi
        sleep 5
    done
}

create_build_test_project() {
    echo "#### create and build test project"
    cd "$TEST_TMPDIR"
    cargo init buildtest
    cd buildtest
    echo 'chrono = "0.4"' >>Cargo.toml
}

start_sccache_server() {
    echo "#### starting sccache-server"
    killall sccache 2>/dev/null || true
    SCCACHE_ERROR_LOG="$TEST_TMPDIR"/sccache_log.txt SCCACHE_LOG=info \
        RUST_LOG=info sccache --start-server
    sleep 10
}

test_sccache_dist_01() {
    echo "#### running sccache_dist test 01"
    cd "$TEST_TMPDIR/buildtest"
    RUSTC_WRAPPER=sccache cargo build
    STATS="$(sccache -s)"
    echo "Statistics of first buildtest"
    echo "$STATS"
    CACHE_HITS="$(echo "$STATS" | \
        grep "Cache hits" | grep -v Rust | \
        awk '{ print $3 }')"
    FAILED_DIST="$(echo "$STATS" | \
        grep "Failed distributed compilations" | awk '{ print $4 }')"
    SUCCEEDED_DIST="$(echo "$STATS" | \
        (grep -F "127.0.0.1:10501" || echo 0 0) | awk '{ print $2 }')"
    if [ "$CACHE_HITS" -ne 0 ]; then
        2>&1 echo "Unexpected cache hits"
        return 1
    fi
    # We sometimes get "connection closed before message completed"
    # on the first remote build (which will make sccache fall-back
    # to building locally). Until this has been resolved, accept
    # one failed remote build.
    if [ "$FAILED_DIST" -gt 1 ]; then
        2>&1 echo "More than one distributed compilation failed"
        cat "$TEST_TMPDIR"/sccache_log.txt
        return 1
    fi
    if [ "$SUCCEEDED_DIST" -eq 0 ]; then
        2>&1 echo "No distributed compilations succeeded"
        return 1
    fi
}

test_sccache_dist_02() {
    echo "#### running sccache_dist test 02"
    cd "$TEST_TMPDIR/buildtest"
    sccache -z
    cargo clean
    RUSTC_WRAPPER=sccache cargo build
    STATS="$(sccache -s)"
    echo "Statistics of second buildtest"
    echo "$STATS"
    CACHE_HITS="$(echo "$STATS" | \
        grep "Cache hits" | grep -v Rust | \
        awk '{ print $3 }')"
    FAILED_DIST="$(echo "$STATS" | \
        grep "Failed distributed compilations" | awk '{ print $4 }')"
    SUCCEEDED_DIST="$(echo "$STATS" | \
        (grep -F "127.0.0.1:10501" || echo 0 0) | awk '{ print $2 }')"
    if [ "$CACHE_HITS" -eq 0 ]; then
        2>&1 echo "No cache hits when there should be some"
        return 1
    fi
    # We sometimes get "connection closed before message completed"
    # on the first remote build (which will make sccache fall-back
    # to building locally). Until this has been resolved, accept
    # one failed remote build.
    if [ "$FAILED_DIST" -gt 1 ]; then
        2>&1 echo "More than one distributed compilation failed"
        return 1
    fi
    if [ "$SUCCEEDED_DIST" -ne 0 ]; then
        2>&1 echo "Unexpected distributed compilations happened"
        return 1
    fi
}

cleanup() {
    echo "#### cleaning up"
    set +e
    sccache --stop-server
    killall sccache
    killall sccache-dist && sleep 3
    sudo killall sccache-dist && sleep 3
    sudo killall -9 sccache-dist
    killall sccache
    cp "$TEST_TMPDIR/sccache_log.txt" "$base/sccache_log_$(date +%s).txt"
    if [ -z "$FREEBSD_CI_NOCLEAN" ]; then
        for name in $(pot ls -q); do
            sudo pot stop -p "$name"
        done
        sudo pot de-init
        sudo zpool destroy -f potpool
        if [ -n "$MDUNIT" ]; then
            sudo mdconfig -d -u "$MDUNIT"
        fi
        sudo rm -rf "$TEST_TMPDIR"
    fi
    set -e
}

install_signal_handler() {
    trap 'remove_signal_handler; cleanup; exit' EXIT INT HUP
}

remove_signal_handler() {
    trap - EXIT INT HUP
}

main() {
    install_signal_handler
    init
    output_env_info
    build_and_test_project
    prepare_and_run_sccache_dist
    prepare_zpool
    prepare_pot
    start_build_server
    wait_for_build_server
    create_build_test_project
    start_sccache_server
    test_sccache_dist_01
    test_sccache_dist_02
    remove_signal_handler
    cleanup
}

# run main function
main
mozilla-sccache-40c3d6b/snap/000077500000000000000000000000001475712407500161345ustar00rootroot00000000000000mozilla-sccache-40c3d6b/snap/snapcraft.yaml000066400000000000000000000031021475712407500207750ustar00rootroot00000000000000name: sccache
base: core22
adopt-info: sccache
summary: sccache is ccache with cloud storage
description: |
  sccache is a ccache-like compiler caching tool. It is used as a compiler
  wrapper and avoids compilation when possible, storing cached results either
  on local disk or in one of several cloud storage backends.

  sccache includes support for caching the compilation of C/C++ code, Rust, as
  well as NVIDIA's CUDA using nvcc. sccache also provides icecream-style
  distributed compilation (automatic packaging of local toolchains) for all
  supported compilers (including Rust). The distributed compilation system
  includes several security features that icecream lacks such as
  authentication, transport layer encryption, and sandboxed compiler execution
  on build servers. See the distributed quickstart guide for more information.

  sccache is also available as a GitHub Action to facilitate deployment using
  the GitHub Actions cache.
website: https://github.com/mozilla/sccache
contact: https://github.com/mozilla/sccache/issues
license: "Apache-2.0"
grade: stable
confinement: classic

apps:
  sccache:
    command: bin/sccache
  sccache-dist:
    command: bin/sccache-dist

parts:
  sccache:
    plugin: rust
    rust-channel: 1.75.0
    rust-use-global-lto: true
    source: .
    override-pull: |
      craftctl default
      craftctl set version="$( git -C "${CRAFT_PART_SRC}" describe --tags )"
    build-packages:
      - libssl-dev
      - make
      - pkg-config
    rust-features:
      - all
      - dist-server
    build-attributes:
      - enable-patchelf
mozilla-sccache-40c3d6b/src/000077500000000000000000000000001475712407500157625ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/bin/000077500000000000000000000000001475712407500165325ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/bin/sccache-dist/000077500000000000000000000000001475712407500210645ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/bin/sccache-dist/build.rs000066400000000000000000001040651475712407500225370ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use anyhow::{anyhow, bail, Context, Error, Result}; use flate2::read::GzDecoder; use fs_err as fs; use libmount::Overlay; use sccache::dist::{ BuildResult, BuilderIncoming, CompileCommand, InputsReader, OutputData, ProcessOutput, TcCache, Toolchain, }; use sccache::lru_disk_cache::Error as LruError; use std::collections::{hash_map, HashMap}; use std::io; use std::iter; use std::path::{self, Path, PathBuf}; use std::process::{ChildStdin, Command, Output, Stdio}; use std::sync::Mutex; use std::time::Instant; use version_compare::Version; trait CommandExt { fn check_stdout_trim(&mut self) -> Result; fn check_piped(&mut self, pipe: &mut dyn FnMut(&mut ChildStdin) -> Result<()>) -> Result<()>; fn check_run(&mut self) -> Result<()>; } impl CommandExt for Command { fn check_stdout_trim(&mut self) -> Result { let output = self.output().context("Failed to start command")?; check_output(&output)?; let stdout = String::from_utf8(output.stdout).context("Output from listing containers not UTF8")?; Ok(stdout.trim().to_owned()) } // Should really take a FnOnce/FnBox fn check_piped(&mut self, pipe: &mut dyn FnMut(&mut ChildStdin) -> Result<()>) -> Result<()> { let mut process = self .stdin(Stdio::piped()) .spawn() .context("Failed to start command")?; let mut stdin = process .stdin .take() .expect("Requested piped stdin but not present"); pipe(&mut stdin).context("Failed to pipe input to process")?; let output = process .wait_with_output() .context("Failed to wait for process to return")?; check_output(&output) } fn check_run(&mut self) -> Result<()> { let output = self.output().context("Failed to start command")?; check_output(&output) } } fn check_output(output: &Output) -> Result<()> { if !output.status.success() { warn!( "===========\n{}\n==========\n\n\n\n=========\n{}\n===============\n\n\n", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr) ); bail!("Command failed with status {}", output.status) } Ok(()) } fn join_suffix>(path: &Path, suffix: P) -> PathBuf { let suffixpath = suffix.as_ref(); let mut components = suffixpath.components(); if suffixpath.has_root() { assert_eq!(components.next(), Some(path::Component::RootDir)); } path.join(components) } #[derive(Debug)] struct OverlaySpec { build_dir: PathBuf, toolchain_dir: PathBuf, } #[derive(Debug, Clone)] struct DeflatedToolchain { path: PathBuf, build_count: u64, ctime: Instant, } pub struct OverlayBuilder { bubblewrap: PathBuf, dir: PathBuf, toolchain_dir_map: Mutex>, } impl OverlayBuilder { pub fn new(bubblewrap: PathBuf, dir: PathBuf) -> Result { info!("Creating overlay builder"); if !nix::unistd::getuid().is_root() || !nix::unistd::geteuid().is_root() { // Not root, or a setuid binary - haven't put enough thought into supporting this, bail bail!("not running as root") } let out = Command::new(&bubblewrap) .arg("--version") .check_stdout_trim() .context("Failed to execute bwrap for version check")?; if let Some(s) = out.split_whitespace().nth(1) { match (Version::from("0.3.0"), Version::from(s)) { (Some(min), Some(seen)) => { if seen < min { bail!( "bubblewrap 0.3.0 or later is required, got {:?} for 
{:?}", out, bubblewrap ); } } (_, _) => { bail!("Unexpected version format running {:?}: got {:?}, expected \"bubblewrap x.x.x\"", bubblewrap, out); } } } else { bail!( "Unexpected version format running {:?}: got {:?}, expected \"bubblewrap x.x.x\"", bubblewrap, out ); } // TODO: pidfile let ret = Self { bubblewrap, dir, toolchain_dir_map: Mutex::new(HashMap::new()), }; ret.cleanup()?; fs::create_dir(&ret.dir).context("Failed to create base directory for builder")?; fs::create_dir(ret.dir.join("builds")) .context("Failed to create builder builds directory")?; fs::create_dir(ret.dir.join("toolchains")) .context("Failed to create builder toolchains directory")?; Ok(ret) } fn cleanup(&self) -> Result<()> { if self.dir.exists() { fs::remove_dir_all(&self.dir).context("Failed to clean up builder directory")? } Ok(()) } fn prepare_overlay_dirs( &self, tc: &Toolchain, tccache: &Mutex, ) -> Result { let DeflatedToolchain { path: toolchain_dir, build_count: id, ctime: _, } = { let mut toolchain_dir_map = self.toolchain_dir_map.lock().unwrap(); // Create the toolchain dir (if necessary) while we have an exclusive lock let toolchain_dir = self.dir.join("toolchains").join(&tc.archive_id); if toolchain_dir_map.contains_key(tc) && toolchain_dir.exists() { // TODO: use if let when sccache can use NLL let entry = toolchain_dir_map .get_mut(tc) .expect("Key missing after checking"); entry.build_count += 1; entry.clone() } else { trace!("Creating toolchain directory for {}", tc.archive_id); fs::create_dir(&toolchain_dir)?; let mut tccache = tccache.lock().unwrap(); let toolchain_rdr = match tccache.get(tc) { Ok(rdr) => rdr, Err(LruError::FileNotInCache) => { bail!("expected toolchain {}, but not available", tc.archive_id) } Err(e) => { return Err(Error::from(e).context("failed to get toolchain from cache")) } }; tar::Archive::new(GzDecoder::new(toolchain_rdr)) .unpack(&toolchain_dir) .or_else(|e| { warn!("Failed to unpack toolchain: {:?}", e); fs::remove_dir_all(&toolchain_dir) .context("Failed to remove unpacked toolchain")?; tccache .remove(tc) .context("Failed to remove corrupt toolchain")?; Err(Error::from(e)) })?; let entry = DeflatedToolchain { path: toolchain_dir, build_count: 1, ctime: Instant::now(), }; toolchain_dir_map.insert(tc.clone(), entry.clone()); if toolchain_dir_map.len() > tccache.len() { let dir_map = toolchain_dir_map.clone(); let mut entries: Vec<_> = dir_map.iter().collect(); // In the pathological case, creation time for unpacked // toolchains could be the opposite of the least recently // recently used, so we clear out half of the accumulated // toolchains to prevent repeated sort/delete cycles. 
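// The ascending ctime sort below puts the oldest unpacked toolchains first;
// truncate() keeps that older half, which the loop then evicts from both the map and disk.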
entries.sort_by(|a, b| (a.1).ctime.cmp(&(b.1).ctime)); entries.truncate(entries.len() / 2); for (tc, _) in entries { warn!("Removing old un-compressed toolchain: {:?}", tc); assert!(toolchain_dir_map.remove(tc).is_some()); fs::remove_dir_all(self.dir.join("toolchains").join(&tc.archive_id)) .context("Failed to remove old toolchain directory")?; } } entry } }; trace!("Creating build directory for {}-{}", tc.archive_id, id); let build_dir = self .dir .join("builds") .join(format!("{}-{}", tc.archive_id, id)); fs::create_dir(&build_dir)?; Ok(OverlaySpec { build_dir, toolchain_dir, }) } fn perform_build( bubblewrap: &Path, compile_command: CompileCommand, inputs_rdr: InputsReader, output_paths: Vec, overlay: &OverlaySpec, ) -> Result { trace!("Compile environment: {:?}", compile_command.env_vars); trace!( "Compile command: {:?} {:?}", compile_command.executable, compile_command.arguments ); std::thread::scope(|scope| { scope .spawn(|| { // Now mounted filesystems will be automatically unmounted when this thread dies // (and tmpfs filesystems will be completely destroyed) nix::sched::unshare(nix::sched::CloneFlags::CLONE_NEWNS) .context("Failed to enter a new Linux namespace")?; // Make sure that all future mount changes are private to this namespace // TODO: shouldn't need to add these annotations let source: Option<&str> = None; let fstype: Option<&str> = None; let data: Option<&str> = None; // Turn / into a 'slave', so it receives mounts from real root, but doesn't propagate back nix::mount::mount( source, "/", fstype, nix::mount::MsFlags::MS_REC | nix::mount::MsFlags::MS_PRIVATE, data, ) .context("Failed to turn / into a slave")?; let work_dir = overlay.build_dir.join("work"); let upper_dir = overlay.build_dir.join("upper"); let target_dir = overlay.build_dir.join("target"); fs::create_dir(&work_dir).context("Failed to create overlay work directory")?; fs::create_dir(&upper_dir) .context("Failed to create overlay upper directory")?; fs::create_dir(&target_dir) .context("Failed to create overlay target directory")?; let () = Overlay::writable( iter::once(overlay.toolchain_dir.as_path()), upper_dir, work_dir, &target_dir, // This error is unfortunately not Send+Sync ) .mount() .map_err(|e| anyhow!("Failed to mount overlay FS: {}", e.to_string()))?; trace!("copying in inputs"); // Note that we don't unpack directly into the upperdir since there overlayfs has some // special marker files that we don't want to create by accident (or malicious intent) tar::Archive::new(inputs_rdr) .unpack(&target_dir) .context("Failed to unpack inputs to overlay")?; let CompileCommand { executable, arguments, env_vars, cwd, } = compile_command; let cwd = Path::new(&cwd); trace!("creating output directories"); fs::create_dir_all(join_suffix(&target_dir, cwd)) .context("Failed to create cwd")?; for path in output_paths.iter() { // If it doesn't have a parent, nothing needs creating let output_parent = if let Some(p) = Path::new(path).parent() { p } else { continue; }; fs::create_dir_all(join_suffix(&target_dir, cwd.join(output_parent))) .context("Failed to create an output directory")?; } trace!("performing compile"); // Bubblewrap notes: // - We're running as uid 0 (to do the mounts above), and so bubblewrap is run as uid 0 // - There's special handling in bubblewrap to compare uid and euid - of interest to us, // if uid == euid == 0, bubblewrap preserves capabilities (not good!) 
so we explicitly // drop all capabilities // - By entering a new user namespace means any set of capabilities do not apply to any // other user namespace, i.e. you lose privileges. This is not strictly necessary because // we're dropping caps anyway so it's irrelevant which namespace we're in, but it doesn't // hurt. // - --unshare-all is not ideal as it happily continues if it fails to unshare either // the user or cgroups namespace, so we list everything explicitly // - The order of bind vs proc + dev is important - the new root must be put in place // first, otherwise proc and dev get hidden let mut cmd = Command::new(bubblewrap); cmd.arg("--die-with-parent") .args(["--cap-drop", "ALL"]) .args([ "--unshare-user", "--unshare-cgroup", "--unshare-ipc", "--unshare-pid", "--unshare-net", "--unshare-uts", ]) .arg("--bind") .arg(&target_dir) .arg("/") .args(["--proc", "/proc"]) .args(["--dev", "/dev"]) .arg("--chdir") .arg(cwd); for (k, v) in env_vars { if k.contains('=') { warn!("Skipping environment variable: {:?}", k); continue; } cmd.arg("--setenv").arg(k).arg(v); } cmd.arg("--"); cmd.arg(executable); cmd.args(arguments); let compile_output = cmd .output() .context("Failed to retrieve output from compile")?; trace!("compile_output: {:?}", compile_output); let mut outputs = vec![]; trace!("retrieving {:?}", output_paths); for path in output_paths { let abspath = join_suffix(&target_dir, cwd.join(&path)); // Resolve in case it's relative since we copy it from the root level match fs::File::open(abspath) { Ok(file) => { let output = OutputData::try_from_reader(file) .context("Failed to read output file")?; outputs.push((path, output)) } Err(e) => { if e.kind() == io::ErrorKind::NotFound { debug!("Missing output path {:?}", path) } else { return Err( Error::from(e).context("Failed to open output file") ); } } } } let compile_output = ProcessOutput::try_from(compile_output) .context("Failed to convert compilation exit status")?; Ok(BuildResult { output: compile_output, outputs, }) // Bizarrely there's no way to actually get any information from a thread::Result::Err }) .join() .unwrap_or_else(|_e| Err(anyhow!("Build thread exited unsuccessfully"))) }) } // Failing during cleanup is pretty unexpected, but we can still return the successful compile // TODO: if too many of these fail, we should mark this builder as faulty fn finish_overlay(&self, _tc: &Toolchain, overlay: OverlaySpec) { // TODO: collect toolchain directories let OverlaySpec { build_dir, toolchain_dir: _, } = overlay; if let Err(e) = fs::remove_dir_all(&build_dir) { error!( "Failed to remove build directory {}: {}", build_dir.display(), e ); } } } impl BuilderIncoming for OverlayBuilder { fn run_build( &self, tc: Toolchain, command: CompileCommand, outputs: Vec, inputs_rdr: InputsReader, tccache: &Mutex, ) -> Result { debug!("Preparing overlay"); let overlay = self .prepare_overlay_dirs(&tc, tccache) .context("failed to prepare overlay dirs")?; debug!("Performing build in {:?}", overlay); let res = Self::perform_build(&self.bubblewrap, command, inputs_rdr, outputs, &overlay); debug!("Finishing with overlay"); self.finish_overlay(&tc, overlay); debug!("Returning result"); res.context("Compilation execution failed") } } const BASE_DOCKER_IMAGE: &str = "aidanhs/busybox"; // Make sure sh doesn't exec the final command, since we need it to do // init duties (reaping zombies). Also, because we kill -9 -1, that kills // the sleep (it's not a builtin) so it needs to be a loop. 
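// (The `kill -9 -1` mentioned above is issued by `clean_container` below when a container
// is reset between builds; this shell survives it as PID 1 and restarts the sleep.)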
const DOCKER_SHELL_INIT: &str = "while true; do /busybox sleep 365d && /busybox true; done"; // Check the diff and clean up the FS fn docker_diff(cid: &str) -> Result { Command::new("docker") .args(["diff", cid]) .check_stdout_trim() .context("Failed to Docker diff container") } // Force remove the container fn docker_rm(cid: &str) -> Result<()> { Command::new("docker") .args(["rm", "-f", cid]) .check_run() .context("Failed to force delete container") } pub struct DockerBuilder { image_map: Mutex>, container_lists: Mutex>>, } impl DockerBuilder { // TODO: this should accept a unique string, e.g. inode of the tccache directory // having locked a pidfile, or at minimum should loudly detect other running // instances - pidfile in /tmp pub fn new() -> Result { info!("Creating docker builder"); let ret = Self { image_map: Mutex::new(HashMap::new()), container_lists: Mutex::new(HashMap::new()), }; ret.cleanup()?; Ok(ret) } // TODO: this should really reclaim, and should check in the image map and container lists, so // that when things are removed from there it becomes a form of GC fn cleanup(&self) -> Result<()> { info!("Performing initial Docker cleanup"); let containers = Command::new("docker") .args(["ps", "-a", "--format", "{{.ID}} {{.Image}}"]) .check_stdout_trim() .context("Unable to list all Docker containers")?; if !containers.is_empty() { let mut containers_to_rm = vec![]; for line in containers.split(|c| c == '\n') { let mut iter = line.splitn(2, ' '); let container_id = iter .next() .context("Malformed container listing - no container ID")?; let image_name = iter .next() .context("Malformed container listing - no image name")?; if iter.next().is_some() { bail!("Malformed container listing - third field on row") } if image_name.starts_with("sccache-builder-") { containers_to_rm.push(container_id) } } if !containers_to_rm.is_empty() { Command::new("docker") .args(["rm", "-f"]) .args(containers_to_rm) .check_run() .context("Failed to start command to remove old containers")?; } } let images = Command::new("docker") .args(["images", "--format", "{{.ID}} {{.Repository}}"]) .check_stdout_trim() .context("Failed to list all docker images")?; if !images.is_empty() { let mut images_to_rm = vec![]; for line in images.split(|c| c == '\n') { let mut iter = line.splitn(2, ' '); let image_id = iter .next() .context("Malformed image listing - no image ID")?; let image_name = iter .next() .context("Malformed image listing - no image name")?; if iter.next().is_some() { bail!("Malformed image listing - third field on row") } if image_name.starts_with("sccache-builder-") { images_to_rm.push(image_id) } } if !images_to_rm.is_empty() { Command::new("docker") .args(["rmi"]) .args(images_to_rm) .check_run() .context("Failed to remove image")? 
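// Both sweeps above only match names prefixed with "sccache-builder-",
// so containers and images belonging to anything else are left alone.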
} } info!("Completed initial Docker cleanup"); Ok(()) } // If we have a spare running container, claim it and remove it from the available list, // otherwise try and create a new container (possibly creating the Docker image along // the way) fn get_container(&self, tc: &Toolchain, tccache: &Mutex) -> Result { let container = { let mut map = self.container_lists.lock().unwrap(); map.entry(tc.clone()).or_default().pop() }; match container { Some(cid) => Ok(cid), None => { // TODO: can improve parallelism (of creating multiple images at a time) by using another // (more fine-grained) mutex around the entry value and checking if its empty a second time let image = { let mut map = self.image_map.lock().unwrap(); match map.entry(tc.clone()) { hash_map::Entry::Occupied(e) => e.get().clone(), hash_map::Entry::Vacant(e) => { info!("Creating Docker image for {:?} (may block requests)", tc); let image = Self::make_image(tc, tccache)?; e.insert(image.clone()); image } } }; Self::start_container(&image) } } } fn clean_container(&self, cid: &str) -> Result<()> { // Clean up any running processes Command::new("docker") .args(["exec", cid, "/busybox", "kill", "-9", "-1"]) .check_run() .context("Failed to run kill on all processes in container")?; let diff = docker_diff(cid)?; if !diff.is_empty() { let mut lastpath = None; for line in diff.split(|c| c == '\n') { let mut iter = line.splitn(2, ' '); let changetype = iter .next() .context("Malformed container diff - no change type")?; let changepath = iter .next() .context("Malformed container diff - no change path")?; if iter.next().is_some() { bail!("Malformed container diff - third field on row") } // TODO: If files are created in this dir, it gets marked as modified. // A similar thing applies to /root or /build etc if changepath == "/tmp" { continue; } if changetype != "A" { bail!( "Path {} had a non-A changetype of {}", changepath, changetype ); } // Docker diff paths are in alphabetical order and we do `rm -rf`, so we might be able to skip // calling Docker more than necessary (since it's slow) if let Some(lastpath) = lastpath { if Path::new(changepath).starts_with(lastpath) { continue; } } lastpath = Some(changepath); if let Err(e) = Command::new("docker") .args(["exec", cid, "/busybox", "rm", "-rf", changepath]) .check_run() { // We do a final check anyway, so just continue warn!("Failed to remove added path in a container: {}", e) } } let newdiff = docker_diff(cid)?; // See note about changepath == "/tmp" above if !newdiff.is_empty() && newdiff != "C /tmp" { bail!( "Attempted to delete files, but container still has a diff: {:?}", newdiff ); } } Ok(()) } // Failing during cleanup is pretty unexpected, but we can still return the successful compile // TODO: if too many of these fail, we should mark this builder as faulty fn finish_container(&self, tc: &Toolchain, cid: String) { // TODO: collect images if let Err(e) = self.clean_container(&cid) { info!("Failed to clean container {}: {}", cid, e); if let Err(e) = docker_rm(&cid) { warn!( "Failed to remove container {} after failed clean: {}", cid, e ); } return; } // Good as new, add it back to the container list if let Some(entry) = self.container_lists.lock().unwrap().get_mut(tc) { debug!("Reclaimed container {}", cid); entry.push(cid) } else { warn!( "Was ready to reclaim container {} but toolchain went missing", cid ); if let Err(e) = docker_rm(&cid) { warn!("Failed to remove container {}: {}", cid, e); } } } fn make_image(tc: &Toolchain, tccache: &Mutex) -> Result { let cid = 
Command::new("docker") .args(["create", BASE_DOCKER_IMAGE, "/busybox", "true"]) .check_stdout_trim() .context("Failed to create docker container")?; let mut tccache = tccache.lock().unwrap(); let mut toolchain_rdr = match tccache.get(tc) { Ok(rdr) => rdr, Err(LruError::FileNotInCache) => bail!( "Expected to find toolchain {}, but not available", tc.archive_id ), Err(e) => { return Err(e).with_context(|| format!("Failed to use toolchain {}", tc.archive_id)) } }; trace!("Copying in toolchain"); Command::new("docker") .args(["cp", "-", &format!("{}:/", cid)]) .check_piped(&mut |stdin| { io::copy(&mut toolchain_rdr, stdin)?; Ok(()) }) .context("Failed to copy toolchain tar into container")?; drop(toolchain_rdr); let imagename = format!("sccache-builder-{}", &tc.archive_id); Command::new("docker") .args(["commit", &cid, &imagename]) .check_run() .context("Failed to commit container after build")?; Command::new("docker") .args(["rm", "-f", &cid]) .check_run() .context("Failed to remove temporary build container")?; Ok(imagename) } fn start_container(image: &str) -> Result { Command::new("docker") .args([ "run", "-d", image, "/busybox", "sh", "-c", DOCKER_SHELL_INIT, ]) .check_stdout_trim() .context("Failed to run container") } fn perform_build( compile_command: CompileCommand, mut inputs_rdr: InputsReader, output_paths: Vec, cid: &str, ) -> Result { trace!("Compile environment: {:?}", compile_command.env_vars); trace!( "Compile command: {:?} {:?}", compile_command.executable, compile_command.arguments ); trace!("copying in inputs"); Command::new("docker") .args(["cp", "-", &format!("{}:/", cid)]) .check_piped(&mut |stdin| { io::copy(&mut inputs_rdr, stdin)?; Ok(()) }) .context("Failed to copy inputs tar into container")?; drop(inputs_rdr); let CompileCommand { executable, arguments, env_vars, cwd, } = compile_command; let cwd = Path::new(&cwd); trace!("creating output directories"); assert!(!output_paths.is_empty()); let mut cmd = Command::new("docker"); cmd.args(["exec", cid, "/busybox", "mkdir", "-p"]).arg(cwd); for path in output_paths.iter() { // If it doesn't have a parent, nothing needs creating let output_parent = if let Some(p) = Path::new(path).parent() { p } else { continue; }; cmd.arg(cwd.join(output_parent)); } cmd.check_run() .context("Failed to create directories required for compile in container")?; trace!("performing compile"); // TODO: likely shouldn't perform the compile as root in the container let mut cmd = Command::new("docker"); cmd.arg("exec"); for (k, v) in env_vars { if k.contains('=') { warn!("Skipping environment variable: {:?}", k); continue; } let mut env = k; env.push('='); env.push_str(&v); cmd.arg("-e").arg(env); } let shell_cmd = "cd \"$1\" && shift && exec \"$@\""; cmd.args([cid, "/busybox", "sh", "-c", shell_cmd]); cmd.arg(&executable); cmd.arg(cwd); cmd.arg(executable); cmd.args(arguments); let compile_output = cmd.output().context("Failed to start executing compile")?; trace!("compile_output: {:?}", compile_output); let mut outputs = vec![]; trace!("retrieving {:?}", output_paths); for path in output_paths { let abspath = cwd.join(&path); // Resolve in case it's relative since we copy it from the root level // TODO: this isn't great, but cp gives it out as a tar let output = Command::new("docker") .args(["exec", cid, "/busybox", "cat"]) .arg(abspath) .output() .context("Failed to start command to retrieve output file")?; if output.status.success() { let output = OutputData::try_from_reader(&*output.stdout) .expect("Failed to read compress output stdout"); 
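// Each recovered output file is wrapped in OutputData and handed back
// to the requester as part of the BuildResult constructed below.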
outputs.push((path, output)) } else { debug!("Missing output path {:?}", path) } } let compile_output = ProcessOutput::try_from(compile_output) .context("Failed to convert compilation exit status")?; Ok(BuildResult { output: compile_output, outputs, }) } } impl BuilderIncoming for DockerBuilder { // From Server fn run_build( &self, tc: Toolchain, command: CompileCommand, outputs: Vec, inputs_rdr: InputsReader, tccache: &Mutex, ) -> Result { debug!("Finding container"); let cid = self .get_container(&tc, tccache) .context("Failed to get a container for build")?; debug!("Performing build with container {}", cid); let res = Self::perform_build(command, inputs_rdr, outputs, &cid) .context("Failed to perform build")?; debug!("Finishing with container {}", cid); self.finish_container(&tc, cid); debug!("Returning result"); Ok(res) } } mozilla-sccache-40c3d6b/src/bin/sccache-dist/build_freebsd.rs000066400000000000000000000367561475712407500242440ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use anyhow::{bail, Context, Error, Result}; use flate2::read::GzDecoder; use sccache::dist::{ BuildResult, BuilderIncoming, CompileCommand, InputsReader, OutputData, ProcessOutput, TcCache, Toolchain, }; use sccache::lru_disk_cache::Error as LruError; use std::collections::{hash_map, HashMap}; use std::path::{Path, PathBuf}; use std::process::{ChildStdin, Command, Output, Stdio}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::{hint, thread}; use uuid::Uuid; trait CommandExt { fn check_stdout_trim(&mut self) -> Result; fn check_piped(&mut self, pipe: &mut dyn FnMut(&mut ChildStdin) -> Result<()>) -> Result<()>; fn check_run(&mut self) -> Result<()>; } impl CommandExt for Command { fn check_stdout_trim(&mut self) -> Result { let output = self.output().context("Failed to start command")?; check_output(&output)?; let stdout = String::from_utf8(output.stdout).context("Output from listing containers not UTF8")?; Ok(stdout.trim().to_owned()) } // Should really take a FnOnce/FnBox fn check_piped(&mut self, pipe: &mut dyn FnMut(&mut ChildStdin) -> Result<()>) -> Result<()> { let mut process = self .stdin(Stdio::piped()) .spawn() .context("Failed to start command")?; let mut stdin = process .stdin .take() .expect("Requested piped stdin but not present"); pipe(&mut stdin).context("Failed to pipe input to process")?; let output = process .wait_with_output() .context("Failed to wait for process to return")?; check_output(&output) } fn check_run(&mut self) -> Result<()> { let output = self.output().context("Failed to start command")?; check_output(&output) } } fn check_output(output: &Output) -> Result<()> { if !output.status.success() { warn!( "===========\n{}\n==========\n\n\n\n=========\n{}\n===============\n\n\n", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr) ); bail!("Command failed with status {}", output.status) } Ok(()) } // Force remove the container fn pot_rm(cid: &str, pot_cmd: 
&PathBuf) -> Result<()> { Command::new(pot_cmd) .args(&["destroy", "-F", "-p", cid]) .check_run() .context("Failed to force delete container") } #[derive(Clone)] pub struct PotBuilder { pot_fs_root: PathBuf, clone_from: String, pot_cmd: PathBuf, pot_clone_args: Vec, image_map: Arc>>, container_lists: Arc>>>, cleanup_thread_count: Arc, max_cleanup_thread_count: usize, } impl PotBuilder { // TODO: this should accept a unique string, e.g. inode of the tccache directory // having locked a pidfile, or at minimum should loudly detect other running // instances - pidfile in /tmp pub fn new( pot_fs_root: PathBuf, clone_from: String, pot_cmd: PathBuf, pot_clone_args: Vec, ) -> Result { info!("Creating pot builder"); let ret = Self { pot_fs_root, clone_from, pot_cmd, pot_clone_args, image_map: Arc::new(Mutex::new(HashMap::new())), container_lists: Arc::new(Mutex::new(HashMap::new())), cleanup_thread_count: Arc::new(AtomicUsize::new(0)), max_cleanup_thread_count: num_cpus::get() * 3, }; ret.cleanup()?; Ok(ret) } // This removes all leftover pots from previous runs fn cleanup(&self) -> Result<()> { info!("Performing initial pot cleanup"); let mut to_remove = Command::new(&self.pot_cmd) .args(&["ls", "-q"]) .check_stdout_trim() .context("Failed to force delete container")? .split('\n') .filter(|a| a.starts_with("sccache-builder-") || a.starts_with("sccache-image-")) .map(|s| s.to_string()) .collect::>(); to_remove.sort(); for cid in to_remove { trace!("Removing pot {}", cid); if let Err(e) = pot_rm(&cid, &self.pot_cmd) { warn!("Failed to remove container {}: {}", cid, e); } } info!("Completed initial pot cleanup"); Ok(()) } // If we have a spare running container, claim it and remove it from the available list, // otherwise try and create a new container (possibly creating the Pot image along // the way) fn get_container(&self, tc: &Toolchain, tccache: &Mutex) -> Result { let container = { let mut map = self.container_lists.lock().unwrap(); map.entry(tc.clone()).or_insert_with(Vec::new).pop() }; match container { Some(cid) => Ok(cid), None => { // TODO: can improve parallelism (of creating multiple images at a time) by using another // (more fine-grained) mutex around the entry value and checking if its empty a second time let image = { let mut map = self.image_map.lock().unwrap(); match map.entry(tc.clone()) { hash_map::Entry::Occupied(e) => e.get().clone(), hash_map::Entry::Vacant(e) => { info!("Creating pot image for {:?} (may block requests)", tc); let image = Self::make_image( tc, tccache, &self.pot_fs_root, &self.clone_from, &self.pot_cmd, &self.pot_clone_args, )?; e.insert(image.clone()); image } } }; Self::start_container(&image, &self.pot_cmd, &self.pot_clone_args) } } } fn clean_container(cid: &str) -> Result<()> { Command::new("pot") .args(&["stop", "-p", cid]) .check_run() .context("Failed to stop container")?; Command::new("pot") .args(&["revert", "-p", cid]) .check_run() .context("Failed to revert container")?; Command::new("pot") .args(&["start", "-p", cid]) .check_run() .context("Failed to (re)start container")?; Ok(()) } // Failing during cleanup is pretty unexpected, but we can still return the successful compile // TODO: if too many of these fail, we should mark this builder as faulty fn finish_container( container_lists: Arc>>>, tc: Toolchain, cid: String, pot_cmd: &PathBuf, ) { if let Err(e) = Self::clean_container(&cid) { info!("Failed to clean container {}: {}", cid, e); if let Err(e) = pot_rm(&cid, pot_cmd) { warn!( "Failed to remove container {} after failed clean: {}", cid, e 
); } return; } // Good as new, add it back to the container list if let Some(entry) = container_lists.lock().unwrap().get_mut(&tc) { debug!("Reclaimed container {}", cid); entry.push(cid) } else { warn!( "Was ready to reclaim container {} but toolchain went missing", cid ); if let Err(e) = pot_rm(&cid, pot_cmd) { warn!("Failed to remove container {}: {}", cid, e); } } } fn make_image( tc: &Toolchain, tccache: &Mutex, pot_fs_root: &Path, clone_from: &str, pot_cmd: &PathBuf, pot_clone_args: &[String], ) -> Result { let imagename = format!("sccache-image-{}", &tc.archive_id); trace!("Creating toolchain image: {}", imagename); let mut clone_args: Vec<&str> = ["clone", "-p", &imagename, "-P", clone_from].to_vec(); clone_args.append(&mut pot_clone_args.iter().map(|s| s as &str).collect()); Command::new(pot_cmd) .args(clone_args) .check_run() .context("Failed to create pot container")?; let mut tccache = tccache.lock().unwrap(); let toolchain_rdr = match tccache.get(tc) { Ok(rdr) => rdr, Err(LruError::FileNotInCache) => { bail!("expected toolchain {}, but not available", tc.archive_id) } Err(e) => return Err(Error::from(e).context("failed to get toolchain from cache")), }; trace!("Copying in toolchain"); tar::Archive::new(GzDecoder::new(toolchain_rdr)) .unpack(pot_fs_root.join("jails").join(&imagename).join("m")) .or_else(|e| { warn!("Failed to unpack toolchain: {:?}", e); tccache .remove(tc) .context("Failed to remove corrupt toolchain")?; Err(Error::from(e)) })?; Command::new(pot_cmd) .args(&["snapshot", "-p", &imagename]) .check_run() .context("Failed to snapshot container after build")?; Ok(imagename) } fn start_container( image: &str, pot_cmd: &PathBuf, pot_clone_args: &[String], ) -> Result { let cid = format!("sccache-builder-{}", Uuid::new_v4()); let mut clone_args: Vec<&str> = ["clone", "-p", &cid, "-P", image].to_vec(); clone_args.append(&mut pot_clone_args.iter().map(|s| s as &str).collect()); Command::new(pot_cmd) .args(&clone_args) .check_run() .context("Failed to create pot container")?; Command::new(pot_cmd) .args(&["snapshot", "-p", &cid]) .check_run() .context("Failed to snapshotpot container")?; Command::new(pot_cmd) .args(&["start", "-p", &cid]) .check_run() .context("Failed to start container")?; Ok(cid.to_string()) } fn perform_build( compile_command: CompileCommand, inputs_rdr: InputsReader, output_paths: Vec, cid: &str, pot_fs_root: &Path, ) -> Result { trace!("Compile environment: {:?}", compile_command.env_vars); trace!( "Compile command: {:?} {:?}", compile_command.executable, compile_command.arguments ); trace!("copying in inputs"); // not elegant tar::Archive::new(inputs_rdr) .unpack(pot_fs_root.join("jails").join(cid).join("m")) .context("Failed to unpack inputs to pot")?; let CompileCommand { executable, arguments, env_vars, cwd, } = compile_command; let cwd = Path::new(&cwd); trace!("creating output directories"); assert!(!output_paths.is_empty()); let mut cmd = Command::new("jexec"); cmd.args(&[cid, "mkdir", "-p"]).arg(cwd); for path in output_paths.iter() { // If it doesn't have a parent, nothing needs creating let output_parent = if let Some(p) = Path::new(path).parent() { p } else { continue; }; cmd.arg(cwd.join(output_parent)); } cmd.check_run() .context("Failed to create directories required for compile in container")?; trace!("performing compile"); // TODO: likely shouldn't perform the compile as root in the container let mut cmd = Command::new("jexec"); cmd.arg(cid); cmd.arg("env"); for (k, v) in env_vars { if k.contains('=') { warn!("Skipping environment 
variable: {:?}", k); continue; } let mut env = k; env.push('='); env.push_str(&v); cmd.arg(env); } let shell_cmd = "cd \"$1\" && shift && exec \"$@\""; cmd.args(&["sh", "-c", shell_cmd]); cmd.arg(&executable); cmd.arg(cwd); cmd.arg(executable); cmd.args(arguments); let compile_output = cmd.output().context("Failed to start executing compile")?; trace!("compile_output: {:?}", compile_output); let mut outputs = vec![]; trace!("retrieving {:?}", output_paths); for path in output_paths { let abspath = cwd.join(&path); // Resolve in case it's relative since we copy it from the root level // TODO: this isn't great, but cp gives it out as a tar let output = Command::new("jexec") .args(&[cid, "cat"]) .arg(abspath) .output() .context("Failed to start command to retrieve output file")?; if output.status.success() { let output = OutputData::try_from_reader(&*output.stdout) .expect("Failed to read compress output stdout"); outputs.push((path, output)) } else { debug!("Missing output path {:?}", path) } } let compile_output = ProcessOutput::try_from(compile_output) .context("Failed to convert compilation exit status")?; Ok(BuildResult { output: compile_output, outputs, }) } } impl BuilderIncoming for PotBuilder { // From Server fn run_build( &self, tc: Toolchain, command: CompileCommand, outputs: Vec, inputs_rdr: InputsReader, tccache: &Mutex, ) -> Result { debug!("Finding container"); let cid = self .get_container(&tc, tccache) .context("Failed to get a container for build")?; debug!("Performing build with container {}", cid); let res = Self::perform_build(command, inputs_rdr, outputs, &cid, &self.pot_fs_root) .context("Failed to perform build")?; debug!("Finishing with container {}", cid); let cloned = self.clone(); let tc = tc; while cloned.cleanup_thread_count.fetch_add(1, Ordering::SeqCst) > self.max_cleanup_thread_count { cloned.cleanup_thread_count.fetch_sub(1, Ordering::SeqCst); hint::spin_loop(); } thread::spawn(move || { Self::finish_container(cloned.container_lists, tc, cid, &cloned.pot_cmd); cloned.cleanup_thread_count.fetch_sub(1, Ordering::SeqCst); }); debug!("Returning result"); Ok(res) } } mozilla-sccache-40c3d6b/src/bin/sccache-dist/cmdline/000077500000000000000000000000001475712407500224775ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/bin/sccache-dist/cmdline/mod.rs000066400000000000000000000020501475712407500236210ustar00rootroot00000000000000// Copyright 2022 // Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
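// Command-line handling for sccache-dist: `parse::try_parse_from` (re-exported
// below) maps argv onto one of the `Command` variants defined in this module.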
use sccache::{config, dist::ServerId}; mod parse; pub use parse::try_parse_from; #[derive(Debug)] pub enum Command { Auth(AuthSubcommand), Scheduler(config::scheduler::Config), Server(config::server::Config), } #[derive(Debug, PartialEq, Eq)] pub enum AuthSubcommand { Base64 { num_bytes: usize, }, JwtHS256ServerToken { secret_key: String, server_id: ServerId, }, } mozilla-sccache-40c3d6b/src/bin/sccache-dist/cmdline/parse.rs000066400000000000000000000307341475712407500241660ustar00rootroot00000000000000// Copyright 2022 // Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::{env, ffi::OsString, fmt, net::SocketAddr, path::PathBuf, str::FromStr}; use anyhow::{anyhow, bail}; use clap::{Arg, ArgGroup, Command as ClapCommand, ValueEnum}; use sccache::{config, dist::ServerId}; use syslog::Facility; use crate::cmdline::{AuthSubcommand, Command}; #[derive(Debug, Clone)] struct TokenLength(usize); impl TokenLength { fn as_bytes(&self) -> usize { self.0 / 8 } fn from_bits(bits: &str) -> anyhow::Result { let bits: usize = bits.parse()?; if bits & 0x7 != 0 { Err(anyhow!("Number of bits must be divisible by 8")) } else if bits < 64 { Err(anyhow!( "Number of bits must be greater than or equal to 64" )) } else if bits > 4_096 { Err(anyhow!("Number of bits must be less than or equal to 4096")) } else { Ok(Self(bits)) } } } impl fmt::Display for TokenLength { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) } } #[derive(Clone, Copy, ValueEnum)] enum LogLevel { Error, Warn, Info, Debug, Trace, } impl FromStr for LogLevel { type Err = anyhow::Error; fn from_str(s: &str) -> Result { let variant = match s { "error" => Self::Error, "warn" => Self::Warn, "info" => Self::Info, "debug" => Self::Debug, "trace" => Self::Trace, _ => bail!("Unknown log level: {:?}", s), }; Ok(variant) } } impl From for log::LevelFilter { fn from(log_level: LogLevel) -> Self { match log_level { LogLevel::Error => Self::Error, LogLevel::Warn => Self::Warn, LogLevel::Info => Self::Info, LogLevel::Debug => Self::Debug, LogLevel::Trace => Self::Trace, } } } fn flag_infer_long(name: &'static str) -> Arg { Arg::new(name).long(name) } fn get_clap_command() -> ClapCommand { let syslog = flag_infer_long("syslog") .help("Log to the syslog with LEVEL") .value_name("LEVEL") .value_parser(clap::value_parser!(LogLevel)); let config_with_help_message = |help: &'static str| { flag_infer_long("config") .help(help) .value_name("PATH") .value_parser(clap::value_parser!(PathBuf)) }; ClapCommand::new(env!("CARGO_PKG_NAME")) .version(env!("CARGO_PKG_VERSION")) .subcommand_required(true) .subcommand( ClapCommand::new("auth") .subcommand_required(true) .subcommand(ClapCommand::new("generate-jwt-hs256-key")) .subcommand( ClapCommand::new("generate-jwt-hs256-server-token") .args(&[ flag_infer_long("server") .help("Generate a key for the specified server") .value_name("SERVER_ADDR") .value_parser(clap::value_parser!(SocketAddr)) .required(true), flag_infer_long("secret-key") .help("Use 
specified key to create the token") .value_name("KEY"), config_with_help_message( "Use the key from the scheduler config file at PATH", ), ]) .group( ArgGroup::new("key_source_mutual_exclusion") .args(["config", "secret-key"]) .required(true), ), ) .subcommand( ClapCommand::new("generate-shared-token").arg( flag_infer_long("bits") .help("Use the specified number of bits of randomness") .value_name("BITS") .default_value("256") .value_parser(TokenLength::from_bits), ), ), ) .subcommand(ClapCommand::new("scheduler").args(&[ config_with_help_message("Use the scheduler config file at PATH").required(true), syslog.clone(), ])) .subcommand(ClapCommand::new("server").args(&[ config_with_help_message("Use the server config file at PATH").required(true), syslog, ])) } fn check_init_syslog(name: &str, log_level: LogLevel) { let level = log::LevelFilter::from(log_level); drop(syslog::init(Facility::LOG_DAEMON, level, Some(name))); } /// Parse commandline `args` into a `Result` to execute. pub fn try_parse_from( args: impl IntoIterator + Clone>, ) -> anyhow::Result { let matches = get_clap_command().try_get_matches_from(args)?; Ok(match matches.subcommand() { Some(("auth", matches)) => Command::Auth(match matches.subcommand() { // Size based on https://briansmith.org/rustdoc/ring/hmac/fn.recommended_key_len.html Some(("generate-jwt-hs256-key", _)) => AuthSubcommand::Base64 { num_bytes: 256 / 8 }, Some(("generate-jwt-hs256-server-token", matches)) => { let server_addr = matches .get_one("server") .expect("`server` is required and it can be parsed to a `SocketAddr`"); let server_id = ServerId::new(*server_addr); let secret_key = if matches.contains_id("config") { let config_path = matches .get_one::("config") .expect("`config` is required and it can be parsed to a `PathBuf`"); if let Some(config) = config::scheduler::from_path(config_path)? { match config.server_auth { config::scheduler::ServerAuth::JwtHS256 { secret_key } => secret_key, config::scheduler::ServerAuth::Insecure | config::scheduler::ServerAuth::Token { .. } => { bail!("Scheduler not configured with JWT HS256") } } } else { bail!("Could not load config") } } else { matches .get_one::("secret-key") .expect("`secret-key` is required") .to_string() }; AuthSubcommand::JwtHS256ServerToken { secret_key, server_id, } } Some(("generate-shared-token", matches)) => { let token_bits = matches .get_one::("bits") .expect("clap provides default"); AuthSubcommand::Base64 { num_bytes: token_bits.as_bytes(), } } _ => unreachable!("Subcommand is enforced by clap"), }), Some(("scheduler", matches)) => { if matches.contains_id("syslog") { let log_level = matches .get_one::("syslog") .expect("`syslog` is required"); check_init_syslog("sccache-scheduler", *log_level); } let config_path = matches .get_one::("config") .expect("`config` is required"); if let Some(config) = config::scheduler::from_path(config_path)? { Command::Scheduler(config) } else { bail!("Could not load config") } } Some(("server", matches)) => { if matches.contains_id("syslog") { let log_level = matches .get_one::("syslog") .expect("`syslog` is required"); check_init_syslog("sccache-buildserver", *log_level); } let config_path = matches .get_one::("config") .expect("`config` is required"); if let Some(config) = config::server::from_path(config_path)? 
{ Command::Server(config) } else { bail!("Could not load config") } } _ => unreachable!("Subcommand is enforced by clap"), }) } #[cfg(test)] mod tests { use super::*; const EXE: &str = "sccache-dist"; fn auth_generate_shared_tokens_bits(bit_val: &'static str) -> Vec<&'static str> { vec![EXE, "auth", "generate-shared-token", "--bits", bit_val] } fn auth_generate_jwt_hs256_server_token(subcommand_args: &[&'static str]) -> Vec<&'static str> { let mut args = vec![EXE, "auth", "generate-jwt-hs256-server-token"]; args.extend_from_slice(subcommand_args); args } #[test] fn debug_assert() { get_clap_command().debug_assert() } #[test] fn missing_required_subcommands_fails() { let args_sets = &[vec![EXE], vec![EXE, "auth"]]; for args in args_sets { assert!(try_parse_from(args).is_err()); } } #[test] fn invalid_token_bits_fails() { let invalid_vals = vec!["not_a_num", "58", "8000", "70"]; for invalid_val in invalid_vals { let args = auth_generate_shared_tokens_bits(invalid_val); assert!(try_parse_from(args).is_err()); } } #[test] fn auth_generate_server_token_needs_key_source() { let server_args = &["--server", "127.0.0.1:4321"]; let no_key = auth_generate_jwt_hs256_server_token(server_args); assert!(try_parse_from(no_key).is_err()); let mut too_many_keys = auth_generate_jwt_hs256_server_token(server_args); too_many_keys.extend_from_slice(&["--secret-key", "secret", "--config", "some/path.toml"]); assert!(try_parse_from(too_many_keys).is_err()); } // This is all just to work around `PartialEq` not being on some of the values used in variants // for `Command` yet fn assert_args_parse_to_auth(args: Vec<&'static str>, ideal_auth: AuthSubcommand) { match try_parse_from(&args) { Ok(Command::Auth(auth)) => assert_eq!(auth, ideal_auth), _ => panic!("Bad parsing for: {:#?}", args), } } #[test] fn auth_generate_jwt_hs256_key_good() { let args = vec![EXE, "auth", "generate-jwt-hs256-key"]; assert_args_parse_to_auth(args, AuthSubcommand::Base64 { num_bytes: 256 / 8 }); } #[test] fn auth_generate_jwt_hs256_server_token_good() { let base = auth_generate_jwt_hs256_server_token(&["--server", "127.0.0.1:4321"]); let server_socket: SocketAddr = "127.0.0.1:4321".parse().unwrap(); let server_id = ServerId::new(server_socket); let mut secret_key = base.clone(); secret_key.extend_from_slice(&["--secret-key", "very secret"]); assert_args_parse_to_auth( secret_key, AuthSubcommand::JwtHS256ServerToken { server_id, secret_key: "very secret".to_owned(), }, ); } #[test] fn auth_generate_shared_token_good() { let raw_to_expected_bit_vals = &[ ("64", 64 / 8), ("128", 128 / 8), ("136", 136 / 8), ("4000", 4_000 / 8), ]; for (raw, expected) in raw_to_expected_bit_vals { let args = auth_generate_shared_tokens_bits(raw); assert_args_parse_to_auth( args, AuthSubcommand::Base64 { num_bytes: *expected, }, ); } } } mozilla-sccache-40c3d6b/src/bin/sccache-dist/main.rs000066400000000000000000000747751475712407500224020ustar00rootroot00000000000000#[macro_use] extern crate log; use anyhow::{bail, Context, Error, Result}; use base64::Engine; use rand::{rngs::OsRng, RngCore}; use sccache::config::{ scheduler as scheduler_config, server as server_config, INSECURE_DIST_CLIENT_TOKEN, }; use sccache::dist::{ self, AllocJobResult, AssignJobResult, BuilderIncoming, CompileCommand, HeartbeatServerResult, InputsReader, JobAlloc, JobAuthorizer, JobComplete, JobId, JobState, RunJobResult, SchedulerIncoming, SchedulerOutgoing, SchedulerStatusResult, ServerId, ServerIncoming, ServerNonce, ServerOutgoing, SubmitToolchainResult, TcCache, Toolchain, 
    ToolchainReader, UpdateJobStateResult,
};
use sccache::util::daemonize;
use sccache::util::BASE64_URL_SAFE_ENGINE;
use serde::{Deserialize, Serialize};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet};
use std::env;
use std::io;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Mutex, MutexGuard};
use std::time::{Duration, Instant};

use jsonwebtoken as jwt;

#[cfg_attr(target_os = "freebsd", path = "build_freebsd.rs")]
mod build;
mod cmdline;
mod token_check;

use cmdline::{AuthSubcommand, Command};

pub const INSECURE_DIST_SERVER_TOKEN: &str = "dangerously_insecure_server";

// Only supported on x86_64 Linux machines and on FreeBSD
#[cfg(any(
    all(target_os = "linux", target_arch = "x86_64"),
    target_os = "freebsd"
))]
fn main() {
    init_logging();

    let incr_env_strs = ["CARGO_BUILD_INCREMENTAL", "CARGO_INCREMENTAL"];
    incr_env_strs
        .iter()
        .for_each(|incr_str| match env::var(incr_str) {
            Ok(incr_val) if incr_val == "1" => {
                println!("sccache: incremental compilation is prohibited.");
                std::process::exit(1);
            }
            _ => (),
        });

    let command = match cmdline::try_parse_from(env::args()) {
        Ok(cmd) => cmd,
        Err(e) => match e.downcast::<clap::error::Error>() {
            Ok(clap_err) => clap_err.exit(),
            Err(some_other_err) => {
                println!("sccache-dist: {some_other_err}");
                for source_err in some_other_err.chain().skip(1) {
                    println!("sccache-dist: caused by: {source_err}");
                }
                std::process::exit(1);
            }
        },
    };

    std::process::exit(match run(command) {
        Ok(s) => s,
        Err(e) => {
            eprintln!("sccache-dist: error: {}", e);
            for e in e.chain().skip(1) {
                eprintln!("sccache-dist: caused by: {}", e);
            }
            2
        }
    });
}

fn create_server_token(server_id: ServerId, auth_token: &str) -> String {
    format!("{} {}", server_id.addr(), auth_token)
}

fn check_server_token(server_token: &str, auth_token: &str) -> Option<ServerId> {
    let mut split = server_token.splitn(2, |c| c == ' ');
    let server_addr = split.next().and_then(|addr| addr.parse().ok())?;
    match split.next() {
        Some(t) if t == auth_token => Some(ServerId::new(server_addr)),
        Some(_) | None => None,
    }
}

#[derive(Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
struct ServerJwt {
    exp: u64,
    server_id: ServerId,
}

fn create_jwt_server_token(
    server_id: ServerId,
    header: &jwt::Header,
    key: &[u8],
) -> Result<String> {
    let key = jwt::EncodingKey::from_secret(key);
    jwt::encode(header, &ServerJwt { exp: 0, server_id }, &key).map_err(Into::into)
}

fn dangerous_insecure_extract_jwt_server_token(server_token: &str) -> Result<ServerId> {
    let validation = {
        let mut validation = jwt::Validation::default();
        validation.validate_exp = false;
        validation.validate_nbf = false;
        validation.insecure_disable_signature_validation();
        validation
    };
    let dummy_key = jwt::DecodingKey::from_secret(b"secret");
    jwt::decode::<ServerJwt>(server_token, &dummy_key, &validation)
        .map(|res| res.claims.server_id)
        .map_err(Into::into)
}

fn check_jwt_server_token(
    server_token: &str,
    key: &[u8],
    validation: &jwt::Validation,
) -> Option<ServerId> {
    let key = jwt::DecodingKey::from_secret(key);
    jwt::decode::<ServerJwt>(server_token, &key, validation)
        .map(|res| res.claims.server_id)
        .ok()
}

fn run(command: Command) -> Result<i32> {
    match command {
        Command::Auth(AuthSubcommand::Base64 { num_bytes }) => {
            let mut bytes = vec![0; num_bytes];
            OsRng.fill_bytes(&mut bytes);
            // As long as it can be copied, it doesn't matter if this is base64 or hex etc
            println!("{}", BASE64_URL_SAFE_ENGINE.encode(bytes));
            Ok(0)
        }
        Command::Auth(AuthSubcommand::JwtHS256ServerToken {
            secret_key,
            server_id,
        }) => {
            let header = jwt::Header::new(jwt::Algorithm::HS256);
            let secret_key = BASE64_URL_SAFE_ENGINE.decode(secret_key)?;
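            // Illustrative note (summarizing the helpers above, not part of
            // the original source): the token minted below is an HS256-signed
            // JWT whose claims are the `ServerJwt` struct, i.e.
            // `{"exp": 0, "server_id": <socket addr>}`. A scheduler configured
            // with `ServerAuth::JwtHS256` later verifies it via
            // `check_jwt_server_token`, with `validate_exp` turned off because
            // `exp` is pinned to 0.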
let token = create_jwt_server_token(server_id, &header, &secret_key) .context("Failed to create server token")?; println!("{}", token); Ok(0) } Command::Scheduler(scheduler_config::Config { public_addr, client_auth, server_auth, }) => { let check_client_auth: Box = match client_auth { scheduler_config::ClientAuth::Insecure => Box::new(token_check::EqCheck::new( INSECURE_DIST_CLIENT_TOKEN.to_owned(), )), scheduler_config::ClientAuth::Token { token } => { Box::new(token_check::EqCheck::new(token)) } scheduler_config::ClientAuth::JwtValidate { audience, issuer, jwks_url, } => Box::new( token_check::ValidJWTCheck::new(audience, issuer, &jwks_url) .context("Failed to create a checker for valid JWTs")?, ), scheduler_config::ClientAuth::Mozilla { required_groups } => { Box::new(token_check::MozillaCheck::new(required_groups)) } scheduler_config::ClientAuth::ProxyToken { url, cache_secs } => { Box::new(token_check::ProxyTokenCheck::new(url, cache_secs)) } }; let check_server_auth: dist::http::ServerAuthCheck = match server_auth { scheduler_config::ServerAuth::Insecure => { warn!("Scheduler starting with DANGEROUSLY_INSECURE server authentication"); let token = INSECURE_DIST_SERVER_TOKEN; Box::new(move |server_token| check_server_token(server_token, token)) } scheduler_config::ServerAuth::Token { token } => { Box::new(move |server_token| check_server_token(server_token, &token)) } scheduler_config::ServerAuth::JwtHS256 { secret_key } => { let secret_key = BASE64_URL_SAFE_ENGINE .decode(secret_key) .context("Secret key base64 invalid")?; if secret_key.len() != 256 / 8 { bail!("Size of secret key incorrect") } let validation = { let mut validation = jwt::Validation::new(jwt::Algorithm::HS256); validation.leeway = 0; validation.validate_exp = false; validation.validate_nbf = false; validation }; Box::new(move |server_token| { check_jwt_server_token(server_token, &secret_key, &validation) }) } }; daemonize()?; let scheduler = Scheduler::new(); let http_scheduler = dist::http::Scheduler::new( public_addr, scheduler, check_client_auth, check_server_auth, ); http_scheduler.start()?; unreachable!(); } Command::Server(server_config::Config { builder, cache_dir, public_addr, bind_address, scheduler_url, scheduler_auth, toolchain_cache_size, }) => { let builder: Box = match builder { #[cfg(not(target_os = "freebsd"))] server_config::BuilderType::Docker => { Box::new(build::DockerBuilder::new().context("Docker builder failed to start")?) 
            }
            #[cfg(not(target_os = "freebsd"))]
            server_config::BuilderType::Overlay {
                bwrap_path,
                build_dir,
            } => Box::new(
                build::OverlayBuilder::new(bwrap_path, build_dir)
                    .context("Overlay builder failed to start")?,
            ),
            #[cfg(target_os = "freebsd")]
            server_config::BuilderType::Pot {
                pot_fs_root,
                clone_from,
                pot_cmd,
                pot_clone_args,
            } => Box::new(
                build::PotBuilder::new(pot_fs_root, clone_from, pot_cmd, pot_clone_args)
                    .context("Pot builder failed to start")?,
            ),
            _ => bail!(
                "Builder type `{}` not supported on this platform",
                format!("{:?}", builder)
                    .split_whitespace()
                    .next()
                    .unwrap_or("")
            ),
        };

        let server_id = ServerId::new(public_addr);
        let scheduler_auth = match scheduler_auth {
            server_config::SchedulerAuth::Insecure => {
                warn!("Server starting with DANGEROUSLY_INSECURE scheduler authentication");
                create_server_token(server_id, INSECURE_DIST_SERVER_TOKEN)
            }
            server_config::SchedulerAuth::Token { token } => {
                create_server_token(server_id, &token)
            }
            server_config::SchedulerAuth::JwtToken { token } => {
                let token_server_id: ServerId =
                    dangerous_insecure_extract_jwt_server_token(&token)
                        .context("Could not decode scheduler auth jwt")?;
                if token_server_id != server_id {
                    bail!(
                        "JWT server id ({:?}) did not match configured server id ({:?})",
                        token_server_id,
                        server_id
                    )
                }
                token
            }
        };

        let server = Server::new(builder, &cache_dir, toolchain_cache_size)
            .context("Failed to create sccache server instance")?;
        let http_server = dist::http::Server::new(
            public_addr,
            bind_address,
            scheduler_url.to_url(),
            scheduler_auth,
            server,
        )
        .context("Failed to create sccache HTTP server instance")?;
        http_server.start()?;
        unreachable!();
    }
    }
}

fn init_logging() {
    if env::var(sccache::LOGGING_ENV).is_ok() {
        match env_logger::Builder::from_env(sccache::LOGGING_ENV).try_init() {
            Ok(_) => (),
            Err(e) => panic!("Failed to initialize logging: {:?}", e),
        }
    }
}

// Maximum number of jobs per core - only occurs for one core, usually less, see load_weight()
const MAX_PER_CORE_LOAD: f64 = 2f64;
const SERVER_REMEMBER_ERROR_TIMEOUT: Duration = Duration::from_secs(300);
const UNCLAIMED_PENDING_TIMEOUT: Duration = Duration::from_secs(300);
const UNCLAIMED_READY_TIMEOUT: Duration = Duration::from_secs(60);

#[derive(Copy, Clone)]
struct JobDetail {
    server_id: ServerId,
    state: JobState,
}

// To avoid deadlocking, make sure to do all locking at once (i.e. no further
// locking in a downward scope), in alphabetical order
pub struct Scheduler {
    job_count: AtomicUsize,

    // Currently running jobs, can never be Complete
    jobs: Mutex<BTreeMap<JobId, JobDetail>>,

    servers: Mutex<HashMap<ServerId, ServerDetails>>,
}

struct ServerDetails {
    jobs_assigned: HashSet<JobId>,
    // Jobs assigned that haven't seen a state change. Can only be pending
    // or ready.
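    // Lifecycle sketch (inferred from the handlers below; not an original
    // comment): an entry is added here when a job is allocated, removed when
    // the job transitions Ready -> Started, and any entry older than
    // UNCLAIMED_READY_TIMEOUT (ready) or UNCLAIMED_PENDING_TIMEOUT (pending)
    // is de-allocated during the next server heartbeat.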
jobs_unclaimed: HashMap, last_seen: Instant, last_error: Option, num_cpus: usize, server_nonce: ServerNonce, job_authorizer: Box, } impl Scheduler { pub fn new() -> Self { Scheduler { job_count: AtomicUsize::new(0), jobs: Mutex::new(BTreeMap::new()), servers: Mutex::new(HashMap::new()), } } fn prune_servers( &self, servers: &mut MutexGuard>, jobs: &mut MutexGuard>, ) { let now = Instant::now(); let mut dead_servers = Vec::new(); for (&server_id, details) in servers.iter_mut() { if now.duration_since(details.last_seen) > dist::http::HEARTBEAT_TIMEOUT { dead_servers.push(server_id); } } for server_id in dead_servers { warn!( "Server {} appears to be dead, pruning it in the scheduler", server_id.addr() ); let server_details = servers .remove(&server_id) .expect("server went missing from map"); for job_id in server_details.jobs_assigned { warn!( "Non-terminated job {} was cleaned up in server pruning", job_id ); // A job may be missing here if it failed to allocate // initially, so just warn if it's not present. if jobs.remove(&job_id).is_none() { warn!( "Non-terminated job {} assignment originally failed.", job_id ); } } } } } impl Default for Scheduler { fn default() -> Self { Self::new() } } fn load_weight(job_count: usize, core_count: usize) -> f64 { // Oversubscribe cores just a little to make up for network and I/O latency. This formula is // not based on hard data but an extrapolation to high core counts of the conventional wisdom // that slightly more jobs than cores achieve the shortest compile time. Which is originally // about local compiles and this is over the network, so be slightly less conservative. let cores_plus_slack = core_count + 1 + core_count / 8; // Note >=, not >, because the question is "can we add another job"? if job_count >= cores_plus_slack { MAX_PER_CORE_LOAD + 1f64 // no new jobs for now } else { job_count as f64 / core_count as f64 } } impl SchedulerIncoming for Scheduler { fn handle_alloc_job( &self, requester: &dyn SchedulerOutgoing, tc: Toolchain, ) -> Result { let (job_id, server_id, auth) = { // LOCKS let mut servers = self.servers.lock().unwrap(); let res = { let mut best = None; let mut best_err = None; let mut best_load: f64 = MAX_PER_CORE_LOAD; let now = Instant::now(); for (&server_id, details) in servers.iter_mut() { let load = load_weight(details.jobs_assigned.len(), details.num_cpus); if let Some(last_error) = details.last_error { if load < MAX_PER_CORE_LOAD { if now.duration_since(last_error) > SERVER_REMEMBER_ERROR_TIMEOUT { details.last_error = None; } match best_err { Some(( _, &mut ServerDetails { last_error: Some(best_last_err), .. 
}, )) => { if last_error < best_last_err { trace!( "Selected {:?}, its most recent error is {:?} ago", server_id, now - last_error ); best_err = Some((server_id, details)); } } _ => { trace!( "Selected {:?}, its most recent error is {:?} ago", server_id, now - last_error ); best_err = Some((server_id, details)); } } } } else if load < best_load { best = Some((server_id, details)); trace!("Selected {:?} as the server with the best load", server_id); best_load = load; if load == 0f64 { break; } } } // Assign the job to our best choice if let Some((server_id, server_details)) = best.or(best_err) { let job_count = self.job_count.fetch_add(1, Ordering::SeqCst) as u64; let job_id = JobId(job_count); assert!(server_details.jobs_assigned.insert(job_id)); assert!(server_details .jobs_unclaimed .insert(job_id, Instant::now()) .is_none()); info!( "Job {} created and will be assigned to server {:?}", job_id, server_id ); let auth = server_details .job_authorizer .generate_token(job_id) .map_err(Error::from) .context("Could not create an auth token for this job")?; Some((job_id, server_id, auth)) } else { None } }; if let Some(res) = res { res } else { let msg = format!( "Insufficient capacity across {} available servers", servers.len() ); return Ok(AllocJobResult::Fail { msg }); } }; let AssignJobResult { state, need_toolchain, } = requester .do_assign_job(server_id, job_id, tc, auth.clone()) .with_context(|| { // LOCKS let mut servers = self.servers.lock().unwrap(); if let Some(entry) = servers.get_mut(&server_id) { entry.last_error = Some(Instant::now()); entry.jobs_unclaimed.remove(&job_id); if !entry.jobs_assigned.remove(&job_id) { "assign job failed and job not known to the server" } else { "assign job failed, job un-assigned from the server" } } else { "assign job failed and server not known" } })?; { // LOCKS let mut jobs = self.jobs.lock().unwrap(); info!( "Job {} successfully assigned and saved with state {:?}", job_id, state ); assert!(jobs .insert(job_id, JobDetail { server_id, state }) .is_none()); } let job_alloc = JobAlloc { auth, job_id, server_id, }; Ok(AllocJobResult::Success { job_alloc, need_toolchain, }) } fn handle_heartbeat_server( &self, server_id: ServerId, server_nonce: ServerNonce, num_cpus: usize, job_authorizer: Box, ) -> Result { if num_cpus == 0 { bail!("Invalid number of CPUs (0) specified in heartbeat") } // LOCKS let mut jobs = self.jobs.lock().unwrap(); let mut servers = self.servers.lock().unwrap(); self.prune_servers(&mut servers, &mut jobs); match servers.get_mut(&server_id) { Some(ref mut details) if details.server_nonce == server_nonce => { let now = Instant::now(); details.last_seen = now; let mut stale_jobs = Vec::new(); for (&job_id, &last_seen) in details.jobs_unclaimed.iter() { if now.duration_since(last_seen) < UNCLAIMED_READY_TIMEOUT { continue; } if let Some(detail) = jobs.get(&job_id) { match detail.state { JobState::Ready => { stale_jobs.push(job_id); } JobState::Pending => { if now.duration_since(last_seen) > UNCLAIMED_PENDING_TIMEOUT { stale_jobs.push(job_id); } } state => { warn!("Invalid unclaimed job state for {}: {}", job_id, state); } } } else { warn!("Unknown stale job {}", job_id); stale_jobs.push(job_id); } } if !stale_jobs.is_empty() { warn!( "The following stale jobs will be de-allocated: {:?}", stale_jobs ); for job_id in stale_jobs { if !details.jobs_assigned.remove(&job_id) { warn!( "Stale job for server {} not assigned: {}", server_id.addr(), job_id ); } if details.jobs_unclaimed.remove(&job_id).is_none() { warn!( "Unknown stale job for 
server {}: {}", server_id.addr(), job_id ); } if jobs.remove(&job_id).is_none() { warn!( "Unknown stale job for server {}: {}", server_id.addr(), job_id ); } } } return Ok(HeartbeatServerResult { is_new: false }); } Some(ref mut details) if details.server_nonce != server_nonce => { for job_id in details.jobs_assigned.iter() { if jobs.remove(job_id).is_none() { warn!( "Unknown job found when replacing server {}: {}", server_id.addr(), job_id ); } } } _ => (), } info!("Registered new server {:?}", server_id); servers.insert( server_id, ServerDetails { last_seen: Instant::now(), last_error: None, jobs_assigned: HashSet::new(), jobs_unclaimed: HashMap::new(), num_cpus, server_nonce, job_authorizer, }, ); Ok(HeartbeatServerResult { is_new: true }) } fn handle_update_job_state( &self, job_id: JobId, server_id: ServerId, job_state: JobState, ) -> Result { // LOCKS let mut jobs = self.jobs.lock().unwrap(); let mut servers = self.servers.lock().unwrap(); if let btree_map::Entry::Occupied(mut entry) = jobs.entry(job_id) { let job_detail = entry.get(); if job_detail.server_id != server_id { bail!( "Job id {} is not registered on server {:?}", job_id, server_id ) } let now = Instant::now(); let mut server_details = servers.get_mut(&server_id); if let Some(ref mut details) = server_details { details.last_seen = now; }; match (job_detail.state, job_state) { (JobState::Pending, JobState::Ready) => entry.get_mut().state = job_state, (JobState::Ready, JobState::Started) => { if let Some(details) = server_details { details.jobs_unclaimed.remove(&job_id); } else { warn!("Job state updated, but server is not known to scheduler") } entry.get_mut().state = job_state } (JobState::Started, JobState::Complete) => { let (job_id, _) = entry.remove_entry(); if let Some(entry) = server_details { assert!(entry.jobs_assigned.remove(&job_id)) } else { bail!("Job was marked as finished, but server is not known to scheduler") } } (from, to) => bail!("Invalid job state transition from {} to {}", from, to), } info!("Job {} updated state to {:?}", job_id, job_state); } else { bail!("Unknown job") } Ok(UpdateJobStateResult::Success) } fn handle_status(&self) -> Result { // LOCKS let mut jobs = self.jobs.lock().unwrap(); let mut servers = self.servers.lock().unwrap(); self.prune_servers(&mut servers, &mut jobs); Ok(SchedulerStatusResult { num_servers: servers.len(), num_cpus: servers.values().map(|v| v.num_cpus).sum(), in_progress: jobs.len(), }) } } pub struct Server { builder: Box, cache: Mutex, job_toolchains: Mutex>, } impl Server { pub fn new( builder: Box, cache_dir: &Path, toolchain_cache_size: u64, ) -> Result { let cache = TcCache::new(&cache_dir.join("tc"), toolchain_cache_size) .context("Failed to create toolchain cache")?; Ok(Server { builder, cache: Mutex::new(cache), job_toolchains: Mutex::new(HashMap::new()), }) } } impl ServerIncoming for Server { fn handle_assign_job(&self, job_id: JobId, tc: Toolchain) -> Result { let need_toolchain = !self.cache.lock().unwrap().contains_toolchain(&tc); assert!(self .job_toolchains .lock() .unwrap() .insert(job_id, tc) .is_none()); let state = if need_toolchain { JobState::Pending } else { // TODO: can start prepping the build environment now JobState::Ready }; Ok(AssignJobResult { state, need_toolchain, }) } fn handle_submit_toolchain( &self, requester: &dyn ServerOutgoing, job_id: JobId, tc_rdr: ToolchainReader, ) -> Result { requester .do_update_job_state(job_id, JobState::Ready) .context("Updating job state failed")?; // TODO: need to lock the toolchain until the container 
has started // TODO: can start prepping container let tc = match self.job_toolchains.lock().unwrap().get(&job_id).cloned() { Some(tc) => tc, None => return Ok(SubmitToolchainResult::JobNotFound), }; let mut cache = self.cache.lock().unwrap(); // TODO: this returns before reading all the data, is that valid? if cache.contains_toolchain(&tc) { return Ok(SubmitToolchainResult::Success); } Ok(cache .insert_with(&tc, |mut file| { io::copy(&mut { tc_rdr }, &mut file).map(|_| ()) }) .map(|_| SubmitToolchainResult::Success) .unwrap_or(SubmitToolchainResult::CannotCache)) } fn handle_run_job( &self, requester: &dyn ServerOutgoing, job_id: JobId, command: CompileCommand, outputs: Vec, inputs_rdr: InputsReader, ) -> Result { requester .do_update_job_state(job_id, JobState::Started) .context("Updating job state failed")?; let tc = self.job_toolchains.lock().unwrap().remove(&job_id); let res = match tc { None => Ok(RunJobResult::JobNotFound), Some(tc) => { match self .builder .run_build(tc, command, outputs, inputs_rdr, &self.cache) { Err(e) => Err(e.context("run build failed")), Ok(res) => Ok(RunJobResult::Complete(JobComplete { output: res.output, outputs: res.outputs, })), } } }; requester .do_update_job_state(job_id, JobState::Complete) .context("Updating job state failed")?; res } } mozilla-sccache-40c3d6b/src/bin/sccache-dist/token_check.rs000066400000000000000000000317371475712407500237220ustar00rootroot00000000000000use anyhow::{bail, Context, Result}; use base64::Engine; use sccache::dist::http::{ClientAuthCheck, ClientVisibleMsg}; use sccache::util::{new_reqwest_blocking_client, BASE64_URL_SAFE_ENGINE}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::result::Result as StdResult; use std::sync::Mutex; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; // https://auth0.com/docs/jwks #[derive(Debug, Serialize, Deserialize)] pub struct Jwks { pub keys: Vec, } #[derive(Debug, Serialize, Deserialize)] pub struct Jwk { pub kid: String, kty: String, n: String, e: String, } impl Jwk { // https://github.com/lawliet89/biscuit/issues/96#issuecomment-399149872 pub fn to_der_pkcs1(&self) -> Result> { if self.kty != "RSA" { bail!("Cannot handle non-RSA JWK") } // JWK is big-endian, openssl bignum from_slice is big-endian let n = BASE64_URL_SAFE_ENGINE .decode(&self.n) .context("Failed to base64 decode n")?; let e = BASE64_URL_SAFE_ENGINE .decode(&self.e) .context("Failed to base64 decode e")?; let n_bn = openssl::bn::BigNum::from_slice(&n) .context("Failed to create openssl bignum from n")?; let e_bn = openssl::bn::BigNum::from_slice(&e) .context("Failed to create openssl bignum from e")?; let pubkey = openssl::rsa::Rsa::from_public_components(n_bn, e_bn) .context("Failed to create pubkey from n and e")?; let der: Vec = pubkey .public_key_to_der_pkcs1() .context("Failed to convert public key to der pkcs1")?; Ok(der) } } // Check a token is equal to a fixed string pub struct EqCheck { s: String, } impl ClientAuthCheck for EqCheck { fn check(&self, token: &str) -> StdResult<(), ClientVisibleMsg> { if self.s == token { Ok(()) } else { warn!("User token {} != expected token {}", token, self.s); Err(ClientVisibleMsg::from_nonsensitive( "Fixed token mismatch".to_owned(), )) } } } impl EqCheck { pub fn new(s: String) -> Self { Self { s } } } // https://infosec.mozilla.org/guidelines/iam/openid_connect#session-handling const MOZ_SESSION_TIMEOUT: Duration = Duration::from_secs(60 * 15); const MOZ_USERINFO_ENDPOINT: &str = "https://auth.mozilla.auth0.com/userinfo"; /// 
Mozilla-specific check by forwarding the token onto the auth0 userinfo endpoint pub struct MozillaCheck { // token, token_expiry auth_cache: Mutex>, client: reqwest::blocking::Client, required_groups: Vec, } impl ClientAuthCheck for MozillaCheck { fn check(&self, token: &str) -> StdResult<(), ClientVisibleMsg> { self.check_mozilla(token).map_err(|e| { warn!("Mozilla token validation failed: {}", e); ClientVisibleMsg::from_nonsensitive( "Failed to validate Mozilla OAuth token, run sccache --dist-auth".to_owned(), ) }) } } impl MozillaCheck { pub fn new(required_groups: Vec) -> Self { Self { auth_cache: Mutex::new(HashMap::new()), client: new_reqwest_blocking_client(), required_groups, } } fn check_mozilla(&self, token: &str) -> Result<()> { // azp == client_id // { // "iss": "https://auth.mozilla.auth0.com/", // "sub": "ad|Mozilla-LDAP|asayers", // "aud": [ // "sccache", // "https://auth.mozilla.auth0.com/userinfo" // ], // "iat": 1541103283, // "exp": 1541708083, // "azp": "F1VVD6nRTckSVrviMRaOdLBWIk1AvHYo", // "scope": "openid" // } #[derive(Deserialize)] struct MozillaToken { exp: u64, sub: String, } let mut validation = jwt::Validation::default(); validation.validate_exp = false; validation.validate_nbf = false; // We don't really do any validation here (just forwarding on) so it's ok to unsafely decode validation.insecure_disable_signature_validation(); let dummy_key = jwt::DecodingKey::from_secret(b"secret"); let insecure_token = jwt::decode::(token, &dummy_key, &validation) .context("Unable to decode jwt")?; let user = insecure_token.claims.sub; trace!("Validating token for user {} with mozilla", user); if UNIX_EPOCH + Duration::from_secs(insecure_token.claims.exp) < SystemTime::now() { bail!("JWT expired") } // If the token is cached and not expired, return it let mut auth_cache = self.auth_cache.lock().unwrap(); if let Some(cached_at) = auth_cache.get(token) { if cached_at.elapsed() < MOZ_SESSION_TIMEOUT { return Ok(()); } } auth_cache.remove(token); debug!("User {} not in cache, validating via auth0 endpoint", user); // Retrieve the groups from the auth0 /userinfo endpoint, which Mozilla rules populate with groups // https://github.com/mozilla-iam/auth0-deploy/blob/6889f1dde12b84af50bb4b2e2f00d5e80d5be33f/rules/CIS-Claims-fixups.js#L158-L168 let url = reqwest::Url::parse(MOZ_USERINFO_ENDPOINT) .expect("Failed to parse MOZ_USERINFO_ENDPOINT"); let res = self .client .get(url.clone()) .bearer_auth(token) .send() .context("Failed to make request to mozilla userinfo")?; let status = res.status(); let res_text = res .text() .context("Failed to interpret response from mozilla userinfo as string")?; if !status.is_success() { bail!("JWT forwarded to {} returned {}: {}", url, status, res_text) } // The API didn't return a HTTP error code, let's check the response check_mozilla_profile(&user, &self.required_groups, &res_text) .with_context(|| format!("Validation of the user profile failed for {}", user))?; // Validation success, cache the token debug!("Validation for user {} succeeded, caching", user); auth_cache.insert(token.to_owned(), Instant::now()); Ok(()) } } fn check_mozilla_profile(user: &str, required_groups: &[String], profile: &str) -> Result<()> { #[derive(Deserialize)] struct UserInfo { sub: String, #[serde(rename = "https://sso.mozilla.com/claim/groups")] groups: Vec, } let profile: UserInfo = serde_json::from_str(profile) .with_context(|| format!("Could not parse profile: {}", profile))?; if user != profile.sub { bail!( "User {} retrieved in profile is different to desired 
user {}", profile.sub, user ) } for group in required_groups.iter() { if !profile.groups.contains(group) { bail!("User {} is not a member of required group {}", user, group) } } Ok(()) } #[test] fn test_auth_verify_check_mozilla_profile() { // A successful response let profile = r#"{ "sub": "ad|Mozilla-LDAP|asayers", "https://sso.mozilla.com/claim/groups": [ "everyone", "hris_dept_firefox", "hris_individual_contributor", "hris_nonmanagers", "hris_is_staff", "hris_workertype_contractor" ], "https://sso.mozilla.com/claim/README_FIRST": "Please refer to https://github.com/mozilla-iam/person-api in order to query Mozilla IAM CIS user profile data" }"#; // If the user has been deactivated since the token was issued. Note this may be partnered with an error code // response so may never reach validation let profile_fail = r#"{ "error": "unauthorized", "error_description": "user is blocked" }"#; assert!(check_mozilla_profile( "ad|Mozilla-LDAP|asayers", &["hris_dept_firefox".to_owned()], profile, ) .is_ok()); assert!(check_mozilla_profile("ad|Mozilla-LDAP|asayers", &[], profile).is_ok()); assert!(check_mozilla_profile( "ad|Mozilla-LDAP|asayers", &["hris_the_ceo".to_owned()], profile, ) .is_err()); assert!(check_mozilla_profile("ad|Mozilla-LDAP|asayers", &[], profile_fail).is_err()); } // Don't check a token is valid (it may not even be a JWT) just forward it to // an API and check for success pub struct ProxyTokenCheck { client: reqwest::blocking::Client, maybe_auth_cache: Option, Duration)>>, url: String, } impl ClientAuthCheck for ProxyTokenCheck { fn check(&self, token: &str) -> StdResult<(), ClientVisibleMsg> { match self.check_token_with_forwarding(token) { Ok(()) => Ok(()), Err(e) => { warn!("Proxying token validation failed: {}", e); Err(ClientVisibleMsg::from_nonsensitive( "Validation with token forwarding failed".to_owned(), )) } } } } impl ProxyTokenCheck { pub fn new(url: String, cache_secs: Option) -> Self { let maybe_auth_cache: Option, Duration)>> = cache_secs.map(|secs| Mutex::new((HashMap::new(), Duration::from_secs(secs)))); Self { client: new_reqwest_blocking_client(), maybe_auth_cache, url, } } fn check_token_with_forwarding(&self, token: &str) -> Result<()> { trace!("Validating token by forwarding to {}", self.url); // If the token is cached and not cache has not expired, return it if let Some(ref auth_cache) = self.maybe_auth_cache { let mut auth_cache = auth_cache.lock().unwrap(); let (ref mut auth_cache, cache_duration) = *auth_cache; if let Some(cached_at) = auth_cache.get(token) { if cached_at.elapsed() < cache_duration { return Ok(()); } } auth_cache.remove(token); } // Make a request to another API, which as a side effect should actually check the token let res = self .client .get(&self.url) .bearer_auth(token) .send() .context("Failed to make request to proxying url")?; if !res.status().is_success() { bail!("Token forwarded to {} returned {}", self.url, res.status()); } // Cache the token if let Some(ref auth_cache) = self.maybe_auth_cache { let mut auth_cache = auth_cache.lock().unwrap(); let (ref mut auth_cache, _) = *auth_cache; auth_cache.insert(token.to_owned(), Instant::now()); } Ok(()) } } // Check a JWT is valid pub struct ValidJWTCheck { audience: String, issuer: String, kid_to_pkcs1: HashMap>, } impl ClientAuthCheck for ValidJWTCheck { fn check(&self, token: &str) -> StdResult<(), ClientVisibleMsg> { match self.check_jwt_validity(token) { Ok(()) => Ok(()), Err(e) => { warn!("JWT validation failed: {}", e); Err(ClientVisibleMsg::from_nonsensitive( "JWT could 
not be validated".to_owned(), )) } } } } impl ValidJWTCheck { pub fn new(audience: String, issuer: String, jwks_url: &str) -> Result { let res = reqwest::blocking::get(jwks_url).context("Failed to make request to JWKs url")?; if !res.status().is_success() { bail!("Could not retrieve JWKs, HTTP error: {}", res.status()) } let jwks: Jwks = res.json().context("Failed to parse JWKs json")?; let kid_to_pkcs1 = jwks .keys .into_iter() .map(|k| k.to_der_pkcs1().map(|pkcs1| (k.kid, pkcs1))) .collect::>() .context("Failed to convert JWKs into pkcs1")?; Ok(Self { audience, issuer, kid_to_pkcs1, }) } fn check_jwt_validity(&self, token: &str) -> Result<()> { let header = jwt::decode_header(token).context("Could not decode jwt header")?; trace!("Validating JWT in scheduler"); // Prepare validation let kid = header.kid.context("No kid found")?; let pkcs1 = jwt::DecodingKey::from_rsa_der( self.kid_to_pkcs1 .get(&kid) .context("kid not found in jwks")?, ); let mut validation = jwt::Validation::new(header.alg); validation.set_audience(&[&self.audience]); validation.set_issuer(&[&self.issuer]); #[derive(Deserialize)] struct Claims {} // Decode the JWT, discarding any claims - we just care about validity let _tokendata = jwt::decode::(token, &pkcs1, &validation) .context("Unable to validate and decode jwt")?; Ok(()) } } mozilla-sccache-40c3d6b/src/cache/000077500000000000000000000000001475712407500170255ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/cache/azure.rs000066400000000000000000000023361475712407500205250ustar00rootroot00000000000000// Copyright 2018 Benjamin Bader // Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use opendal::Operator; use opendal::layers::LoggingLayer; use opendal::services::Azblob; use crate::errors::*; use super::http_client::set_user_agent; pub struct AzureBlobCache; impl AzureBlobCache { pub fn build(connection_string: &str, container: &str, key_prefix: &str) -> Result { let builder = Azblob::from_connection_string(connection_string)? .container(container) .root(key_prefix) .http_client(set_user_agent()); let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } } mozilla-sccache-40c3d6b/src/cache/cache.rs000066400000000000000000000666361475712407500204570ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#[cfg(feature = "azure")] use crate::cache::azure::AzureBlobCache; use crate::cache::disk::DiskCache; #[cfg(feature = "gcs")] use crate::cache::gcs::GCSCache; #[cfg(feature = "gha")] use crate::cache::gha::GHACache; #[cfg(feature = "memcached")] use crate::cache::memcached::MemcachedCache; #[cfg(feature = "oss")] use crate::cache::oss::OSSCache; #[cfg(feature = "redis")] use crate::cache::redis::RedisCache; #[cfg(feature = "s3")] use crate::cache::s3::S3Cache; #[cfg(feature = "webdav")] use crate::cache::webdav::WebdavCache; use crate::compiler::PreprocessorCacheEntry; use crate::config::Config; #[cfg(any( feature = "azure", feature = "gcs", feature = "gha", feature = "memcached", feature = "redis", feature = "s3", feature = "webdav", feature = "oss" ))] use crate::config::{self, CacheType}; use async_trait::async_trait; use fs_err as fs; use serde::{Deserialize, Serialize}; use std::fmt; use std::io::{self, Cursor, Read, Seek, Write}; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use tempfile::NamedTempFile; use zip::write::FileOptions; use zip::{CompressionMethod, ZipArchive, ZipWriter}; use crate::errors::*; #[cfg(unix)] fn get_file_mode(file: &fs::File) -> Result> { use std::os::unix::fs::MetadataExt; Ok(Some(file.metadata()?.mode())) } #[cfg(windows)] #[allow(clippy::unnecessary_wraps)] fn get_file_mode(_file: &fs::File) -> Result> { Ok(None) } #[cfg(unix)] fn set_file_mode(path: &Path, mode: u32) -> Result<()> { use std::fs::Permissions; use std::os::unix::fs::PermissionsExt; let p = Permissions::from_mode(mode); fs::set_permissions(path, p)?; Ok(()) } #[cfg(windows)] #[allow(clippy::unnecessary_wraps)] fn set_file_mode(_path: &Path, _mode: u32) -> Result<()> { Ok(()) } /// Cache object sourced by a file. #[derive(Clone)] pub struct FileObjectSource { /// Identifier for this object. Should be unique within a compilation unit. /// Note that a compilation unit is a single source file in C/C++ and a crate in Rust. pub key: String, /// Absolute path to the file. pub path: PathBuf, /// Whether the file must be present on disk and is essential for the compilation. pub optional: bool, } /// Result of a cache lookup. pub enum Cache { /// Result was found in cache. Hit(CacheRead), /// Result was not found in cache. Miss, /// Do not cache the results of the compilation. None, /// Cache entry should be ignored, force compilation. Recache, } impl fmt::Debug for Cache { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Cache::Hit(_) => write!(f, "Cache::Hit(...)"), Cache::Miss => write!(f, "Cache::Miss"), Cache::None => write!(f, "Cache::None"), Cache::Recache => write!(f, "Cache::Recache"), } } } /// CacheMode is used to represent which mode we are using. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum CacheMode { /// Only read cache from storage. ReadOnly, /// Full support of cache storage: read and write. ReadWrite, } /// Trait objects can't be bounded by more than one non-builtin trait. pub trait ReadSeek: Read + Seek + Send {} impl ReadSeek for T {} /// Data stored in the compiler cache. pub struct CacheRead { zip: ZipArchive>, } /// Represents a failure to decompress stored object data. #[derive(Debug)] pub struct DecompressionFailure; impl std::fmt::Display for DecompressionFailure { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "failed to decompress content") } } impl std::error::Error for DecompressionFailure {} impl CacheRead { /// Create a cache entry from `reader`. 
pub fn from(reader: R) -> Result where R: ReadSeek + 'static, { let z = ZipArchive::new(Box::new(reader) as Box) .context("Failed to parse cache entry")?; Ok(CacheRead { zip: z }) } /// Get an object from this cache entry at `name` and write it to `to`. /// If the file has stored permissions, return them. pub fn get_object(&mut self, name: &str, to: &mut T) -> Result> where T: Write, { let file = self.zip.by_name(name).or(Err(DecompressionFailure))?; if file.compression() != CompressionMethod::Stored { bail!(DecompressionFailure); } let mode = file.unix_mode(); zstd::stream::copy_decode(file, to).or(Err(DecompressionFailure))?; Ok(mode) } /// Get the stdout from this cache entry, if it exists. pub fn get_stdout(&mut self) -> Vec { self.get_bytes("stdout") } /// Get the stderr from this cache entry, if it exists. pub fn get_stderr(&mut self) -> Vec { self.get_bytes("stderr") } fn get_bytes(&mut self, name: &str) -> Vec { let mut bytes = Vec::new(); drop(self.get_object(name, &mut bytes)); bytes } pub async fn extract_objects( mut self, objects: T, pool: &tokio::runtime::Handle, ) -> Result<()> where T: IntoIterator + Send + Sync + 'static, { pool.spawn_blocking(move || { for FileObjectSource { key, path, optional, } in objects { let dir = match path.parent() { Some(d) => d, None => bail!("Output file without a parent directory!"), }; // Write the cache entry to a tempfile and then atomically // move it to its final location so that other rustc invocations // happening in parallel don't see a partially-written file. let mut tmp = NamedTempFile::new_in(dir)?; match (self.get_object(&key, &mut tmp), optional) { (Ok(mode), _) => { tmp.persist(&path)?; if let Some(mode) = mode { set_file_mode(&path, mode)?; } } (Err(e), false) => return Err(e), // skip if no object found and it's optional (Err(_), true) => continue, } } Ok(()) }) .await? } } /// Data to be stored in the compiler cache. pub struct CacheWrite { zip: ZipWriter>>, } impl CacheWrite { /// Create a new, empty cache entry. pub fn new() -> CacheWrite { CacheWrite { zip: ZipWriter::new(io::Cursor::new(vec![])), } } /// Create a new cache entry populated with the contents of `objects`. pub async fn from_objects(objects: T, pool: &tokio::runtime::Handle) -> Result where T: IntoIterator + Send + Sync + 'static, { pool.spawn_blocking(move || { let mut entry = CacheWrite::new(); for FileObjectSource { key, path, optional, } in objects { let f = fs::File::open(&path) .with_context(|| format!("failed to open file `{:?}`", path)); match (f, optional) { (Ok(mut f), _) => { let mode = get_file_mode(&f)?; entry.put_object(&key, &mut f, mode).with_context(|| { format!("failed to put object `{:?}` in cache entry", path) })?; } (Err(e), false) => return Err(e), (Err(_), true) => continue, } } Ok(entry) }) .await? } /// Add an object containing the contents of `from` to this cache entry at `name`. /// If `mode` is `Some`, store the file entry with that mode. pub fn put_object(&mut self, name: &str, from: &mut T, mode: Option) -> Result<()> where T: Read, { // We're going to declare the compression method as "stored", // but we're actually going to store zstd-compressed blobs. 
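        // The read side must stay in sync with this choice:
        // `CacheRead::get_object` rejects any entry whose method is not
        // `Stored` and then zstd-decodes the payload via
        // `zstd::stream::copy_decode`.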
let opts = FileOptions::default().compression_method(CompressionMethod::Stored); let opts = if let Some(mode) = mode { opts.unix_permissions(mode) } else { opts }; self.zip .start_file(name, opts) .context("Failed to start cache entry object")?; let compression_level = std::env::var("SCCACHE_CACHE_ZSTD_LEVEL") .ok() .and_then(|value| value.parse::().ok()) .unwrap_or(3); zstd::stream::copy_encode(from, &mut self.zip, compression_level)?; Ok(()) } pub fn put_stdout(&mut self, bytes: &[u8]) -> Result<()> { self.put_bytes("stdout", bytes) } pub fn put_stderr(&mut self, bytes: &[u8]) -> Result<()> { self.put_bytes("stderr", bytes) } fn put_bytes(&mut self, name: &str, bytes: &[u8]) -> Result<()> { if !bytes.is_empty() { let mut cursor = Cursor::new(bytes); return self.put_object(name, &mut cursor, None); } Ok(()) } /// Finish writing data to the cache entry writer, and return the data. pub fn finish(self) -> Result> { let CacheWrite { mut zip } = self; let cur = zip.finish().context("Failed to finish cache entry zip")?; Ok(cur.into_inner()) } } impl Default for CacheWrite { fn default() -> Self { Self::new() } } /// An interface to cache storage. #[async_trait] pub trait Storage: Send + Sync { /// Get a cache entry by `key`. /// /// If an error occurs, this method should return a `Cache::Error`. /// If nothing fails but the entry is not found in the cache, /// it should return a `Cache::Miss`. /// If the entry is successfully found in the cache, it should /// return a `Cache::Hit`. async fn get(&self, key: &str) -> Result; /// Put `entry` in the cache under `key`. /// /// Returns a `Future` that will provide the result or error when the put is /// finished. async fn put(&self, key: &str, entry: CacheWrite) -> Result; /// Check the cache capability. /// /// - `Ok(CacheMode::ReadOnly)` means cache can only be used to `get` /// cache. /// - `Ok(CacheMode::ReadWrite)` means cache can do both `get` and `put`. /// - `Err(err)` means cache is not setup correctly or not match with /// users input (for example, user try to use `ReadWrite` but cache /// is `ReadOnly`). /// /// We will provide a default implementation which returns /// `Ok(CacheMode::ReadWrite)` for service that doesn't /// support check yet. async fn check(&self) -> Result { Ok(CacheMode::ReadWrite) } /// Get the storage location. fn location(&self) -> String; /// Get the current storage usage, if applicable. async fn current_size(&self) -> Result>; /// Get the maximum storage size, if applicable. async fn max_size(&self) -> Result>; /// Return the config for preprocessor cache mode if applicable fn preprocessor_cache_mode_config(&self) -> PreprocessorCacheModeConfig { // Enable by default, only in local mode PreprocessorCacheModeConfig::default() } /// Return the preprocessor cache entry for a given preprocessor key, /// if it exists. /// Only applicable when using preprocessor cache mode. async fn get_preprocessor_cache_entry( &self, _key: &str, ) -> Result>> { Ok(None) } /// Insert a preprocessor cache entry at the given preprocessor key, /// overwriting the entry if it exists. /// Only applicable when using preprocessor cache mode. async fn put_preprocessor_cache_entry( &self, _key: &str, _preprocessor_cache_entry: PreprocessorCacheEntry, ) -> Result<()> { Ok(()) } } /// Configuration switches for preprocessor cache mode. 
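/// An illustrative TOML snippet mapping onto these fields (the keys come from
/// this struct; where the table lives inside the sccache config file is an
/// assumption, not shown here):
///
/// ```toml
/// use_preprocessor_cache_mode = true
/// file_stat_matches = true
/// use_ctime_for_stat = false
/// ```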
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] #[serde(default)] pub struct PreprocessorCacheModeConfig { /// Whether to use preprocessor cache mode entirely pub use_preprocessor_cache_mode: bool, /// If false (default), only compare header files by hashing their contents. /// If true, will use size + ctime + mtime to check whether a file has changed. /// See other flags below for more control over this behavior. pub file_stat_matches: bool, /// If true (default), uses the ctime (file status change on UNIX, /// creation time on Windows) to check that a file has/hasn't changed. /// Can be useful to disable when backdating modification times /// in a controlled manner. pub use_ctime_for_stat: bool, /// If true, ignore `__DATE__`, `__TIME__` and `__TIMESTAMP__` being present /// in the source code. Will speed up preprocessor cache mode, /// but can result in false positives. pub ignore_time_macros: bool, /// If true, preprocessor cache mode will not cache system headers, only /// add them to the hash. pub skip_system_headers: bool, /// If true (default), will add the current working directory in the hash to /// distinguish two compilations from different directories. pub hash_working_directory: bool, } impl Default for PreprocessorCacheModeConfig { fn default() -> Self { Self { use_preprocessor_cache_mode: false, file_stat_matches: false, use_ctime_for_stat: true, ignore_time_macros: false, skip_system_headers: false, hash_working_directory: true, } } } impl PreprocessorCacheModeConfig { /// Return a default [`Self`], but with the cache active. pub fn activated() -> Self { Self { use_preprocessor_cache_mode: true, ..Default::default() } } } /// Implement storage for operator. #[cfg(any( feature = "azure", feature = "gcs", feature = "gha", feature = "memcached", feature = "redis", feature = "s3", feature = "webdav", ))] #[async_trait] impl Storage for opendal::Operator { async fn get(&self, key: &str) -> Result { match self.read(&normalize_key(key)).await { Ok(res) => { let hit = CacheRead::from(io::Cursor::new(res.to_bytes()))?; Ok(Cache::Hit(hit)) } Err(e) if e.kind() == opendal::ErrorKind::NotFound => Ok(Cache::Miss), Err(e) => { warn!("Got unexpected error: {:?}", e); Ok(Cache::Miss) } } } async fn put(&self, key: &str, entry: CacheWrite) -> Result { let start = std::time::Instant::now(); self.write(&normalize_key(key), entry.finish()?).await?; Ok(start.elapsed()) } async fn check(&self) -> Result { use opendal::ErrorKind; let path = ".sccache_check"; // Read is required, return error directly if we can't read . match self.read(path).await { Ok(_) => (), // Read not exist file with not found is ok. Err(err) if err.kind() == ErrorKind::NotFound => (), // Tricky Part. // // We tolerate rate limited here to make sccache keep running. // For the worse case, we will miss all the cache. // // In some super rare cases, user could configure storage in wrong // and hitting other services rate limit. There are few things we // can do, so we will print our the error here to make users know // about it. Err(err) if err.kind() == ErrorKind::RateLimited => { eprintln!("cache storage read check: {err:?}, but we decide to keep running") } Err(err) => bail!("cache storage failed to read: {:?}", err), }; let can_write = match self.write(path, "Hello, World!").await { Ok(_) => true, Err(err) if err.kind() == ErrorKind::AlreadyExists => true, // Tolerate all other write errors because we can do read at least. 
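                // (Consequence worth spelling out: a credential that can read
                // but not write, for example a read-only bucket policy,
                // degrades the cache to `CacheMode::ReadOnly` below rather
                // than aborting startup.)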
Err(err) => { eprintln!("storage write check failed: {err:?}"); false } }; let mode = if can_write { CacheMode::ReadWrite } else { CacheMode::ReadOnly }; debug!("storage check result: {mode:?}"); Ok(mode) } fn location(&self) -> String { let meta = self.info(); format!( "{}, name: {}, prefix: {}", meta.scheme(), meta.name(), meta.root() ) } async fn current_size(&self) -> Result> { Ok(None) } async fn max_size(&self) -> Result> { Ok(None) } } /// Normalize key `abcdef` into `a/b/c/abcdef` pub(in crate::cache) fn normalize_key(key: &str) -> String { format!("{}/{}/{}/{}", &key[0..1], &key[1..2], &key[2..3], &key) } /// Get a suitable `Storage` implementation from configuration. #[allow(clippy::cognitive_complexity)] // TODO simplify! pub fn storage_from_config( config: &Config, pool: &tokio::runtime::Handle, ) -> Result> { if let Some(cache_type) = &config.cache { match cache_type { #[cfg(feature = "azure")] CacheType::Azure(config::AzureCacheConfig { ref connection_string, ref container, ref key_prefix, }) => { debug!("Init azure cache with container {container}, key_prefix {key_prefix}"); let storage = AzureBlobCache::build(connection_string, container, key_prefix) .map_err(|err| anyhow!("create azure cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[cfg(feature = "gcs")] CacheType::GCS(config::GCSCacheConfig { ref bucket, ref key_prefix, ref cred_path, rw_mode, ref service_account, ref credential_url, }) => { debug!("Init gcs cache with bucket {bucket}, key_prefix {key_prefix}"); let storage = GCSCache::build( bucket, key_prefix, cred_path.as_deref(), service_account.as_deref(), (*rw_mode).into(), credential_url.as_deref(), ) .map_err(|err| anyhow!("create gcs cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[cfg(feature = "gha")] CacheType::GHA(config::GHACacheConfig { ref version, .. }) => { debug!("Init gha cache with version {version}"); let storage = GHACache::build(version) .map_err(|err| anyhow!("create gha cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[cfg(feature = "memcached")] CacheType::Memcached(config::MemcachedCacheConfig { ref url, ref username, ref password, ref expiration, ref key_prefix, }) => { debug!("Init memcached cache with url {url}"); let storage = MemcachedCache::build( url, username.as_deref(), password.as_deref(), key_prefix, *expiration, ) .map_err(|err| anyhow!("create memcached cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[cfg(feature = "redis")] CacheType::Redis(config::RedisCacheConfig { ref endpoint, ref cluster_endpoints, ref username, ref password, ref db, ref url, ref ttl, ref key_prefix, }) => { let storage = match (endpoint, cluster_endpoints, url) { (Some(url), None, None) => { debug!("Init redis single-node cache with url {url}"); RedisCache::build_single( url, username.as_deref(), password.as_deref(), *db, key_prefix, *ttl, ) } (None, Some(urls), None) => { debug!("Init redis cluster cache with urls {urls}"); RedisCache::build_cluster( urls, username.as_deref(), password.as_deref(), *db, key_prefix, *ttl, ) } (None, None, Some(url)) => { warn!("Init redis single-node cache from deprecated API with url {url}"); if username.is_some() || password.is_some() || *db != crate::config::DEFAULT_REDIS_DB { bail!("`username`, `password` and `db` has no effect when `url` is set. 
Please use `endpoint` or `cluster_endpoints` for new API accessing"); } RedisCache::build_from_url(url, key_prefix, *ttl) } _ => bail!("Only one of `endpoint`, `cluster_endpoints`, `url` must be set"), } .map_err(|err| anyhow!("create redis cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[cfg(feature = "s3")] CacheType::S3(ref c) => { debug!( "Init s3 cache with bucket {}, endpoint {:?}", c.bucket, c.endpoint ); let storage = S3Cache::build( &c.bucket, c.region.as_deref(), &c.key_prefix, c.no_credentials, c.endpoint.as_deref(), c.use_ssl, c.server_side_encryption, ) .map_err(|err| anyhow!("create s3 cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[cfg(feature = "webdav")] CacheType::Webdav(ref c) => { debug!("Init webdav cache with endpoint {}", c.endpoint); let storage = WebdavCache::build( &c.endpoint, &c.key_prefix, c.username.as_deref(), c.password.as_deref(), c.token.as_deref(), ) .map_err(|err| anyhow!("create webdav cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[cfg(feature = "oss")] CacheType::OSS(ref c) => { debug!( "Init oss cache with bucket {}, endpoint {:?}", c.bucket, c.endpoint ); let storage = OSSCache::build( &c.bucket, &c.key_prefix, c.endpoint.as_deref(), c.no_credentials, ) .map_err(|err| anyhow!("create oss cache failed: {err:?}"))?; return Ok(Arc::new(storage)); } #[allow(unreachable_patterns)] // if we build only with `cargo build --no-default-features` // we only want to use sccache with a local cache (no remote storage) _ => {} } } let (dir, size) = (&config.fallback_cache.dir, config.fallback_cache.size); let preprocessor_cache_mode_config = config.fallback_cache.preprocessor_cache_mode; let rw_mode = config.fallback_cache.rw_mode.into(); debug!("Init disk cache with dir {:?}, size {}", dir, size); Ok(Arc::new(DiskCache::new( dir, size, pool, preprocessor_cache_mode_config, rw_mode, ))) } #[cfg(test)] mod test { use super::*; use crate::config::CacheModeConfig; #[test] fn test_normalize_key() { assert_eq!( normalize_key("0123456789abcdef0123456789abcdef"), "0/1/2/0123456789abcdef0123456789abcdef" ); } #[test] fn test_read_write_mode_local() { let runtime = tokio::runtime::Builder::new_current_thread() .enable_all() .worker_threads(1) .build() .unwrap(); // Use disk cache. 
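        // The blocks below exercise both modes: with ReadWrite, `put` and
        // `put_preprocessor_cache_entry` must succeed; re-opening the same
        // directory as ReadOnly must then fail each call with
        // "Cannot write to a read-only cache" (see `DiskCache::put`).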
let mut config = Config { cache: None, ..Default::default() }; let tempdir = tempfile::Builder::new() .prefix("sccache_test_rust_cargo") .tempdir() .context("Failed to create tempdir") .unwrap(); let cache_dir = tempdir.path().join("cache"); fs::create_dir(&cache_dir).unwrap(); config.fallback_cache.dir = cache_dir; // Test Read Write config.fallback_cache.rw_mode = CacheModeConfig::ReadWrite; { let cache = storage_from_config(&config, runtime.handle()).unwrap(); runtime.block_on(async move { cache.put("test1", CacheWrite::default()).await.unwrap(); cache .put_preprocessor_cache_entry("test1", PreprocessorCacheEntry::default()) .await .unwrap(); }); } // Test Read-only config.fallback_cache.rw_mode = CacheModeConfig::ReadOnly; { let cache = storage_from_config(&config, runtime.handle()).unwrap(); runtime.block_on(async move { assert_eq!( cache .put("test1", CacheWrite::default()) .await .unwrap_err() .to_string(), "Cannot write to a read-only cache" ); assert_eq!( cache .put_preprocessor_cache_entry("test1", PreprocessorCacheEntry::default()) .await .unwrap_err() .to_string(), "Cannot write to a read-only cache" ); }); } } } mozilla-sccache-40c3d6b/src/cache/disk.rs000066400000000000000000000156451475712407500203400ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::{Cache, CacheMode, CacheRead, CacheWrite, Storage}; use crate::compiler::PreprocessorCacheEntry; use crate::lru_disk_cache::LruDiskCache; use crate::lru_disk_cache::{Error as LruError, ReadSeek}; use async_trait::async_trait; use std::ffi::{OsStr, OsString}; use std::io::{BufWriter, Write}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use crate::errors::*; use super::{normalize_key, PreprocessorCacheModeConfig}; enum LazyDiskCache { Uninit { root: OsString, max_size: u64 }, Init(LruDiskCache), } impl LazyDiskCache { fn get_or_init(&mut self) -> Result<&mut LruDiskCache> { match self { LazyDiskCache::Uninit { root, max_size } => { *self = LazyDiskCache::Init(LruDiskCache::new(&root, *max_size)?); self.get_or_init() } LazyDiskCache::Init(d) => Ok(d), } } fn get(&mut self) -> Option<&mut LruDiskCache> { match self { LazyDiskCache::Uninit { .. } => None, LazyDiskCache::Init(d) => Some(d), } } fn capacity(&self) -> u64 { match self { LazyDiskCache::Uninit { max_size, .. } => *max_size, LazyDiskCache::Init(d) => d.capacity(), } } fn path(&self) -> &Path { match self { LazyDiskCache::Uninit { root, .. } => root.as_ref(), LazyDiskCache::Init(d) => d.path(), } } } /// A cache that stores entries at local disk paths. pub struct DiskCache { /// `LruDiskCache` does all the real work here. lru: Arc>, /// Thread pool to execute disk I/O pool: tokio::runtime::Handle, preprocessor_cache_mode_config: PreprocessorCacheModeConfig, preprocessor_cache: Arc>, rw_mode: CacheMode, } impl DiskCache { /// Create a new `DiskCache` rooted at `root`, with `max_size` as the maximum cache size on-disk, in bytes. 
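    ///
    /// A construction sketch (illustrative; the path and the 10 GiB size are
    /// arbitrary choices, not defaults):
    ///
    /// ```ignore
    /// let cache = DiskCache::new(
    ///     "/var/cache/sccache",
    ///     10 * 1024 * 1024 * 1024,
    ///     runtime.handle(),
    ///     PreprocessorCacheModeConfig::activated(),
    ///     CacheMode::ReadWrite,
    /// );
    /// ```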
pub fn new>( root: T, max_size: u64, pool: &tokio::runtime::Handle, preprocessor_cache_mode_config: PreprocessorCacheModeConfig, rw_mode: CacheMode, ) -> DiskCache { DiskCache { lru: Arc::new(Mutex::new(LazyDiskCache::Uninit { root: root.as_ref().to_os_string(), max_size, })), pool: pool.clone(), preprocessor_cache_mode_config, preprocessor_cache: Arc::new(Mutex::new(LazyDiskCache::Uninit { root: Path::new(root.as_ref()) .join("preprocessor") .into_os_string(), max_size, })), rw_mode, } } } /// Make a path to the cache entry with key `key`. fn make_key_path(key: &str) -> PathBuf { Path::new(&key[0..1]).join(&key[1..2]).join(key) } #[async_trait] impl Storage for DiskCache { async fn get(&self, key: &str) -> Result { trace!("DiskCache::get({})", key); let path = make_key_path(key); let lru = self.lru.clone(); let key = key.to_owned(); self.pool .spawn_blocking(move || { let io = match lru.lock().unwrap().get_or_init()?.get(&path) { Ok(f) => f, Err(LruError::FileNotInCache) => { trace!("DiskCache::get({}): FileNotInCache", key); return Ok(Cache::Miss); } Err(LruError::Io(e)) => { trace!("DiskCache::get({}): IoError: {:?}", key, e); return Err(e.into()); } Err(_) => unreachable!(), }; let hit = CacheRead::from(io)?; Ok(Cache::Hit(hit)) }) .await? } async fn put(&self, key: &str, entry: CacheWrite) -> Result { // We should probably do this on a background thread if we're going to buffer // everything in memory... trace!("DiskCache::finish_put({})", key); if self.rw_mode == CacheMode::ReadOnly { return Err(anyhow!("Cannot write to a read-only cache")); } let lru = self.lru.clone(); let key = make_key_path(key); self.pool .spawn_blocking(move || { let start = Instant::now(); let v = entry.finish()?; let mut f = lru .lock() .unwrap() .get_or_init()? .prepare_add(key, v.len() as u64)?; f.as_file_mut().write_all(&v)?; lru.lock().unwrap().get().unwrap().commit(f)?; Ok(start.elapsed()) }) .await? } async fn check(&self) -> Result { Ok(self.rw_mode) } fn location(&self) -> String { format!("Local disk: {:?}", self.lru.lock().unwrap().path()) } async fn current_size(&self) -> Result> { Ok(self.lru.lock().unwrap().get().map(|l| l.size())) } async fn max_size(&self) -> Result> { Ok(Some(self.lru.lock().unwrap().capacity())) } fn preprocessor_cache_mode_config(&self) -> PreprocessorCacheModeConfig { self.preprocessor_cache_mode_config } async fn get_preprocessor_cache_entry(&self, key: &str) -> Result>> { let key = normalize_key(key); Ok(self .preprocessor_cache .lock() .unwrap() .get_or_init()? .get(key) .ok()) } async fn put_preprocessor_cache_entry( &self, key: &str, preprocessor_cache_entry: PreprocessorCacheEntry, ) -> Result<()> { if self.rw_mode == CacheMode::ReadOnly { return Err(anyhow!("Cannot write to a read-only cache")); } let key = normalize_key(key); let mut f = self .preprocessor_cache .lock() .unwrap() .get_or_init()? .prepare_add(key, 0)?; preprocessor_cache_entry.serialize_to(BufWriter::new(f.as_file_mut()))?; Ok(self .preprocessor_cache .lock() .unwrap() .get() .unwrap() .commit(f)?) } } mozilla-sccache-40c3d6b/src/cache/gcs.rs000066400000000000000000000101021475712407500201410ustar00rootroot00000000000000// Copyright 2017 Mozilla Foundation // Copyright 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::CacheMode; use crate::errors::*; use opendal::Operator; use opendal::{layers::LoggingLayer, services::Gcs}; use reqsign::{GoogleToken, GoogleTokenLoad}; use reqwest::Client; use serde::Deserialize; use url::Url; use super::http_client::set_user_agent; fn rw_to_scope(mode: CacheMode) -> &'static str { match mode { CacheMode::ReadOnly => "https://www.googleapis.com/auth/devstorage.read_only", CacheMode::ReadWrite => "https://www.googleapis.com/auth/devstorage.read_write", } } /// A cache that stores entries in Google Cloud Storage pub struct GCSCache; impl GCSCache { /// Create a new `GCSCache` storing data in `bucket` pub fn build( bucket: &str, key_prefix: &str, cred_path: Option<&str>, service_account: Option<&str>, rw_mode: CacheMode, credential_url: Option<&str>, ) -> Result { let mut builder = Gcs::default() .bucket(bucket) .root(key_prefix) .scope(rw_to_scope(rw_mode)) .http_client(set_user_agent()); if let Some(service_account) = service_account { builder = builder.service_account(service_account); } if let Some(path) = cred_path { builder = builder.credential_path(path); } if let Some(cred_url) = credential_url { let _ = Url::parse(cred_url) .map_err(|err| anyhow!("gcs credential url is invalid: {err:?}"))?; builder = builder.customized_token_loader(Box::new(TaskClusterTokenLoader { scope: rw_to_scope(rw_mode).to_string(), url: cred_url.to_string(), })); } let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } } /// TaskClusterTokenLoeader is used to load tokens from [TaskCluster](https://taskcluster.net/) /// /// This feature is required to run [mozilla's CI](https://searchfox.org/mozilla-central/source/build/mozconfig.cache#67-84): /// /// ```txt /// export SCCACHE_GCS_CREDENTIALS_URL=http://taskcluster/auth/v1/gcp/credentials/$SCCACHE_GCS_PROJECT/${bucket}@$SCCACHE_GCS_PROJECT.iam.gserviceaccount.com" /// ``` /// /// Reference: [gcpCredentials](https://docs.taskcluster.net/docs/reference/platform/auth/api#gcpCredentials) #[derive(Debug)] struct TaskClusterTokenLoader { scope: String, url: String, } #[async_trait::async_trait] impl GoogleTokenLoad for TaskClusterTokenLoader { async fn load(&self, client: Client) -> Result> { debug!("gcs: start to load token from: {}", &self.url); let res = client.get(&self.url).send().await?; if res.status().is_success() { let resp = res.json::().await?; debug!("gcs: token load succeeded for scope: {}", &self.scope); // TODO: we can parse expire time instead using hardcode 1 hour. Ok(Some(GoogleToken::new( &resp.access_token, 3600, &self.scope, ))) } else { let status_code = res.status(); let content = res.text().await?; Err(anyhow!( "token load failed for: code: {status_code}, {content}" )) } } } #[derive(Deserialize, Default)] #[serde(default, rename_all(deserialize = "camelCase"))] struct TaskClusterToken { access_token: String, expire_time: String, } mozilla-sccache-40c3d6b/src/cache/gha.rs000066400000000000000000000031311475712407500201300ustar00rootroot00000000000000// Copyright 2022 Bitski Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use opendal::layers::LoggingLayer; use opendal::services::Ghac; use opendal::Operator; use crate::errors::*; use crate::VERSION; use super::http_client::set_user_agent; /// A cache that stores entries in GHA Cache Services. pub struct GHACache; impl GHACache { pub fn build(version: &str) -> Result { let mut builder = Ghac::default() // This is the prefix of gha cache. // From user side, cache key will be like `sccache/f/c/b/fcbxxx` // // User customization is theoretically supported, but I decided // to see the community feedback first. .root("/sccache") .http_client(set_user_agent()); builder = if version.is_empty() { builder.version(&format!("sccache-v{VERSION}")) } else { builder.version(&format!("sccache-v{VERSION}-{version}")) }; let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } } mozilla-sccache-40c3d6b/src/cache/http_client.rs000066400000000000000000000005551475712407500217150ustar00rootroot00000000000000use opendal::raw::HttpClient; use reqwest::ClientBuilder; /// Set the user agent (helps with monitoring on the server side) pub fn set_user_agent() -> HttpClient { let user_agent = format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")); let client = ClientBuilder::new().user_agent(user_agent).build().unwrap(); HttpClient::with(client) } mozilla-sccache-40c3d6b/src/cache/memcached.rs000066400000000000000000000030331475712407500213000ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // Copyright 2017 David Michael Barr // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::time::Duration; use opendal::layers::LoggingLayer; use opendal::services::Memcached; use opendal::Operator; use crate::errors::*; #[derive(Clone)] pub struct MemcachedCache; impl MemcachedCache { pub fn build( url: &str, username: Option<&str>, password: Option<&str>, key_prefix: &str, expiration: u32, ) -> Result { let mut builder = Memcached::default().endpoint(url); if let Some(username) = username { builder = builder.username(username); } if let Some(password) = password { builder = builder.password(password); } builder = builder .root(key_prefix) .default_ttl(Duration::from_secs(expiration.into())); let op = Operator::new(builder)? 
.layer(LoggingLayer::default()) .finish(); Ok(op) } } mozilla-sccache-40c3d6b/src/cache/mod.rs000066400000000000000000000022641475712407500201560ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #[cfg(feature = "azure")] pub mod azure; #[allow(clippy::module_inception)] pub mod cache; pub mod disk; #[cfg(feature = "gcs")] pub mod gcs; #[cfg(feature = "gha")] pub mod gha; #[cfg(feature = "memcached")] pub mod memcached; #[cfg(feature = "oss")] pub mod oss; pub mod readonly; #[cfg(feature = "redis")] pub mod redis; #[cfg(feature = "s3")] pub mod s3; #[cfg(feature = "webdav")] pub mod webdav; #[cfg(any( feature = "azure", feature = "gcs", feature = "gha", feature = "s3", feature = "webdav", feature = "oss" ))] pub(crate) mod http_client; pub use crate::cache::cache::*; mozilla-sccache-40c3d6b/src/cache/oss.rs000066400000000000000000000030251475712407500201770ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use opendal::layers::LoggingLayer; use opendal::services::Oss; use opendal::Operator; use crate::errors::*; use super::http_client::set_user_agent; pub struct OSSCache; // Implement the Object Storage Service for Alibaba cloud impl OSSCache { pub fn build( bucket: &str, key_prefix: &str, endpoint: Option<&str>, no_credentials: bool, ) -> Result { let mut builder = Oss::default() .bucket(bucket) .root(key_prefix) .http_client(set_user_agent()); if let Some(endpoint) = endpoint { builder = builder.endpoint(endpoint); } if no_credentials { // Allow anonymous access to OSS so that OpenDAL will not // throw error when no credentials are provided. builder = builder.allow_anonymous(); } let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } } mozilla-sccache-40c3d6b/src/cache/readonly.rs000066400000000000000000000113061475712407500212110ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
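// A wrapping sketch: `ReadOnlyStorage` holds an `Arc` to any inner `Storage`
// (the tests below build one around `MockStorage`); reads pass through while
// every write fails. Assuming `inner_cache` is some already-built cache:
//
//     let storage = ReadOnlyStorage(Arc::new(inner_cache));
//     // Inside an async context:
//     assert_eq!(storage.check().await.unwrap(), CacheMode::ReadOnly);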
use std::sync::Arc; use std::time::Duration; use async_trait::async_trait; use crate::cache::{Cache, CacheMode, CacheWrite, Storage}; use crate::compiler::PreprocessorCacheEntry; use crate::errors::*; use super::PreprocessorCacheModeConfig; pub struct ReadOnlyStorage(pub Arc); #[async_trait] impl Storage for ReadOnlyStorage { async fn get(&self, key: &str) -> Result { self.0.get(key).await } /// Put `entry` in the cache under `key`. /// /// Returns a `Future` that will provide the result or error when the put is /// finished. async fn put(&self, _key: &str, _entry: CacheWrite) -> Result { Err(anyhow!("Cannot write to read-only storage")) } /// Check the cache capability. /// /// The ReadOnlyStorage cache is always read-only. async fn check(&self) -> Result { Ok(CacheMode::ReadOnly) } /// Get the storage location. fn location(&self) -> String { self.0.location() } /// Get the current storage usage, if applicable. async fn current_size(&self) -> Result> { self.0.current_size().await } /// Get the maximum storage size, if applicable. async fn max_size(&self) -> Result> { self.0.max_size().await } /// Return the config for preprocessor cache mode if applicable fn preprocessor_cache_mode_config(&self) -> PreprocessorCacheModeConfig { self.0.preprocessor_cache_mode_config() } /// Return the preprocessor cache entry for a given preprocessor key, /// if it exists. /// Only applicable when using preprocessor cache mode. async fn get_preprocessor_cache_entry( &self, key: &str, ) -> Result>> { self.0.get_preprocessor_cache_entry(key).await } /// Insert a preprocessor cache entry at the given preprocessor key, /// overwriting the entry if it exists. /// Only applicable when using preprocessor cache mode. async fn put_preprocessor_cache_entry( &self, _key: &str, _preprocessor_cache_entry: PreprocessorCacheEntry, ) -> Result<()> { Err(anyhow!("Cannot write to read-only storage")) } } #[cfg(test)] mod test { use futures::FutureExt; use super::*; use crate::test::mock_storage::MockStorage; #[test] fn readonly_storage_is_readonly() { let storage = ReadOnlyStorage(Arc::new(MockStorage::new(None, false))); assert_eq!( storage.check().now_or_never().unwrap().unwrap(), CacheMode::ReadOnly ); } #[test] fn readonly_storage_forwards_preprocessor_cache_mode_config() { let storage_no_preprocessor_cache = ReadOnlyStorage(Arc::new(MockStorage::new(None, false))); assert!( !storage_no_preprocessor_cache .preprocessor_cache_mode_config() .use_preprocessor_cache_mode ); let storage_with_preprocessor_cache = ReadOnlyStorage(Arc::new(MockStorage::new(None, true))); assert!( storage_with_preprocessor_cache .preprocessor_cache_mode_config() .use_preprocessor_cache_mode ); } #[test] fn readonly_storage_put_err() { let runtime = tokio::runtime::Builder::new_current_thread() .enable_all() .worker_threads(1) .build() .unwrap(); let storage = ReadOnlyStorage(Arc::new(MockStorage::new(None, true))); runtime.block_on(async move { assert_eq!( storage .put("test1", CacheWrite::default()) .await .unwrap_err() .to_string(), "Cannot write to read-only storage" ); assert_eq!( storage .put_preprocessor_cache_entry("test1", PreprocessorCacheEntry::default()) .await .unwrap_err() .to_string(), "Cannot write to read-only storage" ); }); } } mozilla-sccache-40c3d6b/src/cache/redis.rs000066400000000000000000000063371475712407500205120ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // Copyright 2016 Felix Obenhuber // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in 
compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors::*; use opendal::layers::LoggingLayer; use opendal::services::Redis; use opendal::Operator; use std::collections::HashMap; use std::time::Duration; use url::Url; /// A cache that stores entries in a Redis. pub struct RedisCache; impl RedisCache { /// Create a new `RedisCache` for the given URL. pub fn build_from_url(url: &str, key_prefix: &str, ttl: u64) -> Result { let parsed = Url::parse(url)?; let mut builder = Redis::default() .endpoint(parsed.as_str()) .username(parsed.username()) .password(parsed.password().unwrap_or_default()) .root(key_prefix); if ttl != 0 { builder = builder.default_ttl(Duration::from_secs(ttl)); } let options: HashMap<_, _> = parsed .query_pairs() .map(|(k, v)| (k.to_string(), v.to_string())) .collect(); builder = builder.db(options .get("db") .map(|v| v.parse().unwrap_or_default()) .unwrap_or_default()); let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } /// Create a new `RedisCache` for the given single instance. pub fn build_single( endpoint: &str, username: Option<&str>, password: Option<&str>, db: u32, key_prefix: &str, ttl: u64, ) -> Result { let builder = Redis::default().endpoint(endpoint); Self::build_common(builder, username, password, db, key_prefix, ttl) } /// Create a new `RedisCache` for the given cluster. pub fn build_cluster( endpoints: &str, username: Option<&str>, password: Option<&str>, db: u32, key_prefix: &str, ttl: u64, ) -> Result { let builder = Redis::default().cluster_endpoints(endpoints); Self::build_common(builder, username, password, db, key_prefix, ttl) } fn build_common( mut builder: Redis, username: Option<&str>, password: Option<&str>, db: u32, key_prefix: &str, ttl: u64, ) -> Result { builder = builder .username(username.unwrap_or_default()) .password(password.unwrap_or_default()) .db(db.into()) .root(key_prefix); if ttl != 0 { builder = builder.default_ttl(Duration::from_secs(ttl)); } let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } } mozilla-sccache-40c3d6b/src/cache/s3.rs000066400000000000000000000122251475712407500177220ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
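// A build sketch (bucket, region and endpoint are illustrative; passing
// `use_ssl: None` keeps whatever scheme the endpoint already carries, as the
// `endpoint_resolver` tests below demonstrate):
//
//     let op = S3Cache::build(
//         "my-sccache-bucket",                         // bucket
//         Some("us-east-1"),                           // region
//         "sccache/",                                  // key_prefix
//         false,                                       // no_credentials
//         Some("https://s3-us-east-1.amazonaws.com"),  // endpoint
//         None,                                        // use_ssl
//         None,                                        // server_side_encryption
//     )?;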
use opendal::layers::LoggingLayer; use opendal::services::S3; use opendal::Operator; use crate::errors::*; use super::http_client::set_user_agent; pub struct S3Cache; impl S3Cache { pub fn build( bucket: &str, region: Option<&str>, key_prefix: &str, no_credentials: bool, endpoint: Option<&str>, use_ssl: Option, server_side_encryption: Option, ) -> Result { let mut builder = S3::default() .http_client(set_user_agent()) .bucket(bucket) .root(key_prefix); if let Some(region) = region { builder = builder.region(region); } if no_credentials { builder = builder .disable_config_load() // Disable EC2 metadata to avoid OpenDAL trying to load // credentials from EC2 metadata. // // A.k.a, don't try to visit `http://169.254.169.254` .disable_ec2_metadata() // Allow anonymous access to S3 so that OpenDAL will not // throw error when no credentials are provided. .allow_anonymous(); } if let Some(endpoint) = endpoint { builder = builder.endpoint(&endpoint_resolver(endpoint, use_ssl)?); } if server_side_encryption.unwrap_or_default() { builder = builder.server_side_encryption_with_s3_key(); } let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } } /// Resolve given endpoint along with use_ssl settings. fn endpoint_resolver(endpoint: &str, use_ssl: Option) -> Result { let endpoint_uri: http::Uri = endpoint .try_into() .map_err(|err| anyhow!("input endpoint {endpoint} is invalid: {:?}", err))?; let mut parts = endpoint_uri.into_parts(); match use_ssl { Some(true) => { parts.scheme = Some(http::uri::Scheme::HTTPS); } Some(false) => { parts.scheme = Some(http::uri::Scheme::HTTP); } None => { if parts.scheme.is_none() { parts.scheme = Some(http::uri::Scheme::HTTP); } } } // path_and_query is required when scheme is set if parts.path_and_query.is_none() { parts.path_and_query = Some(http::uri::PathAndQuery::from_static("/")); } Ok(http::Uri::from_parts(parts)?.to_string()) } #[cfg(test)] mod test { use super::*; #[test] fn test_endpoint_resolver() -> Result<()> { let cases = vec![ ( "no scheme without use_ssl", "s3-us-east-1.amazonaws.com", None, "http://s3-us-east-1.amazonaws.com/", ), ( "http without use_ssl", "http://s3-us-east-1.amazonaws.com", None, "http://s3-us-east-1.amazonaws.com/", ), ( "https without use_ssl", "https://s3-us-east-1.amazonaws.com", None, "https://s3-us-east-1.amazonaws.com/", ), ( "no scheme with use_ssl", "s3-us-east-1.amazonaws.com", Some(true), "https://s3-us-east-1.amazonaws.com/", ), ( "http with use_ssl", "http://s3-us-east-1.amazonaws.com", Some(true), "https://s3-us-east-1.amazonaws.com/", ), ( "https with use_ssl", "https://s3-us-east-1.amazonaws.com", Some(true), "https://s3-us-east-1.amazonaws.com/", ), ( "no scheme with not use_ssl", "s3-us-east-1.amazonaws.com", Some(false), "http://s3-us-east-1.amazonaws.com/", ), ( "http with not use_ssl", "http://s3-us-east-1.amazonaws.com", Some(false), "http://s3-us-east-1.amazonaws.com/", ), ( "https with not use_ssl", "https://s3-us-east-1.amazonaws.com", Some(false), "http://s3-us-east-1.amazonaws.com/", ), ]; for (name, endpoint, use_ssl, expected) in cases { let actual = endpoint_resolver(endpoint, use_ssl)?; assert_eq!(actual, expected, "{}", name); } Ok(()) } } mozilla-sccache-40c3d6b/src/cache/webdav.rs000066400000000000000000000026531475712407500206510ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors::*; use opendal::layers::LoggingLayer; use opendal::services::Webdav; use opendal::Operator; use super::http_client::set_user_agent; /// A cache that stores entries in a Webdav. pub struct WebdavCache; impl WebdavCache { /// Create a new `WebdavCache`. pub fn build( endpoint: &str, key_prefix: &str, username: Option<&str>, password: Option<&str>, token: Option<&str>, ) -> Result { let builder = Webdav::default() .endpoint(endpoint) .root(key_prefix) .username(username.unwrap_or_default()) .password(password.unwrap_or_default()) .token(token.unwrap_or_default()) .http_client(set_user_agent()); let op = Operator::new(builder)? .layer(LoggingLayer::default()) .finish(); Ok(op) } } mozilla-sccache-40c3d6b/src/client.rs000066400000000000000000000065421475712407500176150ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors::*; use crate::net::Connection; use crate::protocol::{Request, Response}; use crate::util; use byteorder::{BigEndian, ByteOrder}; use retry::{delay::Fixed, retry}; use std::io::{self, BufReader, BufWriter, Read}; /// A connection to an sccache server. pub struct ServerConnection { /// A reader for the socket connected to the server. reader: BufReader>, /// A writer for the socket connected to the server. writer: BufWriter>, } impl ServerConnection { /// Create a new connection using `stream`. pub fn new(conn: Box) -> io::Result { let write_conn = conn.try_clone()?; Ok(ServerConnection { reader: BufReader::new(conn), writer: BufWriter::new(write_conn), }) } /// Send `request` to the server, read and return a `Response`. pub fn request(&mut self, request: Request) -> Result { trace!("ServerConnection::request"); util::write_length_prefixed_bincode(&mut self.writer, request)?; trace!("ServerConnection::request: sent request"); self.read_one_response() } /// Read a single `Response` from the server. pub fn read_one_response(&mut self) -> Result { trace!("ServerConnection::read_one_response"); let mut bytes = [0; 4]; self.reader .read_exact(&mut bytes) .context("Failed to read response header")?; let len = BigEndian::read_u32(&bytes); trace!("Should read {} more bytes", len); let mut data = vec![0; len as usize]; self.reader.read_exact(&mut data)?; trace!("Done reading"); Ok(bincode::deserialize(&data)?) } } /// Establish a TCP connection to an sccache server listening on `addr`. 
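// A request sketch, assuming a server is already listening on the default
// port (4226); address construction mirrors `get_addr` in commands.rs:
//
//     let addr = crate::net::SocketAddr::with_port(4226);
//     let mut conn = connect_to_server(&addr)?;
//     let stats_response = conn.request(Request::GetStats)?;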
pub fn connect_to_server(addr: &crate::net::SocketAddr) -> io::Result { trace!("connect_to_server({addr})"); let conn = crate::net::connect(addr)?; ServerConnection::new(conn) } /// Attempt to establish a TCP connection to an sccache server listening on `addr`. /// /// If the connection fails, retry a few times. pub fn connect_with_retry(addr: &crate::net::SocketAddr) -> io::Result { trace!("connect_with_retry({addr})"); // TODOs: // * Pass the server Child in here, so we can stop retrying // if the process exited. // * Send a pipe handle to the server process so it can notify // us once it starts the server instead of us polling. match retry(Fixed::from_millis(500).take(10), || connect_to_server(addr)) { Ok(conn) => Ok(conn), Err(e) => Err(io::Error::new( io::ErrorKind::TimedOut, format!("Connection to server timed out: {:?}", e), )), } } mozilla-sccache-40c3d6b/src/cmdline.rs000066400000000000000000000310301475712407500177400ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors::*; use clap::{error::ErrorKind, Arg, ArgAction, ArgGroup, ValueEnum}; use std::env; use std::ffi::OsString; use std::path::PathBuf; use std::str::FromStr; use which::which_in; const ENV_VAR_INTERNAL_START_SERVER: &str = "SCCACHE_START_SERVER"; #[derive(Debug, Clone, ValueEnum)] pub enum StatsFormat { Text, Json, } impl StatsFormat { fn as_str(&self) -> &'static str { match self { Self::Text => "text", Self::Json => "json", } } } impl FromStr for StatsFormat { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result { match s { "text" => Ok(Self::Text), "json" => Ok(Self::Json), _ => bail!("Unrecognized stats format: {:?}", s), } } } impl Default for StatsFormat { fn default() -> Self { Self::Text } } /// A specific command to run. pub enum Command { /// Show cache statistics and exit. ShowStats(StatsFormat, bool), /// Run background server. InternalStartServer, /// Start background server as a subprocess. StartServer, /// Stop background server. StopServer, /// Zero cache statistics and exit. ZeroStats, /// Show the status of the distributed client. DistStatus, /// Perform a login to authenticate for distributed compilation. DistAuth, /// Package a toolchain for distributed compilation (executable, out) PackageToolchain(PathBuf, PathBuf), /// Run a compiler command. Compile { /// The binary to execute. exe: OsString, /// The commandline arguments to pass to `exe`. cmdline: Vec, /// The directory in which to execute the command. cwd: PathBuf, /// The environment variables to use for execution. env_vars: Vec<(OsString, OsString)>, }, DebugPreprocessorCacheEntries, } fn flag_infer_long_and_short(name: &'static str) -> Arg { flag_infer_long(name).short(name.chars().next().expect("Name needs at least one char")) } fn flag_infer_long(name: &'static str) -> Arg { Arg::new(name).long(name) } /// Get the [`clap::Command`] used for argument parsing. 
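// For example, each of the following invocations resolves to exactly one
// `Command` variant via the `one_and_only_one` argument group defined below:
//
//     sccache --show-stats --stats-format json   => Command::ShowStats(Json, false)
//     sccache --zero-stats                       => Command::ZeroStats
//     sccache cc -c foo.c -o foo.o               => Command::Compile { .. }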
fn get_clap_command() -> clap::Command { clap::Command::new(env!("CARGO_PKG_NAME")) .version(env!("CARGO_PKG_VERSION")) .max_term_width(110) .after_help(concat!( "Enabled features:\n", " S3: ", cfg!(feature = "s3"), "\n", " Redis: ", cfg!(feature = "redis"), "\n", " Memcached: ", cfg!(feature = "memcached"), "\n", " GCS: ", cfg!(feature = "gcs"), "\n", " GHA: ", cfg!(feature = "gha"), "\n", " Azure: ", cfg!(feature = "azure"), "\n", " WebDAV: ", cfg!(feature = "webdav"), "\n", " OSS: ", cfg!(feature = "oss"), "\n" )) .args(&[ flag_infer_long_and_short("show-stats") .help("show cache statistics") .action(ArgAction::SetTrue), flag_infer_long("show-adv-stats") .help("show advanced cache statistics") .action(ArgAction::SetTrue), flag_infer_long("start-server") .help("start background server") .action(ArgAction::SetTrue), flag_infer_long("debug-preprocessor-cache") .help("show all preprocessor cache entries") .action(ArgAction::SetTrue), flag_infer_long("stop-server") .help("stop background server") .action(ArgAction::SetTrue), flag_infer_long_and_short("zero-stats") .help("zero statistics counters") .action(ArgAction::SetTrue), flag_infer_long("dist-auth") .help("authenticate for distributed compilation") .action(ArgAction::SetTrue), flag_infer_long("dist-status") .help("show status of the distributed client") .action(ArgAction::SetTrue), flag_infer_long("package-toolchain") .help("package toolchain for distributed compilation") .value_parser(clap::value_parser!(PathBuf)) .num_args(2) .value_names(["EXE", "OUT"]), flag_infer_long("stats-format") .help("set output format of statistics") .value_name("FMT") .value_parser(clap::value_parser!(StatsFormat)) .default_value(StatsFormat::default().as_str()), Arg::new("CMD") .value_parser(clap::value_parser!(OsString)) .trailing_var_arg(true) .action(ArgAction::Append), ]) .group( ArgGroup::new("one_and_only_one") .args([ "dist-auth", "debug-preprocessor-cache", "dist-status", "show-stats", "show-adv-stats", "start-server", "stop-server", "zero-stats", "package-toolchain", "CMD", ]) .required(true), ) } /// Parse the commandline args into a `Result` to execute. pub fn try_parse() -> Result { trace!("parse"); let cwd = env::current_dir().context("sccache: Couldn't determine current working directory")?; // We only care if it's `1` let internal_start_server = env::var(ENV_VAR_INTERNAL_START_SERVER).as_deref() == Ok("1"); let mut args: Vec<_> = env::args_os().collect(); if !internal_start_server { if let Ok(exe) = env::current_exe() { match exe .file_stem() .and_then(|s| s.to_str()) .map(|s| s.to_lowercase()) { // If the executable has its standard name, do nothing. Some(ref e) if e == env!("CARGO_PKG_NAME") => {} // Otherwise, if it was copied/hardlinked under a different $name, act // as if it were invoked with `sccache $name`, but avoid $name resolving // to ourselves again if it's in the PATH. _ => { if let (Some(path), Some(exe_filename)) = (env::var_os("PATH"), exe.file_name()) { match which_in(exe_filename, Some(&path), &cwd) { Ok(ref full_path) if full_path.canonicalize()? == exe.canonicalize()? 
=> { if let Some(dir) = full_path.parent() { let path = env::join_paths( env::split_paths(&path).filter(|p| p != dir), ) .ok(); if let Ok(full_path) = which_in(exe_filename, path, &cwd) { args[0] = full_path.into(); } } } Ok(full_path) => args[0] = full_path.into(), Err(_) => {} } args.insert(0, env!("CARGO_PKG_NAME").into()); } } } } } let matches_result = get_clap_command().try_get_matches_from(args); // A command can either be from `ENV_VAR_INTERNAL_START_SERVER` being set or from command-line // args. Validate things so that error messages are nice and the returned opts are correct match (internal_start_server, matches_result) { (true, Err(e)) => { // Need to make sure that the error from `clap` is due to a missing command and not // some other issue if e.kind() == ErrorKind::MissingRequiredArgument { Ok(Command::InternalStartServer) } else { Err(e.into()) } } (false, Err(e)) => Err(e.into()), (true, Ok(_)) => { // `ENV_VAR_INTERNAL_START_SERVER` and a match means that more than one command was // provided bail!("`{ENV_VAR_INTERNAL_START_SERVER}=1` can't be used with other commands"); } (false, Ok(matches)) => { if matches.get_flag("show-stats") { let fmt = matches .get_one("stats-format") .cloned() .expect("There is a default value"); Ok(Command::ShowStats(fmt, false)) } else if matches.get_flag("show-adv-stats") { let fmt = matches .get_one("stats-format") .cloned() .expect("There is a default value"); Ok(Command::ShowStats(fmt, true)) } else if matches.get_flag("start-server") { Ok(Command::StartServer) } else if matches.get_flag("debug-preprocessor-cache") { Ok(Command::DebugPreprocessorCacheEntries) } else if matches.get_flag("stop-server") { Ok(Command::StopServer) } else if matches.get_flag("zero-stats") { Ok(Command::ZeroStats) } else if matches.get_flag("dist-auth") { Ok(Command::DistAuth) } else if matches.get_flag("dist-status") { Ok(Command::DistStatus) } else if matches.contains_id("package-toolchain") { let mut toolchain_values = matches .get_many("package-toolchain") .expect("`package-toolchain` requires two values") .cloned() .collect::>(); let maybe_out = toolchain_values.pop(); let maybe_exe = toolchain_values.pop(); match (maybe_exe, maybe_out) { (Some(exe), Some(out)) => Ok(Command::PackageToolchain(exe, out)), _ => unreachable!("clap should enforce two values"), } } else if matches.contains_id("CMD") { let mut env_vars = env::vars_os().collect::>(); // If we're running under rr, avoid the `LD_PRELOAD` bits, as it will // almost surely do the wrong thing, as the compiler gets executed // in a different process tree. env_vars.retain(|(k, _v)| { k != "LD_PRELOAD" && k != "RUNNING_UNDER_RR" && k != "HOSTNAME" && k != "PWD" && k != "HOST" && k != "RPM_BUILD_ROOT" && k != "SOURCE_DATE_EPOCH" && k != "RPM_PACKAGE_RELEASE" && k != "MINICOM" && k != "DESTDIR" && k != "RPM_PACKAGE_VERSION" }); let cmd = matches .get_many("CMD") .expect("CMD is required") .cloned() .collect::>(); match cmd.as_slice() { [exe, cmdline @ ..] => Ok(Command::Compile { exe: exe.to_owned(), cmdline: cmdline.to_owned(), cwd, env_vars, }), _ => unreachable!("clap should enforce at least one value in cmd"), } } else { unreachable!("Either the arg group or env variable should provide a command"); } } } } mozilla-sccache-40c3d6b/src/commands.rs000066400000000000000000000715331475712407500201420ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::storage_from_config; use crate::client::{connect_to_server, connect_with_retry, ServerConnection}; use crate::cmdline::{Command, StatsFormat}; use crate::compiler::ColorMode; use crate::config::{default_disk_cache_dir, Config}; use crate::jobserver::Client; use crate::mock_command::{CommandChild, CommandCreatorSync, ProcessCommandCreator, RunCommand}; use crate::protocol::{Compile, CompileFinished, CompileResponse, Request, Response}; use crate::server::{self, DistInfo, ServerInfo, ServerStartup, ServerStats}; use crate::util::daemonize; use byteorder::{BigEndian, ByteOrder}; use fs::{File, OpenOptions}; use fs_err as fs; use log::Level::Trace; use std::env; use std::ffi::{OsStr, OsString}; use std::io::{self, IsTerminal, Write}; #[cfg(unix)] use std::os::unix::process::ExitStatusExt; use std::path::Path; use std::process; use std::time::Duration; use strip_ansi_escapes::Writer; use tokio::io::AsyncReadExt; use tokio::runtime::Runtime; use walkdir::WalkDir; use which::which_in; use crate::errors::*; /// The default sccache server port. pub const DEFAULT_PORT: u16 = 4226; /// The number of milliseconds to wait for server startup. const SERVER_STARTUP_TIMEOUT: Duration = Duration::from_millis(10000); /// Get the port on which the server should listen. fn get_addr() -> crate::net::SocketAddr { #[cfg(unix)] if let Ok(addr) = env::var("SCCACHE_SERVER_UDS") { if let Ok(uds) = crate::net::SocketAddr::parse_uds(&addr) { return uds; } } let port = env::var("SCCACHE_SERVER_PORT") .ok() .and_then(|s| s.parse().ok()) .unwrap_or(DEFAULT_PORT); crate::net::SocketAddr::with_port(port) } /// Check if ignoring all response errors fn ignore_all_server_io_errors() -> bool { match env::var("SCCACHE_IGNORE_SERVER_IO_ERROR") { Ok(ignore_server_error) => ignore_server_error == "1", Err(_) => false, } } async fn read_server_startup_status( mut server: R, ) -> Result { // This is an async equivalent of ServerConnection::read_one_response let mut bytes = [0u8; 4]; server.read_exact(&mut bytes[..]).await?; let len = BigEndian::read_u32(&bytes); let mut data = vec![0; len as usize]; server.read_exact(data.as_mut_slice()).await?; Ok(bincode::deserialize(&data)?) } /// Re-execute the current executable as a background server, and wait /// for it to start up. #[cfg(not(windows))] fn run_server_process(startup_timeout: Option) -> Result { trace!("run_server_process"); let tempdir = tempfile::Builder::new().prefix("sccache").tempdir()?; let socket_path = tempdir.path().join("sock"); let runtime = Runtime::new()?; let exe_path = env::current_exe()?; let workdir = exe_path.parent().expect("executable path has no parent?!"); // Spawn a blocking task to bind the Unix socket. Note that the socket // must be bound before spawning `_child` below to avoid a race between // the parent binding the socket and the child connecting to it. let listener = { let _guard = runtime.enter(); tokio::net::UnixListener::bind(&socket_path)? 
}; let _child = process::Command::new(&exe_path) .current_dir(workdir) .env("SCCACHE_START_SERVER", "1") .env("SCCACHE_STARTUP_NOTIFY", &socket_path) .env("RUST_BACKTRACE", "1") .spawn()?; let startup = async move { let (socket, _) = listener.accept().await?; read_server_startup_status(socket).await }; let timeout = startup_timeout.unwrap_or(SERVER_STARTUP_TIMEOUT); runtime.block_on(async move { match tokio::time::timeout(timeout, startup).await { Ok(result) => result, Err(_elapsed) => Ok(ServerStartup::TimedOut), } }) } #[cfg(not(windows))] fn redirect_stderr(f: File) { use libc::dup2; use std::os::unix::io::IntoRawFd; // Ignore errors here. unsafe { dup2(f.into_raw_fd(), 2); } } #[cfg(windows)] fn redirect_stderr(f: File) { use std::os::windows::io::IntoRawHandle; use windows_sys::Win32::System::Console::{SetStdHandle, STD_ERROR_HANDLE}; // Ignore errors here. unsafe { SetStdHandle(STD_ERROR_HANDLE, f.into_raw_handle() as _); } } /// Create the log file and return an error if cannot be created fn create_error_log() -> Result { trace!("Create the log file"); let name = match env::var("SCCACHE_ERROR_LOG") { Ok(filename) if !filename.is_empty() => filename, _ => { bail!("Cannot read variable 'SCCACHE_ERROR_LOG'"); } }; let f = match OpenOptions::new().create(true).append(true).open(&name) { Ok(f) => f, Err(_) => { bail!("Cannot open/write log file '{}'", &name); } }; Ok(f) } /// If `SCCACHE_ERROR_LOG` is set, redirect stderr to it. fn redirect_error_log(f: File) -> Result<()> { debug!("redirecting stderr into {:?}", f); redirect_stderr(f); Ok(()) } /// Re-execute the current executable as a background server. #[cfg(windows)] fn run_server_process(startup_timeout: Option) -> Result { use futures::StreamExt; use std::mem; use std::os::windows::ffi::OsStrExt; use std::ptr; use tokio::net::windows::named_pipe; use uuid::Uuid; use windows_sys::Win32::Foundation::CloseHandle; use windows_sys::Win32::System::Threading::{ CreateProcessW, CREATE_NEW_PROCESS_GROUP, CREATE_NO_WINDOW, CREATE_UNICODE_ENVIRONMENT, PROCESS_INFORMATION, STARTUPINFOW, }; trace!("run_server_process"); // Create a mini event loop and register our named pipe server let runtime = Runtime::new()?; let pipe_name = &format!(r"\\.\pipe\{}", Uuid::new_v4().as_simple()); // Spawn a server which should come back and connect to us let exe_path = env::current_exe()?; let mut exe = OsStr::new(&exe_path) .encode_wide() .chain(Some(0u16)) .collect::>(); let mut envp = { let mut v = vec![]; let extra_vars = vec![ (OsString::from("SCCACHE_START_SERVER"), OsString::from("1")), ( OsString::from("SCCACHE_STARTUP_NOTIFY"), OsString::from(&pipe_name), ), (OsString::from("RUST_BACKTRACE"), OsString::from("1")), ]; for (key, val) in env::vars_os().chain(extra_vars) { v.extend( key.encode_wide() .chain(Some('=' as u16)) .chain(val.encode_wide()) .chain(Some(0)), ); } v.push(0); v }; let workdir = exe_path .parent() .expect("executable path has no parent?!") .as_os_str() .encode_wide() .chain(Some(0u16)) .collect::>(); // TODO: Expose `bInheritHandles` argument of `CreateProcessW` through the // standard library's `Command` type and then use that instead. 
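// Wire-format note: the startup notification read back over this pipe uses
// the same framing as `ServerConnection::read_one_response` -- see
// `read_server_startup_status` above:
//
//     [ len: u32, big-endian ][ len bytes of bincode-encoded `ServerStartup` ]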
let mut pi = PROCESS_INFORMATION { hProcess: 0, hThread: 0, dwProcessId: 0, dwThreadId: 0, }; let mut si: STARTUPINFOW = unsafe { mem::zeroed() }; si.cb = mem::size_of::() as _; if unsafe { CreateProcessW( exe.as_mut_ptr(), ptr::null_mut(), ptr::null_mut(), ptr::null_mut(), 0, CREATE_UNICODE_ENVIRONMENT | CREATE_NEW_PROCESS_GROUP | CREATE_NO_WINDOW, envp.as_mut_ptr().cast(), workdir.as_ptr(), &si, &mut pi, ) != 0 } { unsafe { CloseHandle(pi.hProcess); CloseHandle(pi.hThread); } } else { return Err(io::Error::last_os_error().into()); } fn create_named_pipe( pipe_name: &str, is_first: bool, ) -> io::Result { named_pipe::ServerOptions::new() .first_pipe_instance(is_first) .reject_remote_clients(true) .access_inbound(true) .access_outbound(true) .in_buffer_size(65536) .out_buffer_size(65536) .create(pipe_name) } let startup = async move { let pipe = create_named_pipe(pipe_name, true)?; let incoming = futures::stream::try_unfold(pipe, |listener| async move { listener.connect().await?; let new_listener = create_named_pipe(pipe_name, false)?; Ok::<_, io::Error>(Some((listener, new_listener))) }); futures::pin_mut!(incoming); let socket = incoming.next().await; let socket = socket.unwrap(); // incoming() never returns None read_server_startup_status(socket?).await }; let timeout = startup_timeout.unwrap_or(SERVER_STARTUP_TIMEOUT); runtime.block_on(async move { match tokio::time::timeout(timeout, startup).await { Ok(result) => result, Err(_elapsed) => Ok(ServerStartup::TimedOut), } }) } /// Attempt to connect to an sccache server listening on `addr`, or start one if no server is running. fn connect_or_start_server( addr: &crate::net::SocketAddr, startup_timeout: Option, ) -> Result { trace!("connect_or_start_server({addr})"); match connect_to_server(addr) { Ok(server) => Ok(server), Err(ref e) if (e.kind() == io::ErrorKind::ConnectionRefused || e.kind() == io::ErrorKind::TimedOut) || (e.kind() == io::ErrorKind::NotFound && addr.is_unix_path()) => { // If the connection was refused we probably need to start // the server. match run_server_process(startup_timeout)? { ServerStartup::Ok { addr: actual_addr } => { if addr.to_string() != actual_addr { // bail as the next connect_with_retry will fail bail!( "sccache: Listening on address {actual_addr} instead of {addr}" ); } } ServerStartup::AddrInUse => { debug!("AddrInUse: possible parallel server bootstraps, retrying..") } ServerStartup::TimedOut => bail!("Timed out waiting for server startup. Maybe the remote service is unreachable?\nRun with SCCACHE_LOG=debug SCCACHE_NO_DAEMON=1 to get more information"), ServerStartup::Err { reason } => bail!("Server startup failed: {}\nRun with SCCACHE_LOG=debug SCCACHE_NO_DAEMON=1 to get more information", reason), } let server = connect_with_retry(addr)?; Ok(server) } Err(e) => Err(e.into()), } } /// Send a `ZeroStats` request to the server, and return the `ServerInfo` request if successful. pub fn request_zero_stats(mut conn: ServerConnection) -> Result<()> { debug!("request_stats"); let response = conn.request(Request::ZeroStats).context( "failed to send zero statistics command to server or failed to receive response", )?; if let Response::ZeroStats = response { Ok(()) } else { bail!("Unexpected server response!") } } /// Send a `GetStats` request to the server, and return the `ServerInfo` request if successful. pub fn request_stats(mut conn: ServerConnection) -> Result { debug!("request_stats"); let response = conn.request(Request::GetStats).context( "Failed to send data to or receive data from server. 
Mismatch of client/server versions?", )?; if let Response::Stats(stats) = response { Ok(*stats) } else { bail!("Unexpected server response!") } } /// Send a `DistStatus` request to the server, and return `DistStatus` if successful. pub fn request_dist_status(mut conn: ServerConnection) -> Result { debug!("request_dist_status"); let response = conn .request(Request::DistStatus) .context("Failed to send data to or receive data from server")?; if let Response::DistStatus(info) = response { Ok(info) } else { bail!("Unexpected server response!") } } /// Send a `Shutdown` request to the server, and return the `ServerInfo` contained within the response if successful. pub fn request_shutdown(mut conn: ServerConnection) -> Result { debug!("request_shutdown"); //TODO: better error mapping let response = conn .request(Request::Shutdown) .context("Failed to send data to or receive data from server")?; if let Response::ShuttingDown(stats) = response { Ok(*stats) } else { bail!("Unexpected server response!") } } /// Send a `Compile` request to the server, and return the server response if successful. fn request_compile( conn: &mut ServerConnection, exe: W, args: &[X], cwd: Y, env_vars: Vec<(OsString, OsString)>, ) -> Result where W: AsRef, X: AsRef, Y: AsRef, { let req = Request::Compile(Compile { exe: exe.as_ref().to_owned().into(), cwd: cwd.as_ref().to_owned().into(), args: args.iter().map(|a| a.as_ref().to_owned()).collect(), env_vars, }); trace!("request_compile: {:?}", req); //TODO: better error mapping? let response = conn .request(req) .context("Failed to send data to or receive data from server")?; if let Response::Compile(response) = response { Ok(response) } else { bail!("Unexpected response from server") } } /// Return the signal that caused a process to exit from `status`. #[cfg(unix)] #[allow(dead_code)] fn status_signal(status: process::ExitStatus) -> Option { status.signal() } /// Not implemented for non-Unix. #[cfg(not(unix))] #[allow(dead_code)] fn status_signal(_status: process::ExitStatus) -> Option { None } /// Handle `response`, the output from running a compile on the server. /// Return the compiler exit status. fn handle_compile_finished( response: CompileFinished, stdout: &mut dyn Write, stderr: &mut dyn Write, ) -> Result { trace!("handle_compile_finished"); fn write_output( stream: impl IsTerminal, writer: &mut dyn Write, data: &[u8], color_mode: ColorMode, ) -> Result<()> { // rustc uses the `termcolor` crate which explicitly checks for TERM=="dumb", so // match that behavior here. let dumb_term = env::var("TERM").map(|v| v == "dumb").unwrap_or(false); // If the compiler options explicitly requested color output, or if this output stream // is a terminal and the compiler options didn't explicitly request non-color output, // then write the compiler output directly. if color_mode == ColorMode::On || (!dumb_term && stream.is_terminal() && color_mode != ColorMode::Off) { writer.write_all(data)?; } else { // Remove escape codes (and thus colors) while writing. let mut writer = Writer::new(writer); writer.write_all(data)?; } Ok(()) } // It might be nice if the server sent stdout/stderr as the process // ran, but then it would have to also save them in the cache as // interleaved streams to really make it work. 
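// Decision summary for `write_output` above:
//
//     ColorMode::On  -> write the bytes as-is
//     ColorMode::Off -> strip ANSI escapes while writing
//     otherwise      -> as-is only when the stream is a terminal and
//                       TERM != "dumb"; stripped in every other case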
write_output( std::io::stdout(), stdout, &response.stdout, response.color_mode, )?; write_output( std::io::stderr(), stderr, &response.stderr, response.color_mode, )?; if let Some(ret) = response.retcode { trace!("compiler exited with status {}", ret); Ok(ret) } else if let Some(signal) = response.signal { println!("sccache: Compiler killed by signal {}", signal); Ok(-2) } else { println!("sccache: Missing compiler exit status!"); Ok(-3) } } /// Handle `response`, the response from sending a `Compile` request to the server. Return the compiler exit status. /// /// If the server returned `CompileStarted`, wait for a `CompileFinished` and /// print the results. /// /// If the server returned `UnhandledCompile`, run the compilation command /// locally using `creator` and return the result. #[allow(clippy::too_many_arguments)] fn handle_compile_response( mut creator: T, runtime: &mut Runtime, conn: &mut ServerConnection, response: CompileResponse, exe: &Path, cmdline: Vec, cwd: &Path, stdout: &mut dyn Write, stderr: &mut dyn Write, ) -> Result where T: CommandCreatorSync, { match response { CompileResponse::CompileStarted => { debug!("Server sent CompileStarted"); // Wait for CompileFinished. match conn.read_one_response() { Ok(Response::CompileFinished(result)) => { return handle_compile_finished(result, stdout, stderr) } Ok(_) => bail!("unexpected response from server"), Err(e) => { match e.downcast_ref::() { Some(io_e) if io_e.kind() == io::ErrorKind::UnexpectedEof => { eprintln!( "sccache: warning: The server looks like it shut down \ unexpectedly, compiling locally instead" ); } _ => { //TODO: something better here? if ignore_all_server_io_errors() { eprintln!( "sccache: warning: error reading compile response from server \ compiling locally instead" ); } else { return Err(e) .context("error reading compile response from server"); } } } } } } CompileResponse::UnsupportedCompiler(s) => { debug!("Server sent UnsupportedCompiler: {:?}", s); bail!("Compiler not supported: {:?}", s); } CompileResponse::UnhandledCompile => { debug!("Server sent UnhandledCompile"); } }; let mut cmd = creator.new_command_sync(exe); cmd.args(&cmdline).current_dir(cwd); if log_enabled!(Trace) { trace!("running command: {:?}", cmd); } let status = runtime.block_on(async move { let child = cmd.spawn().await?; child .wait() .await .with_context(|| "failed to wait for a child") })?; Ok(status.code().unwrap_or_else(|| { if let Some(sig) = status_signal(status) { println!("sccache: Compile terminated by signal {}", sig); } // Arbitrary. 2 })) } /// Send a `Compile` request to the sccache server `conn`, and handle the response. /// /// The first entry in `cmdline` will be looked up in `path` if it is not /// an absolute path. /// See `request_compile` and `handle_compile_response`. #[allow(clippy::too_many_arguments)] pub fn do_compile( creator: T, runtime: &mut Runtime, mut conn: ServerConnection, exe: &Path, cmdline: Vec, cwd: &Path, path: Option, env_vars: Vec<(OsString, OsString)>, stdout: &mut dyn Write, stderr: &mut dyn Write, ) -> Result where T: CommandCreatorSync, { trace!("do_compile"); let exe_path = which_in(exe, path, cwd)?; let res = request_compile(&mut conn, &exe_path, &cmdline, cwd, env_vars)?; handle_compile_response( creator, runtime, &mut conn, res, &exe_path, cmdline, cwd, stdout, stderr, ) } /// Run `cmd` and return the process exit status. pub fn run_command(cmd: Command) -> Result { // Config isn't required for all commands, but if it's broken then we should flag // it early and loudly. 
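// A dispatch sketch (hypothetical caller; a real caller would obtain the
// `Command` from `cmdline::try_parse` and turn the returned code into the
// process exit status):
//
//     let code = run_command(Command::ShowStats(StatsFormat::Json, false))?;
//     std::process::exit(code);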
let config = &Config::load()?; let startup_timeout = config.server_startup_timeout; match cmd { Command::ShowStats(fmt, advanced) => { trace!("Command::ShowStats({:?})", fmt); let stats = match connect_to_server(&get_addr()) { Ok(srv) => request_stats(srv).context("failed to get stats from server")?, // If there is no server, spawning a new server would start with zero stats // anyways, so we can just return (mostly) empty stats directly. Err(_) => { let runtime = Runtime::new()?; let storage = storage_from_config(config, runtime.handle()).ok(); runtime.block_on(ServerInfo::new(ServerStats::default(), storage.as_deref()))? } }; match fmt { StatsFormat::Text => stats.print(advanced), StatsFormat::Json => serde_json::to_writer(&mut io::stdout(), &stats)?, } } Command::DebugPreprocessorCacheEntries => { trace!("Command::DebugPreprocessorCacheEntries"); let entries_dir = default_disk_cache_dir().join("preprocessor"); for entry in WalkDir::new(entries_dir).sort_by_file_name().into_iter() { let preprocessor_cache_entry_file = entry?; let path = preprocessor_cache_entry_file.path(); if !path.is_file() { continue; } println!("========================="); println!("Showing preprocessor entry file {}", &path.display()); let contents = std::fs::read(path)?; let preprocessor_cache_entry = crate::compiler::PreprocessorCacheEntry::read(&contents)?; println!("{:#?}", preprocessor_cache_entry); println!("========================="); } } Command::InternalStartServer => { trace!("Command::InternalStartServer"); if env::var("SCCACHE_ERROR_LOG").is_ok() { let f = create_error_log()?; // Can't report failure here, we're already daemonized. daemonize()?; redirect_error_log(f)?; } else { // We aren't asking for a log file daemonize()?; } server::start_server(config, &get_addr())?; } Command::StartServer => { trace!("Command::StartServer"); println!("sccache: Starting the server..."); let startup = run_server_process(startup_timeout).context("failed to start server process")?; match startup { ServerStartup::Ok { addr } => { println!("sccache: Listening on address {addr}"); } ServerStartup::TimedOut => bail!("Timed out waiting for server startup"), ServerStartup::AddrInUse => bail!("Server startup failed: Address in use"), ServerStartup::Err { reason } => bail!("Server startup failed: {}", reason), } } Command::StopServer => { trace!("Command::StopServer"); println!("Stopping sccache server..."); let server = connect_to_server(&get_addr()).context("couldn't connect to server")?; let stats = request_shutdown(server)?; stats.print(false); } Command::ZeroStats => { trace!("Command::ZeroStats"); let conn = connect_or_start_server(&get_addr(), startup_timeout)?; request_zero_stats(conn).context("couldn't zero stats on server")?; eprintln!("Statistics zeroed."); } #[cfg(feature = "dist-client")] Command::DistAuth => { use crate::config; use crate::dist; use url::Url; match &config.dist.auth { config::DistAuth::Token { .. 
} => { info!("No authentication needed for type 'token'") } config::DistAuth::Oauth2CodeGrantPKCE { client_id, auth_url, token_url, } => { let cached_config = config::CachedConfig::load()?; let parsed_auth_url = Url::parse(auth_url) .map_err(|_| anyhow!("Failed to parse URL {}", auth_url))?; let token = dist::client_auth::get_token_oauth2_code_grant_pkce( client_id, parsed_auth_url, token_url, )?; cached_config .with_mut(|c| { c.dist.auth_tokens.insert(auth_url.to_owned(), token); }) .context("Unable to save auth token")?; println!("Saved token") } config::DistAuth::Oauth2Implicit { client_id, auth_url, } => { let cached_config = config::CachedConfig::load()?; let parsed_auth_url = Url::parse(auth_url) .map_err(|_| anyhow!("Failed to parse URL {}", auth_url))?; let token = dist::client_auth::get_token_oauth2_implicit(client_id, parsed_auth_url)?; cached_config .with_mut(|c| { c.dist.auth_tokens.insert(auth_url.to_owned(), token); }) .context("Unable to save auth token")?; println!("Saved token") } }; } #[cfg(not(feature = "dist-client"))] Command::DistAuth => bail!( "Distributed compilation not compiled in, please rebuild with the dist-client feature" ), Command::DistStatus => { trace!("Command::DistStatus"); let srv = connect_or_start_server(&get_addr(), startup_timeout)?; let status = request_dist_status(srv).context("failed to get dist-status from server")?; serde_json::to_writer(&mut io::stdout(), &status)?; } #[cfg(feature = "dist-client")] Command::PackageToolchain(executable, out) => { use crate::compiler; trace!("Command::PackageToolchain({})", executable.display()); let runtime = Runtime::new()?; let jobserver = Client::new(); let creator = ProcessCommandCreator::new(&jobserver); let args: Vec<_> = env::args_os().collect(); let env: Vec<_> = env::vars_os().collect(); let out_file = File::create(out)?; let cwd = env::current_dir().expect("A current working dir should exist"); let pool = runtime.handle().clone(); runtime.block_on(async move { compiler::get_compiler_info(creator, &executable, &cwd, &args, &env, &pool, None) .await .map(|compiler| compiler.0.get_toolchain_packager()) .and_then(|packager| packager.write_pkg(out_file)) })? 
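// NOTE: the packaging flow above is entirely client-side: get_compiler_info()
// identifies the compiler, then its ToolchainPackager archives the executable
// plus its runtime dependencies into the `out` file, for later shipping to
// distributed build servers.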
} #[cfg(not(feature = "dist-client"))] Command::PackageToolchain(_executable, _out) => bail!( "Toolchain packaging not compiled in, please rebuild with the dist-client feature" ), Command::Compile { exe, cmdline, cwd, env_vars, } => { trace!("Command::Compile {{ {:?}, {:?}, {:?} }}", exe, cmdline, cwd); let jobserver = Client::new(); let conn = connect_or_start_server(&get_addr(), startup_timeout)?; let mut runtime = Runtime::new()?; let res = do_compile( ProcessCommandCreator::new(&jobserver), &mut runtime, conn, exe.as_ref(), cmdline, &cwd, env::var_os("PATH"), env_vars, &mut io::stdout(), &mut io::stderr(), ); return res.context("failed to execute compile"); } } Ok(0) } mozilla-sccache-40c3d6b/src/compiler/000077500000000000000000000000001475712407500175745ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/compiler/args.rs000066400000000000000000001141641475712407500211050ustar00rootroot00000000000000use std::cmp::Ordering; use std::error::Error; use std::ffi::OsString; use std::fmt::{self, Debug, Display}; use std::marker::PhantomData; use std::path::{Path, PathBuf}; use std::result::Result as StdResult; use std::str; pub type ArgParseResult = StdResult; pub type ArgToStringResult = StdResult; pub type PathTransformerFn<'a> = &'a mut dyn FnMut(&Path) -> Option; #[derive(Debug, PartialEq, Eq)] pub enum ArgParseError { UnexpectedEndOfArgs, InvalidUnicode(OsString), Other(&'static str), } impl Display for ArgParseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s = match self { ArgParseError::UnexpectedEndOfArgs => "Unexpected end of args".into(), ArgParseError::InvalidUnicode(s) => format!("String {:?} contained invalid unicode", s), ArgParseError::Other(s) => format!("Arg-specific parsing failed: {}", s), }; write!(f, "{}", s) } } impl Error for ArgParseError { fn cause(&self) -> Option<&dyn Error> { None } } #[derive(Debug, PartialEq, Eq)] pub enum ArgToStringError { FailedPathTransform(PathBuf), InvalidUnicode(OsString), } impl Display for ArgToStringError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s = match self { ArgToStringError::FailedPathTransform(p) => { format!("Path {:?} could not be transformed", p) } ArgToStringError::InvalidUnicode(s) => { format!("String {:?} contained invalid unicode", s) } }; write!(f, "{}", s) } } impl Error for ArgToStringError { fn source(&self) -> Option<&(dyn Error + 'static)> { None } } pub type Delimiter = Option; /// Representation of a parsed argument /// The type parameter T contains the parsed information for this argument, /// for use during argument handling (typically an enum to allow switching /// on the different kinds of argument). `Flag`s may contain a simple /// variant which influences how to do caching, whereas `WithValue`s could /// be a struct variant with parsed data from the value. #[derive(PartialEq, Eq, Clone, Debug)] pub enum Argument { /// Unknown non-flag argument ; e.g. "foo" Raw(OsString), /// Unknown flag argument ; e.g. "-foo" UnknownFlag(OsString), /// Known flag argument ; e.g. "-bar" Flag(&'static str, T), /// Known argument with a value ; e.g. "-qux bar", where the way the /// value is passed is described by the ArgDisposition type. WithValue(&'static str, T, ArgDisposition), } /// How a value is passed to an argument with a value. 
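// A condensed restatement of the variant semantics documented below, using
// abstract "-arg"/"value" forms (concrete flags are illustrative only):
//
//     Separated                  only "-arg value"
//     Concatenated(None)         only "-argvalue"
//     Concatenated(Some(b'='))   only "-arg=value"
//     CanBeConcatenated(d)       canonically "-arg value"; attached form also accepted
//     CanBeSeparated(d)          canonically "-argvalue"; separate form also accepted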
#[derive(PartialEq, Eq, Clone, Debug)] pub enum ArgDisposition { /// As "-arg value" Separated, /// As "-arg value", but "-argvalue" would be valid too CanBeConcatenated(Delimiter), /// As "-argvalue", but "-arg value" would be valid too CanBeSeparated(Delimiter), /// As "-argvalue" Concatenated(Delimiter), } pub enum NormalizedDisposition { Separated, Concatenated, } impl Argument { /// For arguments that allow both a concatenated or separated disposition, /// normalize a parsed argument to a preferred disposition. pub fn normalize(self, disposition: NormalizedDisposition) -> Self { match self { Argument::WithValue(s, v, ArgDisposition::CanBeConcatenated(d)) | Argument::WithValue(s, v, ArgDisposition::CanBeSeparated(d)) => Argument::WithValue( s, v, match disposition { NormalizedDisposition::Separated => ArgDisposition::Separated, NormalizedDisposition::Concatenated => ArgDisposition::Concatenated(d), }, ), a => a, } } pub fn to_os_string(&self) -> OsString { match *self { Argument::Raw(ref s) | Argument::UnknownFlag(ref s) => s.clone(), Argument::Flag(ref s, _) | Argument::WithValue(ref s, _, _) => s.into(), } } pub fn flag_str(&self) -> Option<&'static str> { match *self { Argument::Flag(s, _) | Argument::WithValue(s, _, _) => Some(s), _ => None, } } pub fn get_data(&self) -> Option<&T> { match *self { Argument::Flag(_, ref d) => Some(d), Argument::WithValue(_, ref d, _) => Some(d), _ => None, } } /// Transforms a parsed argument into an iterator. pub fn iter_os_strings(&self) -> Iter<'_, T> { Iter { arg: self, emitted: 0, } } /// Transforms a parsed argument into an iterator over strings, with transformed paths. #[cfg(feature = "dist-client")] pub fn iter_strings Option>( &self, path_transformer: F, ) -> IterStrings<'_, T, F> { IterStrings { arg: self, emitted: 0, path_transformer, } } } pub struct Iter<'a, T> { arg: &'a Argument, emitted: usize, } impl<'a, T: ArgumentValue> Iterator for Iter<'a, T> { type Item = OsString; fn next(&mut self) -> Option { let result = match *self.arg { Argument::Raw(ref s) | Argument::UnknownFlag(ref s) => match self.emitted { 0 => Some(s.clone()), _ => None, }, Argument::Flag(s, _) => match self.emitted { 0 => Some(s.into()), _ => None, }, Argument::WithValue(s, ref v, ref d) => match (self.emitted, d) { (0, &ArgDisposition::CanBeSeparated(d)) | (0, &ArgDisposition::Concatenated(d)) => { let mut s = OsString::from(s); let v = v.clone().into_arg_os_string(); if let Some(d) = d { if !v.is_empty() { s.push(OsString::from( str::from_utf8(&[d]).expect("delimiter should be ascii"), )); } } s.push(v); Some(s) } (0, &ArgDisposition::Separated) | (0, &ArgDisposition::CanBeConcatenated(_)) => { Some(s.into()) } (1, &ArgDisposition::Separated) | (1, &ArgDisposition::CanBeConcatenated(_)) => { Some(v.clone().into_arg_os_string()) } _ => None, }, }; if result.is_some() { self.emitted += 1; } result } } #[cfg(feature = "dist-client")] pub struct IterStrings<'a, T, F> { arg: &'a Argument, emitted: usize, path_transformer: F, } #[cfg(feature = "dist-client")] impl<'a, T: ArgumentValue, F: FnMut(&Path) -> Option> Iterator for IterStrings<'a, T, F> { type Item = ArgToStringResult; fn next(&mut self) -> Option { let result: Option = match *self.arg { Argument::Raw(ref s) | Argument::UnknownFlag(ref s) => match self.emitted { 0 => Some(s.clone().into_arg_string(&mut self.path_transformer)), _ => None, }, Argument::Flag(s, _) => match self.emitted { 0 => Some(Ok(s.to_owned())), _ => None, }, Argument::WithValue(s, ref v, ref d) => match (self.emitted, d) { (0, 
&ArgDisposition::CanBeSeparated(d)) | (0, &ArgDisposition::Concatenated(d)) => { let mut s = s.to_owned(); let v = match v.clone().into_arg_string(&mut self.path_transformer) { Ok(s) => s, Err(e) => return Some(Err(e)), }; if let Some(d) = d { if !v.is_empty() { s.push_str(str::from_utf8(&[d]).expect("delimiter should be ascii")); } } s.push_str(&v); Some(Ok(s)) } (0, &ArgDisposition::Separated) | (0, &ArgDisposition::CanBeConcatenated(_)) => { Some(Ok(s.to_owned())) } (1, &ArgDisposition::Separated) | (1, &ArgDisposition::CanBeConcatenated(_)) => { Some(v.clone().into_arg_string(&mut self.path_transformer)) } _ => None, }, }; if result.is_some() { self.emitted += 1; } result } } macro_rules! ArgData { // Collected all the arms, time to create the match { __matchify $var:ident $fn:ident ($( $fnarg:ident )*) ($( $arms:tt )*) } => { match $var { $( $arms )* } }; // Unit variant { __matchify $var:ident $fn:ident ($( $fnarg:ident )*) ($( $arms:tt )*) $x:ident, $( $rest:tt )* } => { ArgData!{ __matchify $var $fn ($($fnarg)*) ($($arms)* ArgData::$x => ().$fn($( $fnarg )*),) $($rest)* } }; // Tuple variant { __matchify $var:ident $fn:ident ($( $fnarg:ident )*) ($( $arms:tt )*) $x:ident($y:ty), $( $rest:tt )* } => { ArgData!{ __matchify $var $fn ($($fnarg)*) ($($arms)* ArgData::$x(inner) => inner.$fn($( $fnarg )*),) $($rest)* } }; { __impl $( $tok:tt )+ } => { impl IntoArg for ArgData { fn into_arg_os_string(self) -> OsString { ArgData!{ __matchify self into_arg_os_string () () $($tok)+ } } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { ArgData!{ __matchify self into_arg_string (transformer) () $($tok)+ } } } }; // PartialEq necessary for tests { pub $( $tok:tt )+ } => { #[derive(Clone, Debug, PartialEq, Eq)] pub enum ArgData { $($tok)+ } ArgData!{ __impl $( $tok )+ } }; { $( $tok:tt )+ } => { #[derive(Clone, Debug, PartialEq)] #[allow(clippy::enum_variant_names)] enum ArgData { $($tok)+ } ArgData!{ __impl $( $tok )+ } }; } // The value associated with a parsed argument pub trait ArgumentValue: IntoArg + Clone + Debug {} impl ArgumentValue for T {} pub trait FromArg: Sized { fn process(arg: OsString) -> ArgParseResult; } pub trait IntoArg: Sized { fn into_arg_os_string(self) -> OsString; fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult; } impl FromArg for OsString { fn process(arg: OsString) -> ArgParseResult { Ok(arg) } } impl FromArg for PathBuf { fn process(arg: OsString) -> ArgParseResult { Ok(arg.into()) } } impl FromArg for String { fn process(arg: OsString) -> ArgParseResult { arg.into_string().map_err(ArgParseError::InvalidUnicode) } } impl IntoArg for OsString { fn into_arg_os_string(self) -> OsString { self } fn into_arg_string(self, _transformer: PathTransformerFn<'_>) -> ArgToStringResult { self.into_string().map_err(ArgToStringError::InvalidUnicode) } } impl IntoArg for PathBuf { fn into_arg_os_string(self) -> OsString { self.into() } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { transformer(&self).ok_or(ArgToStringError::FailedPathTransform(self)) } } impl IntoArg for String { fn into_arg_os_string(self) -> OsString { self.into() } fn into_arg_string(self, _transformer: PathTransformerFn<'_>) -> ArgToStringResult { Ok(self) } } impl IntoArg for () { fn into_arg_os_string(self) -> OsString { OsString::new() } fn into_arg_string(self, _transformer: PathTransformerFn<'_>) -> ArgToStringResult { Ok(String::new()) } } pub fn split_os_string_arg(val: OsString, split: &str) -> 
ArgParseResult<(String, Option)> { let val = val.into_string().map_err(ArgParseError::InvalidUnicode)?; let mut split_it = val.splitn(2, split); let s1 = split_it.next().expect("splitn with no values"); let maybe_s2 = split_it.next(); Ok((s1.to_owned(), maybe_s2.map(|s| s.to_owned()))) } /// The description of how an argument may be parsed #[derive(PartialEq, Eq, Clone, Debug)] pub enum ArgInfo { /// An simple flag argument, of the form "-foo" Flag(&'static str, T), /// An argument with a value ; e.g. "-qux bar", where the way the /// value is passed is described by the ArgDisposition type. TakeArg( &'static str, fn(OsString) -> ArgParseResult, ArgDisposition, ), } impl ArgInfo { /// Transform an argument description into a parsed Argument, given a /// string. For arguments with a value, where the value is separate, the /// `get_next_arg` function returns the next argument, in raw `OsString` /// form. fn process(self, arg: &str, get_next_arg: F) -> ArgParseResult> where F: FnOnce() -> Option, { Ok(match self { ArgInfo::Flag(s, variant) => { debug_assert_eq!(s, arg); Argument::Flag(s, variant) } ArgInfo::TakeArg(s, create, ArgDisposition::Separated) => { debug_assert_eq!(s, arg); if let Some(a) = get_next_arg() { Argument::WithValue(s, create(a)?, ArgDisposition::Separated) } else { return Err(ArgParseError::UnexpectedEndOfArgs); } } ArgInfo::TakeArg(s, create, ArgDisposition::Concatenated(d)) => { let mut len = s.len(); debug_assert_eq!(&arg[..len], s); if let Some(d) = d { if arg.as_bytes().get(len) == Some(&d) { len += 1; } } Argument::WithValue( s, create(arg[len..].into())?, ArgDisposition::Concatenated(d), ) } ArgInfo::TakeArg(s, create, ArgDisposition::CanBeSeparated(d)) | ArgInfo::TakeArg(s, create, ArgDisposition::CanBeConcatenated(d)) => { let derived = if arg == s { ArgInfo::TakeArg(s, create, ArgDisposition::Separated) } else { ArgInfo::TakeArg(s, create, ArgDisposition::Concatenated(d)) }; match derived.process(arg, get_next_arg) { Err(ArgParseError::UnexpectedEndOfArgs) if d.is_none() => { Argument::WithValue(s, create("".into())?, ArgDisposition::Concatenated(d)) } Ok(Argument::WithValue(s, v, ArgDisposition::Concatenated(d))) => { Argument::WithValue(s, v, ArgDisposition::CanBeSeparated(d)) } Ok(Argument::WithValue(s, v, ArgDisposition::Separated)) => { Argument::WithValue(s, v, ArgDisposition::CanBeConcatenated(d)) } a => a?, } } }) } /// Returns whether the given string matches the argument description, and if not, /// how it differs. fn cmp(&self, arg: &str) -> Ordering { match self { &ArgInfo::TakeArg(s, _, ArgDisposition::CanBeSeparated(None)) | &ArgInfo::TakeArg(s, _, ArgDisposition::Concatenated(None)) if arg.starts_with(s) => { Ordering::Equal } &ArgInfo::TakeArg(s, _, ArgDisposition::CanBeSeparated(Some(d))) | &ArgInfo::TakeArg(s, _, ArgDisposition::Concatenated(Some(d))) if arg.len() > s.len() && arg.starts_with(s) => { arg.as_bytes()[s.len()].cmp(&d) } _ => self.flag_str().cmp(arg), } } fn flag_str(&self) -> &'static str { match self { &ArgInfo::Flag(s, _) | &ArgInfo::TakeArg(s, _, _) => s, } } } /// Binary search for a `key` in a sorted array of items, given a comparison /// function. This implementation is tweaked to handle the case where the /// comparison function does prefix matching, where multiple items in the array /// might match, but the last match is the one actually matching. 
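// A minimal sketch of the prefix-matching subtlety handled by `bsearch` below
// (it mirrors `test_bsearch` in the test module): with a comparator that
// treats prefixes as Equal, several entries may match one key, and the
// longest (i.e. last) match must win:
//
//     let data = ["-include", "-include-pch"];
//     let hit = bsearch("-include-pch", &data, |i, k: &&str| {
//         if k.starts_with(i) { Ordering::Equal } else { i.cmp(k) }
//     });
//     assert_eq!(hit, Some(&"-include-pch"));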
fn bsearch<K, T, F>(key: K, items: &[T], cmp: F) -> Option<&T>
where
    F: Fn(&T, &K) -> Ordering,
{
    let mut slice = items;
    while !slice.is_empty() {
        let middle = slice.len() / 2;
        match cmp(&slice[middle], &key) {
            Ordering::Equal => {
                let found_after = if slice.len() == 1 {
                    None
                } else {
                    bsearch(key, &slice[middle + 1..], cmp)
                };
                return found_after.or(Some(&slice[middle]));
            }
            Ordering::Greater => {
                slice = &slice[..middle];
            }
            Ordering::Less => {
                slice = &slice[middle + 1..];
            }
        }
    }
    None
}

/// Trait for generically searching over a "set" of ArgInfos.
pub trait SearchableArgInfo<T> {
    fn search(&self, key: &str) -> Option<&ArgInfo<T>>;
    #[cfg(debug_assertions)]
    fn check(&self) -> bool;
}

/// Allows searching over a sorted array of ArgInfo items associated with
/// extra data.
impl<T: ArgumentValue> SearchableArgInfo<T> for &'static [ArgInfo<T>] {
    fn search(&self, key: &str) -> Option<&ArgInfo<T>> {
        bsearch(key, self, |i, k| i.cmp(k))
    }
    #[cfg(debug_assertions)]
    fn check(&self) -> bool {
        self.windows(2).all(|w| {
            let a = w[0].flag_str();
            let b = w[1].flag_str();
            assert!(a < b, "{} can't precede {}", a, b);
            true
        })
    }
}

/// Allows searching over a pair of arrays of ArgInfo, where the second
/// complements or overrides the first one.
impl<T: ArgumentValue> SearchableArgInfo<T> for (&'static [ArgInfo<T>], &'static [ArgInfo<T>]) {
    fn search(&self, key: &str) -> Option<&ArgInfo<T>> {
        match (self.0.search(key), self.1.search(key)) {
            (None, None) => None,
            (Some(a), None) => Some(a),
            (None, Some(a)) => Some(a),
            (Some(a), Some(b)) => {
                if a.flag_str() > b.flag_str() {
                    Some(a)
                } else {
                    Some(b)
                }
            }
        }
    }
    #[cfg(debug_assertions)]
    fn check(&self) -> bool {
        self.0.check() && self.1.check()
    }
}

/// An `Iterator` for parsed arguments.
pub struct ArgsIter<I, T, S>
where
    I: Iterator<Item = OsString>,
    S: SearchableArgInfo<T>,
{
    arguments: I,
    arg_info: S,
    seen_double_dashes: Option<bool>,
    phantom: PhantomData<T>,
}

impl<I, T, S> ArgsIter<I, T, S>
where
    I: Iterator<Item = OsString>,
    T: ArgumentValue,
    S: SearchableArgInfo<T>,
{
    /// Create an `Iterator` for parsed arguments, given an iterator of raw
    /// `OsString` arguments, and argument descriptions.
    pub fn new(arguments: I, arg_info: S) -> Self {
        #[cfg(debug_assertions)]
        debug_assert!(arg_info.check());
        ArgsIter {
            arguments,
            arg_info,
            seen_double_dashes: None,
            phantom: PhantomData,
        }
    }

    pub fn with_double_dashes(mut self) -> Self {
        self.seen_double_dashes = Some(false);
        self
    }
}

impl<I, T, S> Iterator for ArgsIter<I, T, S>
where
    I: Iterator<Item = OsString>,
    T: ArgumentValue,
    S: SearchableArgInfo<T>,
{
    type Item = ArgParseResult<Argument<T>>;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(arg) = self.arguments.next() {
            if let Some(seen_double_dashes) = &mut self.seen_double_dashes {
                if !*seen_double_dashes && arg == "--" {
                    *seen_double_dashes = true;
                }
                if *seen_double_dashes {
                    return Some(Ok(Argument::Raw(arg)));
                }
            }
            let s = arg.to_string_lossy();
            let arguments = &mut self.arguments;
            Some(match self.arg_info.search(&s[..]) {
                Some(i) => i.clone().process(&s[..], || arguments.next()),
                None => Ok(if s.starts_with('-') {
                    Argument::UnknownFlag(arg.clone())
                } else {
                    Argument::Raw(arg.clone())
                }),
            })
        } else {
            None
        }
    }
}

/// Helper macro used to define ArgInfo::Flag's.
/// Variant is an enum variant, e.g. enum ArgType { Variant }
/// flag!("-foo", Variant)
macro_rules! flag {
    ($s:expr, $variant:expr) => {
        ArgInfo::Flag($s, $variant)
    };
}

/// Helper macro used to define ArgInfo::TakeArg's.
/// Variant is an enum variant, e.g. enum ArgType { Variant(OsString) }
/// take_arg!("-foo", OsString, Separated, Variant)
/// take_arg!("-foo", OsString, Concatenated, Variant)
/// take_arg!("-foo", OsString, Concatenated('='), Variant)
macro_rules!
take_arg { ($s:expr, $vtype:ident, Separated, $variant:expr) => { ArgInfo::TakeArg( $s, |arg: OsString| $vtype::process(arg).map($variant), ArgDisposition::Separated, ) }; ($s:expr, $vtype:ident, $d:ident, $variant:expr) => { ArgInfo::TakeArg( $s, |arg: OsString| $vtype::process(arg).map($variant), ArgDisposition::$d(None), ) }; ($s:expr, $vtype:ident, $d:ident($x:expr), $variant:expr) => { ArgInfo::TakeArg( $s, |arg: OsString| $vtype::process(arg).map($variant), ArgDisposition::$d(Some($x as u8)), ) }; } #[cfg(test)] mod tests { use super::*; use itertools::{diff_with, Diff}; use std::iter::FromIterator; macro_rules! arg { ($name:ident($x:expr)) => { Argument::$name($x.into()) }; ($name:ident($x:expr, $v:ident)) => { Argument::$name($x.into(), $v) }; ($name:ident($x:expr, $v:ident($y:expr))) => { Argument::$name($x.into(), $v($y.into())) }; ($name:ident($x:expr, $v:ident($y:expr), Separated)) => { Argument::$name($x, $v($y.into()), ArgDisposition::Separated) }; ($name:ident($x:expr, $v:ident($y:expr), $d:ident)) => { Argument::$name($x, $v($y.into()), ArgDisposition::$d(None)) }; ($name:ident($x:expr, $v:ident($y:expr), $d:ident($z:expr))) => { Argument::$name($x, $v($y.into()), ArgDisposition::$d(Some($z as u8))) }; ($name:ident($x:expr, $v:ident::$w:ident)) => { Argument::$name($x.into(), $v::$w) }; ($name:ident($x:expr, $v:ident::$w:ident($y:expr))) => { Argument::$name($x.into(), $v::$w($y.into())) }; ($name:ident($x:expr, $v:ident::$w:ident($y:expr), Separated)) => { Argument::$name($x, $v::$w($y.into()), ArgDisposition::Separated) }; ($name:ident($x:expr, $v:ident::$w:ident($y:expr), $d:ident)) => { Argument::$name($x, $v::$w($y.into()), ArgDisposition::$d(None)) }; ($name:ident($x:expr, $v:ident::$w:ident($y:expr), $d:ident($z:expr))) => { Argument::$name($x, $v::$w($y.into()), ArgDisposition::$d(Some($z as u8))) }; } ArgData! 
{ FooFlag, Foo(OsString), FooPath(PathBuf), } use self::ArgData::*; #[test] #[allow(clippy::cognitive_complexity)] fn test_arginfo_cmp() { let info = flag!("-foo", FooFlag); assert_eq!(info.cmp("-foo"), Ordering::Equal); assert_eq!(info.cmp("bar"), Ordering::Less); assert_eq!(info.cmp("-bar"), Ordering::Greater); assert_eq!(info.cmp("-qux"), Ordering::Less); assert_eq!(info.cmp("-foobar"), Ordering::Less); assert_eq!(info.cmp("-foo="), Ordering::Less); assert_eq!(info.cmp("-foo=bar"), Ordering::Less); let info = take_arg!("-foo", OsString, Separated, Foo); assert_eq!(info.cmp("-foo"), Ordering::Equal); assert_eq!(info.cmp("bar"), Ordering::Less); assert_eq!(info.cmp("-bar"), Ordering::Greater); assert_eq!(info.cmp("-qux"), Ordering::Less); assert_eq!(info.cmp("-foobar"), Ordering::Less); assert_eq!(info.cmp("-foo="), Ordering::Less); assert_eq!(info.cmp("-foo=bar"), Ordering::Less); let info = take_arg!("-foo", OsString, Concatenated, Foo); assert_eq!(info.cmp("-foo"), Ordering::Equal); assert_eq!(info.cmp("bar"), Ordering::Less); assert_eq!(info.cmp("-bar"), Ordering::Greater); assert_eq!(info.cmp("-qux"), Ordering::Less); assert_eq!(info.cmp("-foobar"), Ordering::Equal); assert_eq!(info.cmp("-foo="), Ordering::Equal); assert_eq!(info.cmp("-foo=bar"), Ordering::Equal); let info = take_arg!("-foo", OsString, Concatenated('='), Foo); assert_eq!(info.cmp("-foo"), Ordering::Equal); assert_eq!(info.cmp("bar"), Ordering::Less); assert_eq!(info.cmp("-bar"), Ordering::Greater); assert_eq!(info.cmp("-qux"), Ordering::Less); assert_eq!(info.cmp("-foobar"), Ordering::Greater); assert_eq!(info.cmp("-foo="), Ordering::Equal); assert_eq!(info.cmp("-foo=bar"), Ordering::Equal); let info = take_arg!("-foo", OsString, CanBeSeparated, Foo); assert_eq!(info.cmp("-foo"), Ordering::Equal); assert_eq!(info.cmp("bar"), Ordering::Less); assert_eq!(info.cmp("-bar"), Ordering::Greater); assert_eq!(info.cmp("-qux"), Ordering::Less); assert_eq!(info.cmp("-foobar"), Ordering::Equal); assert_eq!(info.cmp("-foo="), Ordering::Equal); assert_eq!(info.cmp("-foo=bar"), Ordering::Equal); let info = take_arg!("-foo", OsString, CanBeSeparated('='), Foo); assert_eq!(info.cmp("-foo"), Ordering::Equal); assert_eq!(info.cmp("bar"), Ordering::Less); assert_eq!(info.cmp("-bar"), Ordering::Greater); assert_eq!(info.cmp("-qux"), Ordering::Less); assert_eq!(info.cmp("-foobar"), Ordering::Greater); assert_eq!(info.cmp("-foo="), Ordering::Equal); assert_eq!(info.cmp("-foo=bar"), Ordering::Equal); } #[test] fn test_arginfo_process() { let info = flag!("-foo", FooFlag); assert_eq!( info.process("-foo", || None).unwrap(), arg!(Flag("-foo", FooFlag)) ); let info = take_arg!("-foo", OsString, Separated, Foo); assert_eq!( info.clone().process("-foo", || None).unwrap_err(), ArgParseError::UnexpectedEndOfArgs ); assert_eq!( info.process("-foo", || Some("bar".into())).unwrap(), arg!(WithValue("-foo", Foo("bar"), Separated)) ); let info = take_arg!("-foo", OsString, Concatenated, Foo); assert_eq!( info.clone().process("-foo", || None).unwrap(), arg!(WithValue("-foo", Foo(""), Concatenated)) ); assert_eq!( info.process("-foobar", || None).unwrap(), arg!(WithValue("-foo", Foo("bar"), Concatenated)) ); let info = take_arg!("-foo", OsString, Concatenated('='), Foo); assert_eq!( info.clone().process("-foo=", || None).unwrap(), arg!(WithValue("-foo", Foo(""), Concatenated('='))) ); assert_eq!( info.process("-foo=bar", || None).unwrap(), arg!(WithValue("-foo", Foo("bar"), Concatenated('='))) ); let info = take_arg!("-foo", OsString, CanBeSeparated, 
Foo); assert_eq!( info.clone().process("-foo", || None).unwrap(), arg!(WithValue("-foo", Foo(""), Concatenated)) ); assert_eq!( info.clone().process("-foobar", || None).unwrap(), arg!(WithValue("-foo", Foo("bar"), CanBeSeparated)) ); assert_eq!( info.process("-foo", || Some("bar".into())).unwrap(), arg!(WithValue("-foo", Foo("bar"), CanBeConcatenated)) ); let info = take_arg!("-foo", OsString, CanBeSeparated('='), Foo); assert_eq!( info.clone().process("-foo", || None).unwrap_err(), ArgParseError::UnexpectedEndOfArgs ); assert_eq!( info.clone().process("-foo=", || None).unwrap(), arg!(WithValue("-foo", Foo(""), CanBeSeparated('='))) ); assert_eq!( info.clone().process("-foo=bar", || None).unwrap(), arg!(WithValue("-foo", Foo("bar"), CanBeSeparated('='))) ); assert_eq!( info.process("-foo", || Some("bar".into())).unwrap(), arg!(WithValue("-foo", Foo("bar"), CanBeConcatenated('='))) ); } #[test] fn test_bsearch() { let data = vec![ ("bar", 1), ("foo", 2), ("fuga", 3), ("hoge", 4), ("plop", 5), ("qux", 6), ("zorglub", 7), ]; for item in &data { assert_eq!(bsearch(item.0, &data, |i, k| i.0.cmp(k)), Some(item)); } // Try again with an even number of items let data = &data[..6]; for item in data { assert_eq!(bsearch(item.0, data, |i, k| i.0.cmp(k)), Some(item)); } // Once more, with prefix matches let data = vec![ ("a", 1), ("ab", 2), ("abc", 3), ("abd", 4), ("abe", 5), ("abef", 6), ("abefg", 7), ]; for item in &data { assert_eq!( bsearch(item.0, &data, |i, k| if k.starts_with(i.0) { Ordering::Equal } else { i.0.cmp(k) }), Some(item) ); } // Try again with an even number of items let data = &data[..6]; for item in data { assert_eq!( bsearch(item.0, data, |i, k| if k.starts_with(i.0) { Ordering::Equal } else { i.0.cmp(k) }), Some(item) ); } } #[test] fn test_multi_search() { static ARGS: [ArgInfo; 1] = [take_arg!("-include", OsString, Concatenated, Foo)]; static ARGS2: [ArgInfo; 1] = [take_arg!("-include-pch", OsString, Concatenated, Foo)]; static ARGS3: [ArgInfo; 1] = [take_arg!("-include", PathBuf, Concatenated, FooPath)]; assert_eq!((&ARGS[..], &ARGS2[..]).search("-include"), Some(&ARGS[0])); assert_eq!( (&ARGS[..], &ARGS2[..]).search("-include-pch"), Some(&ARGS2[0]) ); assert_eq!((&ARGS2[..], &ARGS[..]).search("-include"), Some(&ARGS[0])); assert_eq!( (&ARGS2[..], &ARGS[..]).search("-include-pch"), Some(&ARGS2[0]) ); assert_eq!((&ARGS[..], &ARGS3[..]).search("-include"), Some(&ARGS3[0])); } #[test] fn test_argsiter() { ArgData! 
{ Bar, Foo(OsString), Fuga, Hoge(PathBuf), Plop, Qux(OsString), Zorglub, } // Need to explicitly refer to enum because `use` doesn't work if it's in a module // https://internals.rust-lang.org/t/pre-rfc-support-use-enum-for-function-local-enums/3853/13 static ARGS: [ArgInfo; 7] = [ flag!("-bar", ArgData::Bar), take_arg!("-foo", OsString, Separated, ArgData::Foo), flag!("-fuga", ArgData::Fuga), take_arg!("-hoge", PathBuf, Concatenated, ArgData::Hoge), flag!("-plop", ArgData::Plop), take_arg!("-qux", OsString, CanBeSeparated('='), ArgData::Qux), flag!("-zorglub", ArgData::Zorglub), ]; let args = [ "-nomatch", "-foo", "value", "-hoge", "value", // -hoge doesn't take a separate value "-hoge=value", // = is not recognized as a separator "-hogevalue", "-zorglub", "-qux", "value", "-plop", "-quxbar", // -quxbar is not -qux with a value of bar "-qux=value", "--", "non_flag", "-flag-after-double-dashes", ]; let iter = ArgsIter::new(args.iter().map(OsString::from), &ARGS[..]).with_double_dashes(); let expected = vec![ arg!(UnknownFlag("-nomatch")), arg!(WithValue("-foo", ArgData::Foo("value"), Separated)), arg!(WithValue("-hoge", ArgData::Hoge(""), Concatenated)), arg!(Raw("value")), arg!(WithValue("-hoge", ArgData::Hoge("=value"), Concatenated)), arg!(WithValue("-hoge", ArgData::Hoge("value"), Concatenated)), arg!(Flag("-zorglub", ArgData::Zorglub)), arg!(WithValue( "-qux", ArgData::Qux("value"), CanBeConcatenated('=') )), arg!(Flag("-plop", ArgData::Plop)), arg!(UnknownFlag("-quxbar")), arg!(WithValue( "-qux", ArgData::Qux("value"), CanBeSeparated('=') )), arg!(Raw("--")), arg!(Raw("non_flag")), arg!(Raw("-flag-after-double-dashes")), ]; match diff_with(iter, expected, |a, b| { assert_eq!(a.as_ref().unwrap(), b); true }) { None => {} Some(Diff::FirstMismatch(_, _, _)) => unreachable!(), Some(Diff::Shorter(_, i)) => { assert_eq!(i.map(|a| a.unwrap()).collect::>(), vec![]) } Some(Diff::Longer(_, i)) => { assert_eq!(Vec::>::new(), i.collect::>()) } } } // https://github.com/rust-lang/rust-clippy/issues/6550 #[allow(clippy::from_iter_instead_of_collect)] #[test] fn test_argument_into_iter() { // Needs type annotation or ascription let raw: Argument = arg!(Raw("value")); let unknown: Argument = arg!(UnknownFlag("-foo")); assert_eq!(Vec::from_iter(raw.iter_os_strings()), ovec!["value"]); assert_eq!(Vec::from_iter(unknown.iter_os_strings()), ovec!["-foo"]); assert_eq!( Vec::from_iter(arg!(Flag("-foo", FooFlag)).iter_os_strings()), ovec!["-foo"] ); let arg = arg!(WithValue("-foo", Foo("bar"), Concatenated)); assert_eq!(Vec::from_iter(arg.iter_os_strings()), ovec!["-foobar"]); let arg = arg!(WithValue("-foo", Foo("bar"), Concatenated('='))); assert_eq!(Vec::from_iter(arg.iter_os_strings()), ovec!["-foo=bar"]); let arg = arg!(WithValue("-foo", Foo("bar"), CanBeSeparated)); assert_eq!(Vec::from_iter(arg.iter_os_strings()), ovec!["-foobar"]); let arg = arg!(WithValue("-foo", Foo("bar"), CanBeSeparated('='))); assert_eq!(Vec::from_iter(arg.iter_os_strings()), ovec!["-foo=bar"]); let arg = arg!(WithValue("-foo", Foo("bar"), CanBeConcatenated)); assert_eq!(Vec::from_iter(arg.iter_os_strings()), ovec!["-foo", "bar"]); let arg = arg!(WithValue("-foo", Foo("bar"), CanBeConcatenated('='))); assert_eq!(Vec::from_iter(arg.iter_os_strings()), ovec!["-foo", "bar"]); let arg = arg!(WithValue("-foo", Foo("bar"), Separated)); assert_eq!(Vec::from_iter(arg.iter_os_strings()), ovec!["-foo", "bar"]); } #[test] fn test_arginfo_process_take_concat_arg_delim_doesnt_crash() { let _ = take_arg!("-foo", OsString, Concatenated('='), 
Foo).process("-foo", || None); } #[cfg(debug_assertions)] mod assert_tests { use super::*; #[test] #[should_panic] fn test_arginfo_process_flag() { flag!("-foo", FooFlag).process("-bar", || None).unwrap(); } #[test] #[should_panic] fn test_arginfo_process_take_arg() { take_arg!("-foo", OsString, Separated, Foo) .process("-bar", || None) .unwrap(); } #[test] #[should_panic] fn test_arginfo_process_take_concat_arg() { take_arg!("-foo", OsString, Concatenated, Foo) .process("-bar", || None) .unwrap(); } #[test] #[should_panic] fn test_arginfo_process_take_concat_arg_delim() { take_arg!("-foo", OsString, Concatenated('='), Foo) .process("-bar", || None) .unwrap(); } #[test] #[should_panic] fn test_arginfo_process_take_maybe_concat_arg() { take_arg!("-foo", OsString, CanBeSeparated, Foo) .process("-bar", || None) .unwrap(); } #[test] #[should_panic] fn test_arginfo_process_take_maybe_concat_arg_delim() { take_arg!("-foo", OsString, CanBeSeparated('='), Foo) .process("-bar", || None) .unwrap(); } #[test] #[should_panic] fn test_args_iter_unsorted() { static ARGS: [ArgInfo; 2] = [flag!("-foo", FooFlag), flag!("-bar", FooFlag)]; ArgsIter::new(Vec::::new().into_iter(), &ARGS[..]); } #[test] #[should_panic] fn test_args_iter_unsorted_2() { static ARGS: [ArgInfo; 2] = [flag!("-foo", FooFlag), flag!("-foo", FooFlag)]; ArgsIter::new(Vec::::new().into_iter(), &ARGS[..]); } #[test] fn test_args_iter_no_conflict() { static ARGS: [ArgInfo; 2] = [flag!("-foo", FooFlag), flag!("-fooz", FooFlag)]; ArgsIter::new(Vec::::new().into_iter(), &ARGS[..]); } } } mozilla-sccache-40c3d6b/src/compiler/c.rs000066400000000000000000002120611475712407500203660ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::{FileObjectSource, PreprocessorCacheModeConfig, Storage}; use crate::compiler::preprocessor_cache::preprocessor_cache_entry_hash_key; use crate::compiler::{ Cacheable, ColorMode, Compilation, CompileCommand, Compiler, CompilerArguments, CompilerHasher, CompilerKind, HashResult, Language, }; #[cfg(feature = "dist-client")] use crate::compiler::{DistPackagers, NoopOutputsRewriter}; use crate::dist; #[cfg(feature = "dist-client")] use crate::dist::pkg; use crate::mock_command::CommandCreatorSync; use crate::util::{ decode_path, encode_path, hash_all, Digest, HashToDigest, MetadataCtimeExt, TimeMacroFinder, Timestamp, }; use async_trait::async_trait; use fs_err as fs; use once_cell::sync::Lazy; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::ffi::{OsStr, OsString}; use std::fmt; use std::hash::Hash; use std::io; use std::ops::ControlFlow; use std::path::{Path, PathBuf}; use std::process; use std::sync::Arc; use crate::errors::*; use super::preprocessor_cache::PreprocessorCacheEntry; use super::CacheControl; /// A generic implementation of the `Compiler` trait for C/C++ compilers. 
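// NOTE: `executable_digest` in the structs below is computed in
// `CCompiler::new` further down: the binary's content hash, with the
// compiler's self-reported version string mixed in when one is available, so
// an in-place toolchain upgrade at the same path still changes the cache key.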
#[derive(Clone)] pub struct CCompiler where I: CCompilerImpl, { executable: PathBuf, executable_digest: String, compiler: I, } /// A generic implementation of the `CompilerHasher` trait for C/C++ compilers. #[derive(Debug, Clone)] pub struct CCompilerHasher where I: CCompilerImpl, { parsed_args: ParsedArguments, executable: PathBuf, executable_digest: String, compiler: I, } /// Artifact produced by a C/C++ compiler. #[derive(Clone, Debug, PartialEq, Eq)] pub struct ArtifactDescriptor { /// Path to the artifact. pub path: PathBuf, /// Whether the artifact is an optional object file. pub optional: bool, } /// The results of parsing a compiler commandline. #[allow(dead_code)] #[derive(Debug, PartialEq, Eq, Clone)] pub struct ParsedArguments { /// The input source file. pub input: PathBuf, /// Whether to prepend the input with `--` pub double_dash_input: bool, /// The type of language used in the input source file. pub language: Language, /// The flag required to compile for the given language pub compilation_flag: OsString, /// The file in which to generate dependencies. pub depfile: Option, /// Output files and whether it's optional, keyed by a simple name, like "obj". pub outputs: HashMap<&'static str, ArtifactDescriptor>, /// Commandline arguments for dependency generation. pub dependency_args: Vec, /// Commandline arguments for the preprocessor (not including common_args). pub preprocessor_args: Vec, /// Commandline arguments for the preprocessor or the compiler. pub common_args: Vec, /// Commandline arguments for the compiler that specify the architecture given pub arch_args: Vec, /// Commandline arguments for the preprocessor or the compiler that don't affect the computed hash. pub unhashed_args: Vec, /// Extra unhashed files that need to be sent along with dist compiles. pub extra_dist_files: Vec, /// Extra files that need to have their contents hashed. pub extra_hash_files: Vec, /// Whether or not the `-showIncludes` argument is passed on MSVC pub msvc_show_includes: bool, /// Whether the compilation is generating profiling or coverage data. pub profile_generate: bool, /// The color mode. pub color_mode: ColorMode, /// arguments are incompatible with rewrite_includes_only pub suppress_rewrite_includes_only: bool, /// Arguments are incompatible with preprocessor cache mode pub too_hard_for_preprocessor_cache_mode: Option, } impl ParsedArguments { pub fn output_pretty(&self) -> Cow<'_, str> { self.outputs .get("obj") .and_then(|o| o.path.file_name()) .map(|s| s.to_string_lossy()) .unwrap_or(Cow::Borrowed("Unknown filename")) } } /// A generic implementation of the `Compilation` trait for C/C++ compilers. struct CCompilation { parsed_args: ParsedArguments, #[cfg(feature = "dist-client")] preprocessed_input: Vec, executable: PathBuf, compiler: I, cwd: PathBuf, env_vars: Vec<(OsString, OsString)>, } /// Supported C compilers. #[derive(Debug, PartialEq, Eq, Clone)] pub enum CCompilerKind { /// GCC Gcc, /// clang Clang, /// Diab Diab, /// Microsoft Visual C++ Msvc, /// NVIDIA CUDA compiler Nvcc, /// NVIDIA CUDA front-end CudaFE, /// NVIDIA CUDA optimizer and PTX generator Cicc, /// NVIDIA CUDA PTX assembler Ptxas, /// NVIDIA hpc c, c++ compiler Nvhpc, /// Tasking VX TaskingVX, } /// An interface to a specific C compiler. #[async_trait] pub trait CCompilerImpl: Clone + fmt::Debug + Send + Sync + 'static { /// Return the kind of compiler. fn kind(&self) -> CCompilerKind; /// Return true iff this is g++ or clang++. 
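// NOTE: this distinction is not cosmetic; `plusplus` is folded into both the
// preprocessor cache key and the final hash key (see `generate_hash_key`
// below). As the comments in that function note, two driver names can be
// links to one binary with an identical digest, so the language personality
// has to be hashed separately to keep their outputs from colliding.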
fn plusplus(&self) -> bool; /// Return the compiler version reported by the compiler executable. fn version(&self) -> Option; /// Determine whether `arguments` are supported by this compiler. fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, env_vars: &[(OsString, OsString)], ) -> CompilerArguments; /// Run the C preprocessor with the specified set of arguments. #[allow(clippy::too_many_arguments)] async fn preprocess( &self, creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, rewrite_includes_only: bool, preprocessor_cache_mode: bool, ) -> Result where T: CommandCreatorSync; /// Generate a command that can be used to invoke the C compiler to perform /// the compilation. fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> where T: CommandCreatorSync; } impl CCompiler where I: CCompilerImpl, { pub async fn new( compiler: I, executable: PathBuf, pool: &tokio::runtime::Handle, ) -> Result> { let digest = Digest::file(executable.clone(), pool).await?; Ok(CCompiler { executable, executable_digest: { if let Some(version) = compiler.version() { let mut m = Digest::new(); m.update(digest.as_bytes()); m.update(version.as_bytes()); m.finish() } else { digest } }, compiler, }) } fn extract_rocm_arg(args: &ParsedArguments, flag: &str) -> Option { args.common_args.iter().find_map(|arg| match arg.to_str() { Some(sarg) if sarg.starts_with(flag) => { Some(PathBuf::from(sarg[arg.len()..].to_string())) } _ => None, }) } fn extract_rocm_env(env_vars: &[(OsString, OsString)], name: &str) -> Option { env_vars.iter().find_map(|(k, v)| match v.to_str() { Some(path) if k == name => Some(PathBuf::from(path.to_string())), _ => None, }) } // See https://clang.llvm.org/docs/HIPSupport.html for details regarding the // order in which the environment variables and command-line arguments control the // directory to search for bitcode libraries. fn search_hip_device_libs( args: &ParsedArguments, env_vars: &[(OsString, OsString)], ) -> Vec { let rocm_path_arg: Option = Self::extract_rocm_arg(args, "--rocm-path="); let hip_device_lib_path_arg: Option = Self::extract_rocm_arg(args, "--hip-device-lib-path="); let rocm_path_env: Option = Self::extract_rocm_env(env_vars, "ROCM_PATH"); let hip_device_lib_path_env: Option = Self::extract_rocm_env(env_vars, "HIP_DEVICE_LIB_PATH"); let hip_device_lib_path: PathBuf = hip_device_lib_path_arg .or(hip_device_lib_path_env) .or(rocm_path_arg.map(|path| path.join("amdgcn").join("bitcode"))) .or(rocm_path_env.map(|path| path.join("amdgcn").join("bitcode"))) // This is the default location in official AMD packages and containers. 
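// NOTE: precedence encoded by the `.or()` chain above, highest first: the
// --hip-device-lib-path= argument, the HIP_DEVICE_LIB_PATH env var, the
// --rocm-path= argument (plus /amdgcn/bitcode), the ROCM_PATH env var (plus
// the same suffix), and finally the fixed default on the next line.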
.unwrap_or(PathBuf::from("/opt/rocm/amdgcn/bitcode")); hip_device_lib_path .read_dir() .ok() .map(|f| { f.flatten() .filter(|f| f.path().extension().map_or(false, |ext| ext == "bc")) .map(|f| f.path()) .collect() }) .unwrap_or_default() } } impl Compiler for CCompiler { fn kind(&self) -> CompilerKind { CompilerKind::C(self.compiler.kind()) } #[cfg(feature = "dist-client")] fn get_toolchain_packager(&self) -> Box { Box::new(CToolchainPackager { executable: self.executable.clone(), kind: self.compiler.kind(), }) } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, env_vars: &[(OsString, OsString)], ) -> CompilerArguments + 'static>> { match self.compiler.parse_arguments(arguments, cwd, env_vars) { CompilerArguments::Ok(mut args) => { // Handle SCCACHE_EXTRAFILES for (k, v) in env_vars.iter() { if k.as_os_str() == OsStr::new("SCCACHE_EXTRAFILES") { args.extra_hash_files.extend(std::env::split_paths(&v)) } } // Handle cache invalidation for the ROCm device bitcode libraries. Every HIP // object links in some LLVM bitcode libraries (.bc files), so in some sense // every HIP object compilation has an direct dependency on those bitcode // libraries. // // The bitcode libraries are unlikely to change **except** when a ROCm version // changes, so for correctness we should take these bitcode libraries into // account by adding them to `extra_hash_files`. // // In reality, not every available bitcode library is needed, but that is // too much to handle on our side so we just hash every bitcode library we find. if args.language == Language::Hip { args.extra_hash_files .extend(Self::search_hip_device_libs(&args, env_vars)) } CompilerArguments::Ok(Box::new(CCompilerHasher { parsed_args: args, executable: self.executable.clone(), executable_digest: self.executable_digest.clone(), compiler: self.compiler.clone(), })) } CompilerArguments::CannotCache(why, extra_info) => { CompilerArguments::CannotCache(why, extra_info) } CompilerArguments::NotCompilation => CompilerArguments::NotCompilation, } } fn box_clone(&self) -> Box> { Box::new((*self).clone()) } } #[async_trait] impl CompilerHasher for CCompilerHasher where T: CommandCreatorSync, I: CCompilerImpl, { async fn generate_hash_key( self: Box, creator: &T, cwd: PathBuf, env_vars: Vec<(OsString, OsString)>, may_dist: bool, pool: &tokio::runtime::Handle, rewrite_includes_only: bool, storage: Arc, cache_control: CacheControl, ) -> Result> { let start_of_compilation = std::time::SystemTime::now(); let CCompilerHasher { parsed_args, executable, executable_digest, compiler, } = *self; let extra_hashes = hash_all(&parsed_args.extra_hash_files, &pool.clone()).await?; // Create an argument vector containing both preprocessor and arch args, to // use in creating a hash key let mut preprocessor_and_arch_args = parsed_args.preprocessor_args.clone(); preprocessor_and_arch_args.extend(parsed_args.arch_args.to_vec()); // common_args is used in preprocessing too preprocessor_and_arch_args.extend(parsed_args.common_args.to_vec()); let absolute_input_path: Cow<'_, _> = if parsed_args.input.is_absolute() { Cow::Borrowed(&parsed_args.input) } else { Cow::Owned(cwd.join(&parsed_args.input)) }; // Try to look for a cached preprocessing step for this compilation // request. 
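// A minimal sketch (hypothetical helper, not part of the original source):
// the SCCACHE_DIRECT handling in the block below reduces to this decision
// table; the env var can only veto preprocessor cache mode, never force it
// on when distribution or the parsed arguments already rule it out.
#[allow(dead_code)]
fn sketch_use_direct(can_use: bool, sccache_direct: Option<&str>) -> bool {
    match sccache_direct.map(str::to_lowercase).as_deref() {
        // Any explicit "off" spelling disables the mode outright.
        Some("false") | Some("off") | Some("0") => false,
        // Anything else (including unset) defers to the computed default.
        _ => can_use,
    }
}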
let preprocessor_cache_mode_config = storage.preprocessor_cache_mode_config(); let too_hard_for_preprocessor_cache_mode = parsed_args.too_hard_for_preprocessor_cache_mode.is_some(); if let Some(arg) = &parsed_args.too_hard_for_preprocessor_cache_mode { debug!( "parse_arguments: Cannot use preprocessor cache because of {:?}", arg ); } let use_preprocessor_cache_mode = { let can_use_preprocessor_cache_mode = !may_dist && preprocessor_cache_mode_config.use_preprocessor_cache_mode && !too_hard_for_preprocessor_cache_mode; let mut use_preprocessor_cache_mode = can_use_preprocessor_cache_mode; // Allow overrides from the env for (key, val) in env_vars.iter() { if key == "SCCACHE_DIRECT" { if let Some(val) = val.to_str() { use_preprocessor_cache_mode = match val.to_lowercase().as_str() { "false" | "off" | "0" => false, _ => can_use_preprocessor_cache_mode, }; } break; } } if can_use_preprocessor_cache_mode && !use_preprocessor_cache_mode { debug!( "parse_arguments: Disabling preprocessor cache because SCCACHE_DIRECT=false" ); } use_preprocessor_cache_mode }; // Disable preprocessor cache when doing distributed compilation let mut preprocessor_key = if use_preprocessor_cache_mode { preprocessor_cache_entry_hash_key( &executable_digest, parsed_args.language, &preprocessor_and_arch_args, &extra_hashes, &env_vars, &absolute_input_path, compiler.plusplus(), preprocessor_cache_mode_config, )? } else { None }; if let Some(preprocessor_key) = &preprocessor_key { if cache_control == CacheControl::Default { if let Some(mut seekable) = storage .get_preprocessor_cache_entry(preprocessor_key) .await? { let mut buf = vec![]; seekable.read_to_end(&mut buf)?; let mut preprocessor_cache_entry = PreprocessorCacheEntry::read(&buf)?; let mut updated = false; let hit = preprocessor_cache_entry .lookup_result_digest(preprocessor_cache_mode_config, &mut updated); let mut update_failed = false; if updated { // Time macros have been found, we need to update // the preprocessor cache entry. See [`PreprocessorCacheEntry::result_matches`]. debug!( "Preprocessor cache updated because of time macros: {preprocessor_key}" ); if let Err(e) = storage .put_preprocessor_cache_entry( preprocessor_key, preprocessor_cache_entry, ) .await { debug!("Failed to update preprocessor cache: {}", e); update_failed = true; } } if !update_failed { if let Some(key) = hit { debug!("Preprocessor cache hit: {preprocessor_key}"); // A compiler binary may be a symlink to another and // so has the same digest, but that means // the toolchain will not contain the correct path // to invoke the compiler! Add the compiler // executable path to try and prevent this let weak_toolchain_key = format!("{}-{}", executable.to_string_lossy(), executable_digest); return Ok(HashResult { key, compilation: Box::new(CCompilation { parsed_args: parsed_args.to_owned(), #[cfg(feature = "dist-client")] // TODO or is it never relevant since dist? 
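// NOTE, answering the TODO above: empty is sound on this path, because a
// preprocessor cache hit means the preprocessor never ran, and distributed
// compilation (the only consumer of `preprocessed_input`) is disabled
// whenever preprocessor cache mode is in use (`!may_dist` above).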
preprocessed_input: vec![], executable: executable.to_owned(), compiler: compiler.to_owned(), cwd: cwd.to_owned(), env_vars: env_vars.to_owned(), }), weak_toolchain_key, }); } else { debug!("Preprocessor cache miss: {preprocessor_key}"); } } } } } let result = compiler .preprocess( creator, &executable, &parsed_args, &cwd, &env_vars, may_dist, rewrite_includes_only, use_preprocessor_cache_mode, ) .await; let out_pretty = parsed_args.output_pretty().into_owned(); let result = result.map_err(|e| { debug!("[{}]: preprocessor failed: {:?}", out_pretty, e); e }); let outputs = parsed_args.outputs.clone(); let args_cwd = cwd.clone(); let mut preprocessor_result = result.or_else(move |err| { // Errors remove all traces of potential output. debug!("removing files {:?}", &outputs); let v: std::result::Result<(), std::io::Error> = outputs.values().try_for_each(|output| { let mut path = args_cwd.clone(); path.push(&output.path); match fs::metadata(&path) { // File exists, remove it. Ok(_) => fs::remove_file(&path), _ => Ok(()), } }); if v.is_err() { warn!("Could not remove files after preprocessing failed!"); } match err.downcast::() { Ok(ProcessError(output)) => { debug!( "[{}]: preprocessor returned error status {:?}", out_pretty, output.status.code() ); // Drop the stdout since it's the preprocessor output, // just hand back stderr and the exit status. bail!(ProcessError(process::Output { stdout: vec!(), ..output })) } Err(err) => Err(err), } })?; // Remember include files needed in this preprocessing step let mut include_files = HashMap::new(); if preprocessor_key.is_some() { // TODO how to propagate stats and which stats? if !process_preprocessed_file( &absolute_input_path, &cwd, &mut preprocessor_result.stdout, &mut include_files, preprocessor_cache_mode_config, start_of_compilation, StandardFsAbstraction, )? { debug!("Disabling preprocessor cache mode"); preprocessor_key = None; } } trace!( "[{}]: Preprocessor output is {} bytes", parsed_args.output_pretty(), preprocessor_result.stdout.len() ); // Create an argument vector containing both common and arch args, to // use in creating a hash key let mut common_and_arch_args = parsed_args.common_args.clone(); common_and_arch_args.extend(parsed_args.arch_args.to_vec()); let key = { hash_key( &executable_digest, parsed_args.language, &common_and_arch_args, &extra_hashes, &env_vars, &preprocessor_result.stdout, compiler.plusplus(), ) }; // Cache the preprocessing step if let Some(preprocessor_key) = preprocessor_key { if !include_files.is_empty() { let mut preprocessor_cache_entry = PreprocessorCacheEntry::new(); let mut files: Vec<_> = include_files .into_iter() .map(|(path, digest)| (digest, path)) .collect(); files.sort_unstable_by(|a, b| a.1.cmp(&b.1)); preprocessor_cache_entry.add_result(start_of_compilation, &key, files); if let Err(e) = storage .put_preprocessor_cache_entry(&preprocessor_key, preprocessor_cache_entry) .await { debug!("Failed to update preprocessor cache: {}", e); } } } // A compiler binary may be a symlink to another and so has the same digest, but that means // the toolchain will not contain the correct path to invoke the compiler! 
Add the compiler // executable path to try and prevent this let weak_toolchain_key = format!("{}-{}", executable.to_string_lossy(), executable_digest); Ok(HashResult { key, compilation: Box::new(CCompilation { parsed_args, #[cfg(feature = "dist-client")] preprocessed_input: preprocessor_result.stdout, executable, compiler, cwd, env_vars, }), weak_toolchain_key, }) } fn color_mode(&self) -> ColorMode { self.parsed_args.color_mode } fn output_pretty(&self) -> Cow<'_, str> { self.parsed_args.output_pretty() } fn box_clone(&self) -> Box> { Box::new((*self).clone()) } fn language(&self) -> Language { self.parsed_args.language } } const PRAGMA_GCC_PCH_PREPROCESS: &[u8] = b"pragma GCC pch_preprocess"; const HASH_31_COMMAND_LINE_NEWLINE: &[u8] = b"# 31 \"\"\n"; const HASH_32_COMMAND_LINE_2_NEWLINE: &[u8] = b"# 32 \"\" 2\n"; const INCBIN_DIRECTIVE: &[u8] = b".incbin"; /// Remember the include files in the preprocessor output if it can be cached. /// Returns `false` if preprocessor cache mode should be disabled. fn process_preprocessed_file( input_file: &Path, cwd: &Path, bytes: &mut [u8], included_files: &mut HashMap, config: PreprocessorCacheModeConfig, time_of_compilation: std::time::SystemTime, fs_impl: impl PreprocessorFSAbstraction, ) -> Result { let mut start = 0; let mut hash_start = 0; let total_len = bytes.len(); let mut digest = Digest::new(); let mut normalized_include_paths: HashMap, Option>> = HashMap::new(); // There must be at least 7 characters (# 1 "x") left to potentially find an // include file path. while start < total_len.saturating_sub(7) { let mut slice = &bytes[start..]; // Check if we look at a line containing the file name of an included file. // At least the following formats exist (where N is a positive integer): // // GCC: // // # N "file" // # N "file" N // #pragma GCC pch_preprocess "file" // // HP's compiler: // // #line N "file" // // AIX's compiler: // // #line N "file" // #line N // // Note that there may be other lines starting with '#' left after // preprocessing as well, for instance "# pragma". if slice[0] == b'#' // GCC: && ((slice[1] == b' ' && slice[2] >= b'0' && slice[2] <= b'9') // GCC precompiled header: || slice[1..].starts_with(PRAGMA_GCC_PCH_PREPROCESS) // HP/AIX: || (&slice[1..5] == b"line ")) && (start == 0 || bytes[start - 1] == b'\n') { match process_preprocessor_line( input_file, cwd, included_files, config, time_of_compilation, bytes, start, hash_start, &mut digest, total_len, &mut normalized_include_paths, &fs_impl, )? { ControlFlow::Continue((s, h)) => { start = s; hash_start = h; } ControlFlow::Break((s, h, continue_preprocessor_cache_mode)) => { if !continue_preprocessor_cache_mode { return Ok(false); } start = s; hash_start = h; continue; } }; } else if slice .strip_prefix(INCBIN_DIRECTIVE) .filter(|slice| { slice.starts_with(b"\"") || slice.starts_with(b" \"") || slice.starts_with(b" \\\"") }) .is_some() { // An assembler .inc bin (without the space) statement, which could be // part of inline assembly, refers to an external file. If the file // changes, the hash should change as well, but finding out what file to // hash is too hard for sccache, so just bail out. 
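// NOTE: for example, inline assembly like `.incbin "blob.bin"` embeds a file
// that never appears in the preprocessor output, so hashing that output
// alone would be unsound; caching is abandoned for such translation units.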
debug!("Found potential unsupported .inc bin directive in source code"); return Ok(false); } else if slice.starts_with(b"___________") && (start == 0 || bytes[start - 1] == b'\n') { // Unfortunately the distcc-pump wrapper outputs standard output lines: // __________Using distcc-pump from /usr/bin // __________Using # distcc servers in pump mode // __________Shutting down distcc-pump include server digest.update(&bytes[hash_start..start]); while start < total_len && slice[0] != b'\n' { start += 1; if start < total_len { slice = &bytes[start..]; } } slice = &bytes[start..]; if slice[0] == b'\n' { start += 1; } hash_start = start; continue; } else { start += 1; } } digest.update(&bytes[hash_start..]); Ok(true) } /// What to do after handling a preprocessor number line. /// The `Break` variant is `(start, hash_start, continue_preprocessor_cache_mode)`. /// The `Continue` variant is `(start, hash_start)`. type PreprocessedLineAction = ControlFlow<(usize, usize, bool), (usize, usize)>; #[allow(clippy::too_many_arguments)] fn process_preprocessor_line( input_file: &Path, cwd: &Path, included_files: &mut HashMap, config: PreprocessorCacheModeConfig, time_of_compilation: std::time::SystemTime, bytes: &mut [u8], mut start: usize, mut hash_start: usize, digest: &mut Digest, total_len: usize, normalized_include_paths: &mut HashMap, Option>>, fs_impl: &impl PreprocessorFSAbstraction, ) -> Result { let mut slice = &bytes[start..]; // Workarounds for preprocessor linemarker bugs in GCC version 6. if slice.get(2) == Some(&b'3') { if slice.starts_with(HASH_31_COMMAND_LINE_NEWLINE) { // Bogus extra line with #31, after the regular #1: // Ignore the whole line, and continue parsing. digest.update(&bytes[hash_start..start]); while start < hash_start && slice[0] != b'\n' { start += 1; } start += 1; hash_start = start; return Ok(ControlFlow::Break((start, hash_start, true))); } else if slice.starts_with(HASH_32_COMMAND_LINE_2_NEWLINE) { // Bogus wrong line with #32, instead of regular #1: // Replace the line number with the usual one. digest.update(&bytes[hash_start..start]); start += 1; bytes[start..=start + 2].copy_from_slice(b"# 1"); hash_start = start; slice = &bytes[start..]; } } while start < total_len && slice[0] != b'"' && slice[0] != b'\n' { start += 1; if start < total_len { slice = &bytes[start..]; } } slice = &bytes[start..]; if start < total_len && slice[0] == b'\n' { // a newline before the quotation mark -> no match return Ok(ControlFlow::Break((start, hash_start, true))); } start += 1; if start >= total_len { bail!("Failed to parse included file path"); } // `start` points to the beginning of an include file path digest.update(&bytes[hash_start..start]); hash_start = start; slice = &bytes[start..]; while start < total_len && slice[0] != b'"' { start += 1; if start < total_len { slice = &bytes[start..]; } } if start == hash_start { // Skip empty file name. return Ok(ControlFlow::Break((start, hash_start, true))); } // Look for preprocessor flags, after the "filename". let mut system = false; let mut pointer = start + 1; while pointer < total_len && bytes[pointer] != b'\n' { if bytes[pointer] == b'3' { // System header. system = true; } pointer += 1; } // `hash_start` and `start` span the include file path. let include_path = &bytes[hash_start..start]; // We need to normalize the path now since it's part of the // hash and since we need to deduplicate the include files. // We cache the results since they are often quite a bit repeated. 
let include_path: &[u8] = if let Some(opt) = normalized_include_paths.get(include_path) { match opt { Some(normalized) => normalized, None => include_path, } } else { let path_buf = decode_path(include_path)?; let normalized = normalize_path(&path_buf); if normalized == path_buf { // `None` is a marker that the normalization is the same normalized_include_paths.insert(include_path.to_owned(), None); include_path } else { let mut encoded = Vec::with_capacity(include_path.len()); encode_path(&mut encoded, &normalized)?; normalized_include_paths.insert(include_path.to_owned(), Some(encoded)); // No entry API on hashmaps, so we need to query again normalized_include_paths .get(include_path) .unwrap() .as_ref() .unwrap() } }; if !remember_include_file( include_path, input_file, cwd, included_files, digest, system, config, time_of_compilation, fs_impl, )? { return Ok(ControlFlow::Break((start, hash_start, false))); }; // Everything of interest between hash_start and start has been hashed now. hash_start = start; Ok(ControlFlow::Continue((start, hash_start))) } /// Copied from cargo. /// /// Normalize a path, removing things like `.` and `..`. /// /// CAUTION: This does not resolve symlinks (unlike /// [`std::fs::canonicalize`]). pub fn normalize_path(path: &Path) -> PathBuf { use std::path::Component; let mut components = path.components().peekable(); let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() { components.next(); PathBuf::from(c.as_os_str()) } else { PathBuf::new() }; for component in components { match component { Component::Prefix(..) => unreachable!(), Component::RootDir => { ret.push(component.as_os_str()); } Component::CurDir => {} Component::ParentDir => { ret.pop(); } Component::Normal(c) => { ret.push(c); } } } ret } /// Limited abstraction of `std::fs::Metadata`, allowing us to create fake /// values during testing. #[derive(Debug, Eq, PartialEq, Clone)] struct PreprocessorFileMetadata { is_dir: bool, is_file: bool, modified: Option, ctime_or_creation: Option, } impl From for PreprocessorFileMetadata { fn from(meta: std::fs::Metadata) -> Self { Self { is_dir: meta.is_dir(), is_file: meta.is_file(), modified: meta.modified().ok().map(Into::into), ctime_or_creation: meta.ctime_or_creation().ok(), } } } /// An abstraction to filesystem access for use during the preprocessor /// caching phase, to make testing easier. /// /// This may help non-local preprocessor caching in the future, if it ends up /// being viable. trait PreprocessorFSAbstraction { fn metadata(&self, path: impl AsRef) -> io::Result { std::fs::metadata(path).map(Into::into) } fn open(&self, path: impl AsRef) -> io::Result> { Ok(Box::new(std::fs::File::open(path)?)) } } /// Provides filesystem access with the expected standard library functions. struct StandardFsAbstraction; impl PreprocessorFSAbstraction for StandardFsAbstraction {} // Returns false if the include file was "too new" (meaning modified during or // after the start of the compilation) and therefore should disable // the preprocessor cache mode, otherwise true. #[allow(clippy::too_many_arguments)] fn remember_include_file( mut path: &[u8], input_file: &Path, cwd: &Path, included_files: &mut HashMap, digest: &mut Digest, system: bool, config: PreprocessorCacheModeConfig, time_of_compilation: std::time::SystemTime, fs_impl: &impl PreprocessorFSAbstraction, ) -> Result { // TODO if precompiled header. if path.len() >= 2 && path[0] == b'<' && path[path.len() - 1] == b'>' { // Typically or . 
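// NOTE: this branch covers angle-bracket spellings such as <vector> or
// <stdio.h>; there is no on-disk path to stat or read for them, so only the
// spelling itself participates in the hash.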
digest.update(path); return Ok(true); } if system && config.skip_system_headers { // Don't remember this system header, only hash its path. digest.update(path); return Ok(true); } let original_path = path; // Canonicalize path for comparison; Clang uses ./header.h. #[cfg(windows)] { if path.starts_with(br".\") || path.starts_with(b"./") { path = &path[2..]; } } #[cfg(not(windows))] { if path.starts_with(b"./") { path = &path[2..]; } } let mut path = decode_path(path).context("failed to decode path")?; if path.is_relative() { path = cwd.join(path); } if path != cwd || config.hash_working_directory { digest.update(original_path); } if included_files.contains_key(&path) { // Already known include file return Ok(true); } if path == input_file { // Don't remember the input file. return Ok(true); } let meta = match fs_impl.metadata(&path) { Ok(meta) => meta, Err(e) => { debug!("Failed to stat include file {}: {}", path.display(), e); return Ok(false); } }; if meta.is_dir { // Ignore directory, typically $PWD. return Ok(true); } if !meta.is_file { // Device, pipe, socket or other strange creature. debug!("Non-regular include file {}", path.display()); return Ok(false); } // TODO add an option to ignore some header files? if include_is_too_new(&path, &meta, time_of_compilation) { return Ok(false); } // Let's hash the include file content. let file = match fs_impl.open(&path) { Ok(file) => file, Err(e) => { debug!("Failed to open header file {}: {}", path.display(), e); return Ok(false); } }; let (file_digest, finder) = if config.ignore_time_macros { match Digest::reader_sync(file) { Ok(file_digest) => (file_digest, TimeMacroFinder::new()), Err(e) => { debug!("Failed to read header file {}: {}", path.display(), e); return Ok(false); } } } else { match Digest::reader_sync_time_macros(file) { Ok((file_digest, finder)) => (file_digest, finder), Err(e) => { debug!("Failed to read header file {}: {}", path.display(), e); return Ok(false); } } }; if finder.found_time() { debug!("Found __TIME__ in header file {}", path.display()); return Ok(false); } included_files.insert(path, file_digest); Ok(true) } /// Opt out of preprocessor cache mode because of a race condition. /// /// The race condition consists of these events: /// /// - the preprocessor is run /// - an include file is modified by someone /// - the new include file is hashed by sccache /// - the real compiler is run on the preprocessor's output, which contains /// data from the old header file /// - the wrong object file is stored in the cache. fn include_is_too_new( path: &Path, meta: &PreprocessorFileMetadata, time_of_compilation: std::time::SystemTime, ) -> bool { // The comparison using >= is intentional, due to a possible race between // starting compilation and writing the include file. if let Some(mtime) = meta.modified { if mtime >= time_of_compilation.into() { debug!("Include file {} is too new", path.display()); return true; } } // The same >= logic as above applies to the change time of the file. if let Some(ctime) = meta.ctime_or_creation { if ctime >= time_of_compilation.into() { debug!("Include file {} is too new", path.display()); return true; } } false } impl Compilation for CCompilation { fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> { let CCompilation { ref parsed_args, ref executable, ref compiler, ref cwd, ref env_vars, .. 
} = *self; compiler.generate_compile_commands( path_transformer, executable, parsed_args, cwd, env_vars, rewrite_includes_only, ) } #[cfg(feature = "dist-client")] fn into_dist_packagers( self: Box, path_transformer: dist::PathTransformer, ) -> Result { let CCompilation { parsed_args, cwd, preprocessed_input, executable, compiler, .. } = *self; trace!("Dist inputs: {:?}", parsed_args.input); let input_path = cwd.join(&parsed_args.input); let inputs_packager = Box::new(CInputsPackager { input_path, preprocessed_input, path_transformer, extra_dist_files: parsed_args.extra_dist_files, extra_hash_files: parsed_args.extra_hash_files, }); let toolchain_packager = Box::new(CToolchainPackager { executable, kind: compiler.kind(), }); let outputs_rewriter = Box::new(NoopOutputsRewriter); Ok((inputs_packager, toolchain_packager, outputs_rewriter)) } fn outputs<'a>(&'a self) -> Box + 'a> { Box::new( self.parsed_args .outputs .iter() .map(|(k, output)| FileObjectSource { key: k.to_string(), path: output.path.clone(), optional: output.optional, }), ) } } #[cfg(feature = "dist-client")] struct CInputsPackager { input_path: PathBuf, path_transformer: dist::PathTransformer, preprocessed_input: Vec, extra_dist_files: Vec, extra_hash_files: Vec, } #[cfg(feature = "dist-client")] impl pkg::InputsPackager for CInputsPackager { fn write_inputs(self: Box, wtr: &mut dyn io::Write) -> Result { let CInputsPackager { input_path, mut path_transformer, preprocessed_input, extra_dist_files, extra_hash_files, } = *self; let mut builder = tar::Builder::new(wtr); { let input_path = pkg::simplify_path(&input_path)?; let dist_input_path = path_transformer.as_dist(&input_path).with_context(|| { format!("unable to transform input path {}", input_path.display()) })?; let mut file_header = pkg::make_tar_header(&input_path, &dist_input_path)?; file_header.set_size(preprocessed_input.len() as u64); // The metadata is from non-preprocessed file_header.set_cksum(); builder.append(&file_header, preprocessed_input.as_slice())?; } for input_path in extra_hash_files.iter().chain(extra_dist_files.iter()) { let input_path = pkg::simplify_path(input_path)?; if !super::CAN_DIST_DYLIBS && input_path .extension() .map_or(false, |ext| ext == std::env::consts::DLL_EXTENSION) { bail!( "Cannot distribute dylib input {} on this platform", input_path.display() ) } let dist_input_path = path_transformer.as_dist(&input_path).with_context(|| { format!("unable to transform input path {}", input_path.display()) })?; let mut file = io::BufReader::new(fs::File::open(&input_path)?); let mut output = vec![]; io::copy(&mut file, &mut output)?; let mut file_header = pkg::make_tar_header(&input_path, &dist_input_path)?; file_header.set_size(output.len() as u64); file_header.set_cksum(); builder.append(&file_header, &*output)?; } // Finish archive let _ = builder.into_inner(); Ok(path_transformer) } } #[cfg(feature = "dist-client")] #[allow(unused)] struct CToolchainPackager { executable: PathBuf, kind: CCompilerKind, } #[cfg(feature = "dist-client")] #[cfg(all(target_os = "linux", target_arch = "x86_64"))] impl pkg::ToolchainPackager for CToolchainPackager { fn write_pkg(self: Box, f: fs::File) -> Result<()> { use std::os::unix::ffi::OsStringExt; info!("Generating toolchain {}", self.executable.display()); let mut package_builder = pkg::ToolchainPackageBuilder::new(); package_builder.add_common()?; package_builder.add_executable_and_deps(self.executable.clone())?; // Helper to use -print-file-name and -print-prog-name to look up // files by path. 
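        // For example (illustrative output; exact paths vary by installation):
        //   gcc -print-prog-name=cc1    ->  /usr/libexec/gcc/x86_64-linux-gnu/12/cc1
        //   gcc -print-file-name=specs  ->  specs   (a relative result means
        //                                   "not found", resolved via PATH below)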
        let named_file = |kind: &str, name: &str| -> Option<PathBuf> {
            let mut output = process::Command::new(&self.executable)
                .arg(format!("-print-{}-name={}", kind, name))
                .output()
                .ok()?;
            debug!(
                "find named {} {} output:\n{}\n===\n{}",
                kind,
                name,
                String::from_utf8_lossy(&output.stdout),
                String::from_utf8_lossy(&output.stderr),
            );
            if !output.status.success() {
                debug!("exit failure");
                return None;
            }
            // Remove the trailing newline (if present)
            if output.stdout.last() == Some(&b'\n') {
                output.stdout.pop();
            }
            // Create our PathBuf from the raw bytes. Assume that relative
            // paths can be found via PATH.
            let path: PathBuf = OsString::from_vec(output.stdout).into();
            if path.is_absolute() {
                Some(path)
            } else {
                which::which(path).ok()
            }
        };

        // Helpers to add a named file/program to the package.
        // We ignore the case where the file doesn't exist, as we don't need it.
        let add_named_prog =
            |builder: &mut pkg::ToolchainPackageBuilder, name: &str| -> Result<()> {
                if let Some(path) = named_file("prog", name) {
                    builder.add_executable_and_deps(path)?;
                }
                Ok(())
            };
        let add_named_file =
            |builder: &mut pkg::ToolchainPackageBuilder, name: &str| -> Result<()> {
                if let Some(path) = named_file("file", name) {
                    builder.add_file(path)?;
                }
                Ok(())
            };

        // Add basic |as| and |objcopy| programs.
        add_named_prog(&mut package_builder, "as")?;
        add_named_prog(&mut package_builder, "objcopy")?;

        // Linker configuration.
        if Path::new("/etc/ld.so.conf").is_file() {
            package_builder.add_file("/etc/ld.so.conf".into())?;
        }

        // Compiler-specific handling
        match self.kind {
            CCompilerKind::Clang => {
                // Clang uses internal header files, so add them.
                if let Some(limits_h) = named_file("file", "include/limits.h") {
                    info!("limits_h = {}", limits_h.display());
                    package_builder.add_dir_contents(limits_h.parent().unwrap())?;
                }
            }
            CCompilerKind::Gcc => {
                // Various external programs / files which may be needed by gcc
                add_named_prog(&mut package_builder, "cc1")?;
                add_named_prog(&mut package_builder, "cc1plus")?;
                add_named_file(&mut package_builder, "specs")?;
                add_named_file(&mut package_builder, "liblto_plugin.so")?;
            }
            CCompilerKind::Cicc
            | CCompilerKind::CudaFE
            | CCompilerKind::Ptxas
            | CCompilerKind::Nvcc => {}
            CCompilerKind::Nvhpc => {
                // Various programs called by the nvc nvc++ front end.
                add_named_file(&mut package_builder, "cpp1")?;
                add_named_file(&mut package_builder, "cpp2")?;
                add_named_file(&mut package_builder, "opt")?;
                add_named_prog(&mut package_builder, "llc")?;
                add_named_prog(&mut package_builder, "acclnk")?;
            }
            _ => unreachable!(),
        }

        // Bundle into a compressed tarfile.
        package_builder.into_compressed_tar(f)
    }
}

/// The cache is versioned by the inputs to `hash_key`.
pub const CACHE_VERSION: &[u8] = b"11";

/// Environment variables that are factored into the cache key.
static CACHED_ENV_VARS: Lazy<HashSet<&'static OsStr>> = Lazy::new(|| {
    [
        // SCCACHE_C_CUSTOM_CACHE_BUSTER has no particular meaning behind it,
        // serving as a way for the user to factor custom data into the hash.
        // One can set it to different values for different invocations
        // to prevent cache reuse between them.
        "SCCACHE_C_CUSTOM_CACHE_BUSTER",
        "MACOSX_DEPLOYMENT_TARGET",
        "IPHONEOS_DEPLOYMENT_TARGET",
        "TVOS_DEPLOYMENT_TARGET",
        "WATCHOS_DEPLOYMENT_TARGET",
        "SDKROOT",
        "CCC_OVERRIDE_OPTIONS",
    ]
    .iter()
    .map(OsStr::new)
    .collect()
});

/// Compute the hash key of `compiler` compiling `preprocessor_output` with `args`.
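///
/// A minimal usage sketch (hypothetical digest value; mirrors the tests
/// below):
///
/// ```ignore
/// let args = ovec!["-c", "foo.c"];
/// let c_key = hash_key("compiler-digest", Language::C, &args, &[], &[], b"int x;", false);
/// let cxx_key = hash_key("compiler-digest", Language::C, &args, &[], &[], b"int x;", true);
/// assert_ne!(c_key, cxx_key); // clang and clang++ must not share cache entries
/// ```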
pub fn hash_key( compiler_digest: &str, language: Language, arguments: &[OsString], extra_hashes: &[String], env_vars: &[(OsString, OsString)], preprocessor_output: &[u8], plusplus: bool, ) -> String { // If you change any of the inputs to the hash, you should change `CACHE_VERSION`. let mut m = Digest::new(); m.update(compiler_digest.as_bytes()); // clang and clang++ have different behavior despite being byte-for-byte identical binaries, so // we have to incorporate that into the hash as well. m.update(&[plusplus as u8]); m.update(CACHE_VERSION); m.update(language.as_str().as_bytes()); for arg in arguments { arg.hash(&mut HashToDigest { digest: &mut m }); } for hash in extra_hashes { m.update(hash.as_bytes()); } for (var, val) in env_vars.iter() { if CACHED_ENV_VARS.contains(var.as_os_str()) { var.hash(&mut HashToDigest { digest: &mut m }); m.update(&b"="[..]); val.hash(&mut HashToDigest { digest: &mut m }); } } m.update(preprocessor_output); m.finish() } #[cfg(test)] mod test { use std::{collections::VecDeque, sync::Mutex}; use super::*; #[test] fn test_same_content() { let args = ovec!["a", "b", "c"]; const PREPROCESSED: &[u8] = b"hello world"; assert_eq!( hash_key("abcd", Language::C, &args, &[], &[], PREPROCESSED, false), hash_key("abcd", Language::C, &args, &[], &[], PREPROCESSED, false) ); } #[test] fn test_plusplus_differs() { let args = ovec!["a", "b", "c"]; const PREPROCESSED: &[u8] = b"hello world"; assert_neq!( hash_key("abcd", Language::C, &args, &[], &[], PREPROCESSED, false), hash_key("abcd", Language::C, &args, &[], &[], PREPROCESSED, true) ); } #[test] fn test_header_differs() { let args = ovec!["a", "b", "c"]; const PREPROCESSED: &[u8] = b"hello world"; assert_neq!( hash_key("abcd", Language::C, &args, &[], &[], PREPROCESSED, false), hash_key( "abcd", Language::CHeader, &args, &[], &[], PREPROCESSED, false ) ); } #[test] fn test_plusplus_header_differs() { let args = ovec!["a", "b", "c"]; const PREPROCESSED: &[u8] = b"hello world"; assert_neq!( hash_key("abcd", Language::Cxx, &args, &[], &[], PREPROCESSED, true), hash_key( "abcd", Language::CxxHeader, &args, &[], &[], PREPROCESSED, true ) ); } #[test] fn test_hash_key_executable_contents_differs() { let args = ovec!["a", "b", "c"]; const PREPROCESSED: &[u8] = b"hello world"; assert_neq!( hash_key("abcd", Language::C, &args, &[], &[], PREPROCESSED, false), hash_key("wxyz", Language::C, &args, &[], &[], PREPROCESSED, false) ); } #[test] fn test_hash_key_args_differs() { let digest = "abcd"; let abc = ovec!["a", "b", "c"]; let xyz = ovec!["x", "y", "z"]; let ab = ovec!["a", "b"]; let a = ovec!["a"]; const PREPROCESSED: &[u8] = b"hello world"; assert_neq!( hash_key(digest, Language::C, &abc, &[], &[], PREPROCESSED, false), hash_key(digest, Language::C, &xyz, &[], &[], PREPROCESSED, false) ); assert_neq!( hash_key(digest, Language::C, &abc, &[], &[], PREPROCESSED, false), hash_key(digest, Language::C, &ab, &[], &[], PREPROCESSED, false) ); assert_neq!( hash_key(digest, Language::C, &abc, &[], &[], PREPROCESSED, false), hash_key(digest, Language::C, &a, &[], &[], PREPROCESSED, false) ); } #[test] fn test_hash_key_preprocessed_content_differs() { let args = ovec!["a", "b", "c"]; assert_neq!( hash_key( "abcd", Language::C, &args, &[], &[], &b"hello world"[..], false ), hash_key("abcd", Language::C, &args, &[], &[], &b"goodbye"[..], false) ); } #[test] fn test_hash_key_env_var_differs() { let args = ovec!["a", "b", "c"]; let digest = "abcd"; const PREPROCESSED: &[u8] = b"hello world"; for var in CACHED_ENV_VARS.iter() { let h1 = 
hash_key(digest, Language::C, &args, &[], &[], PREPROCESSED, false); let vars = vec![(OsString::from(var), OsString::from("something"))]; let h2 = hash_key(digest, Language::C, &args, &[], &vars, PREPROCESSED, false); let vars = vec![(OsString::from(var), OsString::from("something else"))]; let h3 = hash_key(digest, Language::C, &args, &[], &vars, PREPROCESSED, false); assert_neq!(h1, h2); assert_neq!(h2, h3); } } #[test] fn test_extra_hash_data() { let args = ovec!["a", "b", "c"]; let digest = "abcd"; const PREPROCESSED: &[u8] = b"hello world"; let extra_data = stringvec!["hello", "world"]; assert_neq!( hash_key( digest, Language::C, &args, &extra_data, &[], PREPROCESSED, false ), hash_key(digest, Language::C, &args, &[], &[], PREPROCESSED, false) ); } #[test] fn test_language_from_file_name() { fn t(extension: &str, expected: Language) { let path_str = format!("input.{}", extension); let path = Path::new(&path_str); let actual = Language::from_file_name(path); assert_eq!(actual, Some(expected)); } t("c", Language::C); t("C", Language::Cxx); t("cc", Language::Cxx); t("cp", Language::Cxx); t("cpp", Language::Cxx); t("CPP", Language::Cxx); t("cxx", Language::Cxx); t("c++", Language::Cxx); t("h", Language::GenericHeader); t("hh", Language::CxxHeader); t("H", Language::CxxHeader); t("hp", Language::CxxHeader); t("hxx", Language::CxxHeader); t("hpp", Language::CxxHeader); t("HPP", Language::CxxHeader); t("h++", Language::CxxHeader); t("tcc", Language::CxxHeader); t("m", Language::ObjectiveC); t("M", Language::ObjectiveCxx); t("mm", Language::ObjectiveCxx); t("cu", Language::Cuda); t("hip", Language::Hip); } #[test] fn test_language_from_file_name_none() { fn t(extension: &str) { let path_str = format!("input.{}", extension); let path = Path::new(&path_str); let actual = Language::from_file_name(path); let expected = None; assert_eq!(actual, expected); } // gcc parses file-extensions as case-sensitive t("Cp"); t("Cpp"); t("Hp"); t("Hpp"); t("Mm"); t("Cu"); } #[test] fn test_process_preprocessed_file() { env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Debug) .try_init() .ok(); let input_file = Path::new("tests/test.c"); let path = Path::new(file!()) .parent() .unwrap() .parent() .unwrap() .parent() .unwrap(); // This should be portable since the only headers present in this // output are system headers, which aren't interacted with // on the filesystem if configured. let path = path.join("tests/test.c.gcc-13.2.0-preproc"); let mut bytes = std::fs::read(path).unwrap(); let original_bytes = bytes.clone(); let mut include_files = HashMap::new(); let config = PreprocessorCacheModeConfig { use_preprocessor_cache_mode: true, skip_system_headers: true, ..Default::default() }; let success = process_preprocessed_file( input_file, Path::new(""), &mut bytes, &mut include_files, config, std::time::SystemTime::now(), StandardFsAbstraction, ) .unwrap(); assert_eq!(&bytes, &original_bytes); assert!(success); assert_eq!(include_files.len(), 0); } /// A filesystem interface that only panics to test that we don't access it. struct PanicFs; impl PreprocessorFSAbstraction for PanicFs { fn metadata(&self, path: impl AsRef) -> io::Result { panic!("called metadata at {}", path.as_ref().display()); } fn open(&self, path: impl AsRef) -> io::Result> { panic!("called open at {}", path.as_ref().display()); } } /// A filesystem interface that gives back expected values. 
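/// Expected results are queued in order: each accessor pops the next
/// `(expected path, canned result)` pair, asserts that the requested path
/// matches the expected one, and panics if the queue is exhausted.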
struct TestFs { metadata_results: Mutex>, open_results: Mutex)>>, } impl PreprocessorFSAbstraction for TestFs { fn metadata(&self, path: impl AsRef) -> io::Result { let (expected_path, meta) = self .metadata_results .lock() .unwrap() .pop_front() .expect("not enough 'metadata' results"); assert_eq!(expected_path, path.as_ref(), "{}", path.as_ref().display()); Ok(meta) } fn open(&self, path: impl AsRef) -> io::Result> { let (expected_path, impls_read) = self .open_results .lock() .unwrap() .pop_front() .expect("not enough 'open' results"); assert_eq!(expected_path, path.as_ref(), "{}", path.as_ref().display()); Ok(impls_read) } } // Short-circuit the parameters we don't need to change during tests fn do_single_preprocessor_line_call( line: &[u8], include_files: &mut HashMap, fs_impl: &impl PreprocessorFSAbstraction, skip_system_headers: bool, ) -> PreprocessedLineAction { let input_file = Path::new("tests/test.c"); let config = PreprocessorCacheModeConfig { use_preprocessor_cache_mode: true, skip_system_headers, ..Default::default() }; let mut bytes = line.to_vec(); let total_len = bytes.len(); process_preprocessor_line( input_file, Path::new(""), include_files, config, std::time::SystemTime::now(), &mut bytes, 0, 0, &mut Digest::new(), total_len, &mut HashMap::new(), fs_impl, ) .unwrap() } /// Test cases where we don't access the filesystem #[test] fn test_process_preprocessor_line_simple() { env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Debug) .try_init() .ok(); let mut include_files = HashMap::new(); assert_eq!( do_single_preprocessor_line_call( br#"// # 0 "tests/test.c""#, &mut include_files, &PanicFs, true, ), ControlFlow::Continue((20, 20)), ); assert_eq!(include_files.len(), 0); assert_eq!( do_single_preprocessor_line_call( br#"// # 0 """#, &mut include_files, &PanicFs, true, ), ControlFlow::Continue((18, 18)), ); assert_eq!(include_files.len(), 0); assert_eq!( do_single_preprocessor_line_call( br#"// # 0 """#, &mut include_files, &PanicFs, true, ), ControlFlow::Continue((22, 22)), ); assert_eq!(include_files.len(), 0); assert_eq!( do_single_preprocessor_line_call( br#"// # 0 "" 2"#, &mut include_files, &PanicFs, true, ), ControlFlow::Continue((22, 22)), ); assert_eq!(include_files.len(), 0); assert_eq!( do_single_preprocessor_line_call( br#"// # 1 "tests/test.c""#, &mut include_files, &PanicFs, true, ), ControlFlow::Continue((20, 20)), ); assert_eq!(include_files.len(), 0); assert_eq!( do_single_preprocessor_line_call( br#"// # 1 "/usr/include/stdc-predef.h" 1 3 4"#, &mut include_files, &PanicFs, true, ), ControlFlow::Continue((34, 34)), ); assert_eq!(include_files.len(), 0); } /// Test cases where we test our tests... 
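/// (namely that `PanicFs` panics on any filesystem access, and that `TestFs`
/// fails loudly when it runs out of queued expectations)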
#[test] fn test_test_helpers() { env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Debug) .try_init() .ok(); // Test PanicFs let res = std::panic::catch_unwind(|| { let mut include_files = HashMap::new(); assert_eq!( do_single_preprocessor_line_call( br#"// # 1 "/usr/include/stdc-predef.h" 1 3 4"#, &mut include_files, &PanicFs, false, ), ControlFlow::Continue((34, 34)), ); }); assert_eq!( res.unwrap_err().downcast_ref::().unwrap(), "called metadata at /usr/include/stdc-predef.h" ); // Test TestFs's safeguard let res = std::panic::catch_unwind(|| { let mut include_files = HashMap::new(); let fs_impl = TestFs { metadata_results: Mutex::new(VecDeque::new()), open_results: Mutex::new(VecDeque::new()), }; assert_eq!( do_single_preprocessor_line_call( br#"// # 33 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 3 4"#, &mut include_files, &fs_impl, false, ), ControlFlow::Continue((34, 34)), ); }); assert_eq!( res.unwrap_err().downcast_ref::().unwrap(), "not enough 'metadata' results" ); } /// Test cases where we test filesystem access #[test] fn test_process_preprocessor_line_fs_access() { env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Debug) .try_init() .ok(); // Test "too new" include file let mut include_files = HashMap::new(); let fs_impl = TestFs { metadata_results: Mutex::new( [( PathBuf::from("/usr/include/x86_64-linux-gnu/bits/libc-header-start.h"), PreprocessorFileMetadata { is_dir: false, is_file: true, modified: Some(Timestamp::new(i64::MAX - 1, 0)), ctime_or_creation: None, }, )] .into_iter() .collect(), ), open_results: Mutex::new(VecDeque::new()), }; assert_eq!( do_single_preprocessor_line_call( br#"// # 33 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 3 4"#, &mut include_files, &fs_impl, false, ), // preprocessor cache mode is disabled ControlFlow::Break((63, 9, false)), ); // Test invalid include file is actually a dir let mut include_files = HashMap::new(); let fs_impl = TestFs { metadata_results: Mutex::new( [( PathBuf::from("/usr/include/x86_64-linux-gnu/bits/libc-header-start.h"), PreprocessorFileMetadata { is_dir: true, is_file: false, modified: Some(Timestamp::new(12341234, 0)), ctime_or_creation: None, }, )] .into_iter() .collect(), ), open_results: Mutex::new(VecDeque::new()), }; assert_eq!( do_single_preprocessor_line_call( br#"// # 33 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 3 4"#, &mut include_files, &fs_impl, false, ), // preprocessor cache mode is *not* disabled, ControlFlow::Continue((63, 63)), ); assert_eq!(include_files.len(), 0); // Test correct include file let mut include_files = HashMap::new(); let fs_impl = TestFs { metadata_results: Mutex::new( [( PathBuf::from("/usr/include/x86_64-linux-gnu/bits/libc-header-start.h"), PreprocessorFileMetadata { is_dir: false, is_file: true, modified: Some(Timestamp::new(12341234, 0)), ctime_or_creation: None, }, )] .into_iter() .collect(), ), open_results: Mutex::new( [( PathBuf::from("/usr/include/x86_64-linux-gnu/bits/libc-header-start.h"), Box::new(&b"contents"[..]) as Box, )] .into_iter() .collect(), ), }; assert_eq!( do_single_preprocessor_line_call( br#"// # 33 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 3 4"#, &mut include_files, &fs_impl, false, ), ControlFlow::Continue((63, 63)), ); assert_eq!(include_files.len(), 1); assert_eq!( include_files .get(Path::new( "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h", )) .unwrap(), // hash of `b"contents"` "a93900c371d997927c5bc568ea538bed59ae5c960021dcfe7b0b369da5267528", ); } } 
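
// A small illustrative test (added as a sketch, not part of the original
// source): `normalize_path` drops `.` components and resolves `..` lexically,
// without touching the filesystem.
#[cfg(test)]
mod normalize_path_example {
    use super::normalize_path;
    use std::path::{Path, PathBuf};

    #[test]
    fn drops_curdir_and_resolves_parentdir() {
        assert_eq!(
            normalize_path(Path::new("tests/./subdir/../test.c")),
            PathBuf::from("tests/test.c")
        );
        // `..` is resolved lexically, so symlinked parents are not followed.
        assert_eq!(normalize_path(Path::new("a/b/../../c")), PathBuf::from("c"));
    }
}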
mozilla-sccache-40c3d6b/src/compiler/cicc.rs000066400000000000000000000264121475712407500210500ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(unused_imports, dead_code, unused_variables)] use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::{ CCompileCommand, Cacheable, ColorMode, CompileCommand, CompilerArguments, Language, SingleCompileCommand, }; use crate::{counted_array, dist}; use crate::mock_command::{CommandCreator, CommandCreatorSync, RunCommand}; use async_trait::async_trait; use std::collections::HashMap; use std::ffi::OsString; use std::fs; use std::path::{Path, PathBuf}; use std::process; use crate::errors::*; /// A unit struct on which to implement `CCompilerImpl`. #[derive(Clone, Debug)] pub struct Cicc { pub version: Option, } #[async_trait] impl CCompilerImpl for Cicc { fn kind(&self) -> CCompilerKind { CCompilerKind::Cicc } fn plusplus(&self) -> bool { true } fn version(&self) -> Option { self.version.clone() } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments { parse_arguments(arguments, cwd, Language::Ptx, &ARGS[..], 3) } #[allow(clippy::too_many_arguments)] async fn preprocess( &self, _creator: &T, _executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, _env_vars: &[(OsString, OsString)], _may_dist: bool, _rewrite_includes_only: bool, _preprocessor_cache_mode: bool, ) -> Result where T: CommandCreatorSync, { preprocess(cwd, parsed_args).await } fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], _rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> where T: CommandCreatorSync, { generate_compile_commands(path_transformer, executable, parsed_args, cwd, env_vars).map( |(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }, ) } } pub fn parse_arguments( arguments: &[OsString], cwd: &Path, language: Language, arg_info: S, input_arg_offset_from_end: usize, ) -> CompilerArguments where S: SearchableArgInfo, { let mut args = arguments.to_vec(); let input_loc = arguments.len() - input_arg_offset_from_end; let input = args.splice(input_loc..input_loc + 1, []).next().unwrap(); let mut take_next = false; let mut outputs = HashMap::new(); let mut extra_dist_files = vec![]; let mut gen_module_id_file = false; let mut module_id_file_name = Option::::None; let mut common_args = vec![]; let mut unhashed_args = vec![]; for arg in ArgsIter::new(args.iter().cloned(), arg_info) { match arg { Ok(arg) => { let args = match arg.get_data() { Some(PassThrough(_)) => { take_next = false; &mut common_args } Some(Output(o)) => { take_next = false; let path = cwd.join(o); 
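                        // `-o` names the single cacheable artifact, recorded
                        // under the conventional "obj" key that
                        // `generate_compile_commands` looks up later.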
outputs.insert( "obj", ArtifactDescriptor { path, optional: false, }, ); continue; } Some(GenModuleIdFileFlag) => { take_next = false; gen_module_id_file = true; &mut common_args } Some(ModuleIdFileName(o)) => { take_next = false; module_id_file_name = Some(cwd.join(o)); &mut common_args } Some(UnhashedPassThrough(o)) => { take_next = false; &mut unhashed_args } Some(UnhashedOutput(o)) => { take_next = false; let path = cwd.join(o); if let Some(flag) = arg.flag_str() { outputs.insert( flag, ArtifactDescriptor { path, optional: false, }, ); } &mut unhashed_args } Some(UnhashedFlag) => { take_next = false; &mut unhashed_args } None => match arg { Argument::Raw(ref p) => { if take_next { take_next = false; &mut common_args } else { continue; } } Argument::UnknownFlag(ref p) => { let s = p.to_string_lossy(); take_next = s.starts_with('-'); &mut common_args } _ => unreachable!(), }, }; args.extend(arg.iter_os_strings()); } _ => continue, }; } if let Some(module_id_path) = module_id_file_name { if gen_module_id_file { outputs.insert( "--module_id_file_name", ArtifactDescriptor { path: module_id_path, optional: true, }, ); } else { extra_dist_files.push(module_id_path); } } CompilerArguments::Ok(ParsedArguments { input: input.into(), outputs, double_dash_input: false, language, compilation_flag: OsString::new(), depfile: None, dependency_args: vec![], preprocessor_args: vec![], common_args, arch_args: vec![], unhashed_args, extra_dist_files: extra_dist_files.clone(), extra_hash_files: extra_dist_files, msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Off, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }) } pub async fn preprocess(cwd: &Path, parsed_args: &ParsedArguments) -> Result { // cicc and ptxas expect input to be an absolute path let input = if parsed_args.input.is_absolute() { parsed_args.input.clone() } else { cwd.join(&parsed_args.input) }; std::fs::read(input) .map_err(anyhow::Error::new) .map(|s| process::Output { status: process::ExitStatus::default(), stdout: s, stderr: vec![], }) } pub fn generate_compile_commands( path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], ) -> Result<( SingleCompileCommand, Option, Cacheable, )> { // Unused arguments #[cfg(not(feature = "dist-client"))] { let _ = path_transformer; } let lang_str = &parsed_args.language.as_str(); let out_file = match parsed_args.outputs.get("obj") { Some(obj) => &obj.path, None => return Err(anyhow!("Missing {:?} file output", lang_str)), }; let mut arguments: Vec = vec![]; arguments.extend_from_slice(&parsed_args.common_args); arguments.extend_from_slice(&parsed_args.unhashed_args); arguments.extend(vec![ (&parsed_args.input).into(), "-o".into(), out_file.into(), ]); if log_enabled!(log::Level::Trace) { trace!( "[{}]: {} command: {:?}", out_file.file_name().unwrap().to_string_lossy(), executable.file_name().unwrap().to_string_lossy(), [ &[format!("cd {} &&", cwd.to_string_lossy()).to_string()], &[executable.to_str().unwrap_or_default().to_string()][..], &dist::osstrings_to_strings(&arguments).unwrap_or_default()[..] 
] .concat() .join(" ") ); } let command = SingleCompileCommand { executable: executable.to_owned(), arguments, env_vars: env_vars.to_owned(), cwd: cwd.to_owned(), }; #[cfg(not(feature = "dist-client"))] let dist_command = None; #[cfg(feature = "dist-client")] let dist_command = (|| { let mut arguments: Vec = vec![]; arguments.extend(dist::osstrings_to_strings(&parsed_args.common_args)?); arguments.extend(dist::osstrings_to_strings(&parsed_args.unhashed_args)?); arguments.extend(vec![ path_transformer.as_dist(&parsed_args.input)?, "-o".into(), path_transformer.as_dist(out_file)?, ]); Some(dist::CompileCommand { executable: path_transformer.as_dist(executable.canonicalize().unwrap().as_path())?, arguments, env_vars: dist::osstring_tuples_to_strings(env_vars)?, cwd: path_transformer.as_dist_abs(cwd)?, }) })(); Ok((command, dist_command, Cacheable::Yes)) } ArgData! { pub Output(PathBuf), PassThrough(OsString), UnhashedFlag, GenModuleIdFileFlag, ModuleIdFileName(PathBuf), UnhashedPassThrough(OsString), UnhashedOutput(PathBuf), } use self::ArgData::*; counted_array!(pub static ARGS: [ArgInfo; _] = [ take_arg!("--gen_c_file_name", PathBuf, Separated, UnhashedOutput), take_arg!("--gen_device_file_name", PathBuf, Separated, UnhashedOutput), flag!("--gen_module_id_file", GenModuleIdFileFlag), take_arg!("--include_file_name", OsString, Separated, PassThrough), take_arg!("--module_id_file_name", PathBuf, Separated, ModuleIdFileName), take_arg!("--stub_file_name", PathBuf, Separated, UnhashedOutput), take_arg!("-o", PathBuf, Separated, Output), ]); mozilla-sccache-40c3d6b/src/compiler/clang.rs000066400000000000000000001017601475712407500212330ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(unused_imports, dead_code, unused_variables)] use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::gcc::ArgData::*; use crate::compiler::{ gcc, write_temp_file, CCompileCommand, Cacheable, CompileCommand, CompilerArguments, Language, }; use crate::mock_command::{CommandCreator, CommandCreatorSync, RunCommand}; use crate::util::{run_input_output, OsStrExt}; use crate::{counted_array, dist}; use async_trait::async_trait; use fs::File; use fs_err as fs; use semver::{BuildMetadata, Prerelease, Version}; use std::ffi::OsString; use std::future::Future; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use std::process; use crate::errors::*; /// A struct on which to implement `CCompilerImpl`. #[derive(Clone, Debug)] pub struct Clang { /// true iff this is clang++. pub clangplusplus: bool, /// true iff this is Apple's clang(++). pub is_appleclang: bool, /// String from __VERSION__ macro. pub version: Option, } impl Clang { fn is_minversion(&self, major: u64) -> bool { // Apple clang follows its own versioning scheme. 
if self.is_appleclang { return false; } let version_val = match self.version.clone() { Some(version_val) => version_val, None => return false, }; let version_str = match version_val.split(' ').find(|x| x.contains('.')) { Some(version_str) => version_str, None => return false, }; let parsed_version = match Version::parse(version_str.trim_end_matches('"')) { Ok(parsed_version) => parsed_version, Err(e) => return false, }; parsed_version >= (Version { major, minor: 0, patch: 0, pre: Prerelease::default(), build: BuildMetadata::default(), }) } } #[async_trait] impl CCompilerImpl for Clang { fn kind(&self) -> CCompilerKind { CCompilerKind::Clang } fn plusplus(&self) -> bool { self.clangplusplus } fn version(&self) -> Option { self.version.clone() } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments { gcc::parse_arguments( arguments, cwd, (&gcc::ARGS[..], &ARGS[..]), self.clangplusplus, self.kind(), ) } #[allow(clippy::too_many_arguments)] async fn preprocess( &self, creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, rewrite_includes_only: bool, preprocessor_cache_mode: bool, ) -> Result where T: CommandCreatorSync, { let mut ignorable_whitespace_flags = if preprocessor_cache_mode { vec![] } else { vec!["-P".to_string()] }; // Clang 14 and later support -fminimize-whitespace, which normalizes away non-semantic whitespace which in turn increases cache hit rate. if self.is_minversion(14) { ignorable_whitespace_flags.push("-fminimize-whitespace".to_string()) } gcc::preprocess( creator, executable, parsed_args, cwd, env_vars, may_dist, self.kind(), rewrite_includes_only, ignorable_whitespace_flags, language_to_clang_arg, ) .await } fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> where T: CommandCreatorSync, { gcc::generate_compile_commands( path_transformer, executable, parsed_args, cwd, env_vars, self.kind(), rewrite_includes_only, language_to_clang_arg, ) .map(|(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }) } } pub fn language_to_clang_arg(lang: Language) -> Option<&'static str> { match lang { Language::C => Some("c"), Language::CHeader => Some("c-header"), Language::Cxx => Some("c++"), Language::CxxHeader => Some("c++-header"), Language::ObjectiveC => Some("objective-c"), Language::ObjectiveCxx => Some("objective-c++"), Language::ObjectiveCxxHeader => Some("objective-c++-header"), Language::Cuda => Some("cuda"), Language::CudaFE => None, Language::Ptx => None, Language::Cubin => None, Language::Rust => None, // Let the compiler decide Language::Hip => Some("hip"), Language::GenericHeader => None, // Let the compiler decide } } counted_array!(pub static ARGS: [ArgInfo; _] = [ take_arg!("--dependent-lib", OsString, Concatenated('='), PassThrough), take_arg!("--hip-device-lib-path", PathBuf, Concatenated('='), PassThroughPath), take_arg!("--hip-path", PathBuf, Concatenated('='), PassThroughPath), take_arg!("--rocm-path", PathBuf, Concatenated('='), PassThroughPath), take_arg!("--serialize-diagnostics", OsString, Separated, PassThrough), take_arg!("--target", OsString, Separated, PassThrough), // Note: for clang we must override the dep options from gcc.rs with `CanBeSeparated`. 
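    // e.g. clang accepts both `-MF foo.c.d` and `-MFfoo.c.d`; the
    // `CanBeSeparated` disposition makes both spellings parse identically
    // (exercised by `test_parse_clang_short_dependency_arguments_can_be_separated`).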
take_arg!("-MF", PathBuf, CanBeSeparated, DepArgumentPath), take_arg!("-MQ", OsString, CanBeSeparated, DepTarget), take_arg!("-MT", OsString, CanBeSeparated, DepTarget), flag!("-Wno-unknown-cuda-version", PassThroughFlag), flag!("-Wno-unused-parameter", PassThroughFlag), take_arg!("-Xclang", OsString, Separated, XClang), take_arg!("-add-plugin", OsString, Separated, PassThrough), take_arg!("-debug-info-kind", OsString, Concatenated('='), PassThrough), take_arg!("-dependency-file", PathBuf, Separated, DepArgumentPath), flag!("-emit-pch", PassThroughFlag), flag!("-fcolor-diagnostics", DiagnosticsColorFlag), flag!("-fcuda-allow-variadic-functions", PassThroughFlag), flag!("-fcxx-modules", TooHardFlag), take_arg!("-fdebug-compilation-dir", OsString, Separated, PassThrough), take_arg!("-fembed-offload-object", PathBuf, Concatenated('='), ExtraHashFile), flag!("-fmodules", TooHardFlag), flag!("-fno-color-diagnostics", NoDiagnosticsColorFlag), flag!("-fno-pch-timestamp", PassThroughFlag), flag!("-fno-profile-instr-generate", TooHardFlag), flag!("-fno-profile-instr-use", TooHardFlag), take_arg!("-fplugin", PathBuf, CanBeConcatenated('='), ExtraHashFile), flag!("-fprofile-instr-generate", ProfileGenerate), // Note: the PathBuf argument is optional take_arg!("-fprofile-instr-use", PathBuf, Concatenated('='), ClangProfileUse), // Note: this overrides the -fprofile-use option in gcc.rs. take_arg!("-fprofile-use", PathBuf, Concatenated('='), ClangProfileUse), take_arg!("-fsanitize-blacklist", PathBuf, Concatenated('='), ExtraHashFile), flag!("-fuse-ctor-homing", PassThroughFlag), take_arg!("-gcc-toolchain", OsString, Separated, PassThrough), flag!("-gcodeview", PassThroughFlag), take_arg!("-include-pch", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-load", PathBuf, Separated, ExtraHashFile), take_arg!("-mllvm", OsString, Separated, PassThrough), flag!("-no-opaque-pointers", PreprocessorArgumentFlag), take_arg!("-plugin-arg", OsString, Concatenated('-'), PassThrough), take_arg!("-target", OsString, Separated, PassThrough), flag!("-verify", PreprocessorArgumentFlag), take_arg!("/winsysroot", PathBuf, CanBeSeparated, PassThroughPath), ]); // Maps the `-fprofile-use` argument to the actual path of the // .profdata file Clang will try to use. pub(crate) fn resolve_profile_use_path(arg: &Path, cwd: &Path) -> PathBuf { // Note that `arg` might be empty (if no argument was given to // -fprofile-use), in which case `path` will be `cwd` after // the next statement and "./default.profdata" at the end of the // block. This matches Clang's behavior for when no argument is // given. let mut path = cwd.join(arg); assert!(!arg.as_os_str().is_empty() || path == cwd); // Clang allows specifying a directory here, in which case it // will look for the file `default.profdata` in that directory. if path.is_dir() { path.push("default.profdata"); } path } #[cfg(test)] mod test { use super::*; use crate::compiler::gcc; use crate::compiler::*; use crate::mock_command::*; use crate::server; use crate::test::mock_storage::MockStorage; use crate::test::utils::*; use std::collections::HashMap; use std::future::Future; use std::path::PathBuf; fn parse_arguments_(arguments: Vec) -> CompilerArguments { let arguments = arguments.iter().map(OsString::from).collect::>(); Clang { clangplusplus: false, is_appleclang: false, version: None, } .parse_arguments(&arguments, &std::env::current_dir().unwrap(), &[]) } macro_rules! 
parses { ( $( $s:expr ),* ) => { match parse_arguments_(vec![ $( $s.to_string(), )* ]) { CompilerArguments::Ok(a) => a, o => panic!("Got unexpected parse result: {:?}", o), } } } #[test] fn test_is_minversion() { assert!(Clang { clangplusplus: false, is_appleclang: false, version: Some("\"Ubuntu Clang 14.0.0\"".to_string()), } .is_minversion(14)); assert!(!Clang { clangplusplus: false, is_appleclang: false, version: Some("\"Ubuntu Clang 13.0.0\"".to_string()), } .is_minversion(14)); assert!(Clang { clangplusplus: false, is_appleclang: false, version: Some("\"FreeBSD Clang 14.0.5 (https://github.com/llvm/llvm-project.git llvmorg-14.0.5-0-gc12386ae247c)\"".to_string()), }.is_minversion(14)); assert!(!Clang { clangplusplus: false, is_appleclang: false, version: Some("\"FreeBSD Clang 13.0.0 (git@github.com:llvm/llvm-project.git llvmorg-13.0.0-0-gd7b669b3a303)\"".to_string()), }.is_minversion(14)); assert!(!Clang { clangplusplus: false, is_appleclang: true, version: Some("\"FreeBSD Clang 14.0.5 (https://github.com/llvm/llvm-project.git llvmorg-14.0.5-0-gc12386ae247c)\"".to_string()), }.is_minversion(14)); // is_appleclang wins } #[test] fn test_parse_arguments_simple() { let a = parses!("-c", "foo.c", "-o", "foo.o"); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::C, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert!(a.common_args.is_empty()); } #[test] fn test_parse_arguments_values() { let a = parses!( "-c", "foo.cxx", "-arch", "xyz", "-fabc", "-I", "include", "-o", "foo.o", "-include", "file", "/winsysroot../some/dir" ); assert_eq!(Some("foo.cxx"), a.input.to_str()); assert_eq!(Language::Cxx, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert_eq!(ovec!["-Iinclude", "-include", "file"], a.preprocessor_args); assert_eq!(ovec!["-fabc", "/winsysroot", "../some/dir"], a.common_args); assert_eq!(ovec!["-arch", "xyz"], a.arch_args); } #[test] fn test_parse_arguments_cuda() { let a = parses!("-c", "foo.cu", "-o", "foo.o"); assert_eq!(Some("foo.cu"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert!(a.common_args.is_empty()); } #[test] fn test_parse_arguments_cuda_flags() { let a = parses!( "-c", "foo.cpp", "-x", "cuda", "--cuda-gpu-arch=sm_50", "-o", "foo.o" ); assert_eq!(Some("foo.cpp"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["--cuda-gpu-arch=sm_50"], a.common_args); let b = parses!( "-c", "foo.cpp", "-x", "cu", "--cuda-gpu-arch=sm_50", "--no-cuda-include-ptx=sm_50", "-o", "foo.o" ); assert_eq!(Some("foo.cpp"), b.input.to_str()); assert_eq!(Language::Cuda, b.language); assert_map_contains!( b.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(b.preprocessor_args.is_empty()); assert_eq!( ovec!["--cuda-gpu-arch=sm_50", "--no-cuda-include-ptx=sm_50"], b.common_args ); } #[test] fn test_parse_arguments_hip() { let a = parses!("-c", "foo.hip", "-o", "foo.o"); assert_eq!(Some("foo.hip"), a.input.to_str()); assert_eq!(Language::Hip, a.language); assert_map_contains!( 
a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert!(a.common_args.is_empty()); } #[test] fn test_parse_arguments_hip_flags() { let a = parses!( "-c", "foo.cpp", "-x", "hip", "--offload-arch=gfx900", "-o", "foo.o" ); assert_eq!(Some("foo.cpp"), a.input.to_str()); assert_eq!(Language::Hip, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["--offload-arch=gfx900"], a.common_args); let b = parses!( "-c", "foo.cpp", "-x", "hip", "--offload-arch=gfx900", "-o", "foo.o" ); assert_eq!(Some("foo.cpp"), b.input.to_str()); assert_eq!(Language::Hip, b.language); assert_map_contains!( b.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(b.preprocessor_args.is_empty()); assert_eq!(ovec!["--offload-arch=gfx900"], b.common_args); } #[test] fn test_parse_arguments_hip_paths() { let a = parses!( "-c", "foo.cpp", "-x", "hip", "--offload-arch=gfx900", "-o", "foo.o", "--hip-path=/usr" ); assert_eq!(Some("foo.cpp"), a.input.to_str()); assert_eq!(Language::Hip, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!( ovec!["--offload-arch=gfx900", "--hip-path=/usr"], a.common_args ); let b = parses!( "-c", "foo.cpp", "-x", "hip", "--offload-arch=gfx900", "-o", "foo.o", "--hip-device-lib-path=/usr/lib64/amdgcn/bitcode" ); assert_eq!(Some("foo.cpp"), b.input.to_str()); assert_eq!(Language::Hip, b.language); assert_map_contains!( b.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(b.preprocessor_args.is_empty()); assert_eq!( ovec![ "--offload-arch=gfx900", "--hip-device-lib-path=/usr/lib64/amdgcn/bitcode" ], b.common_args ); } #[test] fn test_dependent_lib() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "--dependent-lib=msvcrt" ); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::C, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert_eq!(ovec!["-Xclang", "--dependent-lib=msvcrt"], a.common_args); } #[test] fn test_parse_arguments_others() { parses!("-c", "foo.c", "-B", "somewhere", "-o", "foo.o"); parses!( "-c", "foo.c", "-target", "x86_64-apple-darwin11", "-o", "foo.o" ); parses!("-c", "foo.c", "-gcc-toolchain", "somewhere", "-o", "foo.o"); } #[test] fn test_gcodeview() { parses!("-c", "foo.c", "-o", "foo.o", "-Xclang", "-gcodeview"); } #[test] fn test_emit_pch() { let a = parses!( "-Xclang", "-emit-pch", "-Xclang", "-include", "-Xclang", "pch.hxx", "-x", "c++-header", "-o", "pch.hxx.pch", "-c", "pch.hxx.cxx" ); assert_eq!(Some("pch.hxx.cxx"), a.input.to_str()); assert_eq!(Language::CxxHeader, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("pch.hxx.pch"), optional: false } ) ); println!("{:?}", a); assert_eq!( ovec!["-Xclang", "-include", "-Xclang", "pch.hxx"], a.preprocessor_args ); assert_eq!(ovec!["-Xclang", "-emit-pch"], a.common_args) } #[test] fn test_parse_clang_short_dependency_arguments_can_be_separated() { let args = vec!["-MF", "-MT", "-MQ"]; let formats = vec![ "foo.c.d", "\"foo.c.d\"", "=foo.c.d", "./foo.c.d", "/somewhere/foo.c.d", ]; for arg in args { for format in &formats { let parsed_separated = 
parses!("-c", "foo.c", "-MD", arg, format); let parsed = parses!("-c", "foo.c", "-MD", format!("{arg}{format}")); assert_eq!(parsed.dependency_args, parsed_separated.dependency_args); } } } #[test] fn test_parse_arguments_clangmodules() { assert_eq!( CompilerArguments::CannotCache("-fcxx-modules", None), parse_arguments_(stringvec!["-c", "foo.c", "-fcxx-modules", "-o", "foo.o"]) ); assert_eq!( CompilerArguments::CannotCache("-fmodules", None), parse_arguments_(stringvec!["-c", "foo.c", "-fmodules", "-o", "foo.o"]) ); } #[test] fn test_parse_xclang_invalid() { assert_eq!( CompilerArguments::CannotCache( "Can't handle Raw arguments with -Xclang", Some("broken".to_string()) ), parse_arguments_(stringvec![ "-c", "foo.c", "-o", "foo.o", "-Xclang", "broken" ]) ); assert_eq!( CompilerArguments::CannotCache( "Can't handle UnknownFlag arguments with -Xclang", Some("-broken".to_string()) ), parse_arguments_(stringvec![ "-c", "foo.c", "-o", "foo.o", "-Xclang", "-broken" ]) ); assert_eq!( CompilerArguments::CannotCache( "argument parse", Some("Unexpected end of args".to_string()) ), parse_arguments_(stringvec!["-c", "foo.c", "-o", "foo.o", "-Xclang", "-load"]) ); } #[test] fn test_parse_xclang_load() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "-load", "-Xclang", "plugin.so" ); println!("A {:#?}", a); assert_eq!( ovec!["-Xclang", "-load", "-Xclang", "plugin.so"], a.common_args ); assert_eq!( ovec![std::env::current_dir().unwrap().join("plugin.so")], a.extra_hash_files ); } #[test] fn test_parse_xclang_add_plugin() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "-add-plugin", "-Xclang", "foo" ); assert_eq!( ovec!["-Xclang", "-add-plugin", "-Xclang", "foo"], a.common_args ); } #[test] fn test_parse_xclang_llvm_stuff() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "-mllvm", "-Xclang", "-instcombine-lower-dbg-declare=0", "-Xclang", "-debug-info-kind=constructor" ); assert_eq!( ovec![ "-Xclang", "-mllvm", "-Xclang", "-instcombine-lower-dbg-declare=0", "-Xclang", "-debug-info-kind=constructor" ], a.common_args ); } #[test] fn test_parse_xclang_plugin_arg_blink_gc_plugin() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "-add-plugin", "-Xclang", "blink-gc-plugin", "-Xclang", "-plugin-arg-blink-gc-plugin", "-Xclang", "no-members-in-stack-allocated" ); assert_eq!( ovec![ "-Xclang", "-add-plugin", "-Xclang", "blink-gc-plugin", "-Xclang", "-plugin-arg-blink-gc-plugin", "-Xclang", "no-members-in-stack-allocated" ], a.common_args ); } #[test] fn test_parse_xclang_plugin_arg_find_bad_constructs() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "-add-plugin", "-Xclang", "find-bad-constructs", "-Xclang", "-plugin-arg-find-bad-constructs", "-Xclang", "check-ipc" ); assert_eq!( ovec![ "-Xclang", "-add-plugin", "-Xclang", "find-bad-constructs", "-Xclang", "-plugin-arg-find-bad-constructs", "-Xclang", "check-ipc" ], a.common_args ); } #[test] fn test_parse_xclang_verify() { let a = parses!("-c", "foo.c", "-o", "foo.o", "-Xclang", "-verify"); assert_eq!(ovec!["-Xclang", "-verify"], a.preprocessor_args); } #[test] fn test_parse_xclang_no_opaque_pointers() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "-no-opaque-pointers" ); assert_eq!(ovec!["-Xclang", "-no-opaque-pointers"], a.preprocessor_args); } #[test] fn test_parse_xclang_fno_pch_timestamp() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-Xclang", "-fno-pch-timestamp" ); assert_eq!(ovec!["-Xclang", "-fno-pch-timestamp"], a.common_args); } #[test] fn test_parse_xclang_use_ctor_homing() 
{ let a = parses!("-c", "foo.c", "-o", "foo.o", "-Xclang", "-fuse-ctor-homing"); assert_eq!(ovec!["-Xclang", "-fuse-ctor-homing"], a.common_args); } #[test] fn test_parse_fplugin() { let a = parses!("-c", "foo.c", "-o", "foo.o", "-fplugin", "plugin.so"); println!("A {:#?}", a); assert_eq!(ovec!["-fplugin", "plugin.so"], a.common_args); assert_eq!( ovec![std::env::current_dir().unwrap().join("plugin.so")], a.extra_hash_files ); } #[test] fn test_parse_fsanitize_blacklist() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-fsanitize-blacklist=list.txt" ); assert_eq!(ovec!["-fsanitize-blacklist=list.txt"], a.common_args); assert_eq!( ovec![std::env::current_dir().unwrap().join("list.txt")], a.extra_hash_files ); } #[test] fn test_parse_color_diags() { let a = parses!("-c", "foo.c", "-o", "foo.o", "-fcolor-diagnostics"); assert_eq!(a.color_mode, ColorMode::On); let a = parses!("-c", "foo.c", "-o", "foo.o", "-fno-color-diagnostics"); assert_eq!(a.color_mode, ColorMode::Off); let a = parses!("-c", "foo.c", "-o", "foo.o"); assert_eq!(a.color_mode, ColorMode::Auto); } #[test] fn test_parse_arguments_profile_instr_use() { let a = parses!( "-c", "foo.c", "-o", "foo.o", "-fprofile-instr-use=foo.profdata" ); assert_eq!(ovec!["-fprofile-instr-use=foo.profdata"], a.common_args); assert_eq!( ovec![std::env::current_dir().unwrap().join("foo.profdata")], a.extra_hash_files ); } #[test] fn test_parse_arguments_profile_use() { let a = parses!("-c", "foo.c", "-o", "foo.o", "-fprofile-use=xyz.profdata"); assert_eq!(ovec!["-fprofile-use=xyz.profdata"], a.common_args); assert_eq!( ovec![std::env::current_dir().unwrap().join("xyz.profdata")], a.extra_hash_files ); } #[test] fn test_parse_arguments_profile_use_with_directory() { let a = parses!("-c", "foo.c", "-o", "foo.o", "-fprofile-use=."); assert_eq!(ovec!["-fprofile-use=."], a.common_args); assert_eq!( ovec![std::env::current_dir().unwrap().join("default.profdata")], a.extra_hash_files ); } #[test] fn test_parse_arguments_profile_use_with_no_argument() { let a = parses!("-c", "foo.c", "-o", "foo.o", "-fprofile-use"); assert_eq!(ovec!["-fprofile-use"], a.common_args); assert_eq!( ovec![std::env::current_dir().unwrap().join("default.profdata")], a.extra_hash_files ); } #[test] fn test_parse_arguments_pgo_cancellation() { assert_eq!( CompilerArguments::CannotCache("-fno-profile-use", None), parse_arguments_(stringvec![ "-c", "foo.c", "-o", "foo.o", "-fprofile-use", "-fno-profile-use" ]) ); assert_eq!( CompilerArguments::CannotCache("-fno-profile-instr-use", None), parse_arguments_(stringvec![ "-c", "foo.c", "-o", "foo.o", "-fprofile-instr-use", "-fno-profile-instr-use" ]) ); assert_eq!( CompilerArguments::CannotCache("-fno-profile-generate", None), parse_arguments_(stringvec![ "-c", "foo.c", "-o", "foo.o", "-fprofile-generate", "-fno-profile-generate" ]) ); assert_eq!( CompilerArguments::CannotCache("-fno-profile-instr-generate", None), parse_arguments_(stringvec![ "-c", "foo.c", "-o", "foo.o", "-fprofile-instr-generate", "-fno-profile-instr-generate" ]) ); } #[test] fn test_compile_clang_cuda_does_not_dist_compile() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.cu".into(), double_dash_input: false, language: Language::Cuda, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.cu.o".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec![], arch_args: vec![], unhashed_args: vec![], 
extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, dist_command, cacheable) = gcc::generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], CCompilerKind::Clang, false, language_to_clang_arg, ) .unwrap(); // ClangCUDA cannot be dist-compiled assert!(dist_command.is_none()); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable); // Ensure that we ran all processes. assert_eq!(0, creator.lock().unwrap().children.len()); } } mozilla-sccache-40c3d6b/src/compiler/compiler.rs000066400000000000000000003462071475712407500217700ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::{Cache, CacheWrite, DecompressionFailure, FileObjectSource, Storage}; use crate::compiler::args::*; use crate::compiler::c::{CCompiler, CCompilerKind}; use crate::compiler::cicc::Cicc; use crate::compiler::clang::Clang; use crate::compiler::cudafe::CudaFE; use crate::compiler::diab::Diab; use crate::compiler::gcc::Gcc; use crate::compiler::msvc; use crate::compiler::msvc::Msvc; use crate::compiler::nvcc::Nvcc; use crate::compiler::nvcc::NvccHostCompiler; use crate::compiler::nvhpc::Nvhpc; use crate::compiler::ptxas::Ptxas; use crate::compiler::rust::{Rust, RustupProxy}; use crate::compiler::tasking_vx::TaskingVX; #[cfg(feature = "dist-client")] use crate::dist::pkg; #[cfg(feature = "dist-client")] use crate::lru_disk_cache; use crate::mock_command::{exit_status, CommandChild, CommandCreatorSync, RunCommand}; use crate::server; use crate::util::{fmt_duration_as_secs, run_input_output}; use crate::{counted_array, dist}; use async_trait::async_trait; use filetime::FileTime; use fs::File; use fs_err as fs; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::ffi::{OsStr, OsString}; use std::fmt; use std::future::Future; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::process::{self, Stdio}; use std::str; use std::sync::Arc; use std::time::{Duration, Instant}; use tempfile::TempDir; use crate::errors::*; /// Can dylibs (shared libraries or proc macros) be distributed on this platform? 
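///
/// Only x86_64 Linux and FreeBSD dist clients can currently package dylibs;
/// everywhere else this is `false` and `CInputsPackager::write_inputs` bails
/// on dylib inputs.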
#[cfg(all( feature = "dist-client", any( all(target_os = "linux", target_arch = "x86_64"), target_os = "freebsd" ) ))] pub const CAN_DIST_DYLIBS: bool = true; #[cfg(all( feature = "dist-client", not(any( all(target_os = "linux", target_arch = "x86_64"), target_os = "freebsd" )) ))] pub const CAN_DIST_DYLIBS: bool = false; #[async_trait] pub trait CompileCommand: Send + Sync + 'static where T: CommandCreatorSync, { async fn execute( &self, service: &server::SccacheService, creator: &T, ) -> Result; fn get_executable(&self) -> PathBuf; fn get_arguments(&self) -> Vec; fn get_env_vars(&self) -> Vec<(OsString, OsString)>; fn get_cwd(&self) -> PathBuf; } #[derive(Debug)] pub struct CCompileCommand where I: CompileCommandImpl, { cmd: I, } impl CCompileCommand where I: CompileCommandImpl, { #[allow(clippy::new_ret_no_self)] pub fn new(cmd: I) -> Box> where T: CommandCreatorSync, { Box::new(CCompileCommand { cmd }) as Box> } } #[async_trait] impl CompileCommand for CCompileCommand where T: CommandCreatorSync, I: CompileCommandImpl, { fn get_executable(&self) -> PathBuf { self.cmd.get_executable() } fn get_arguments(&self) -> Vec { self.cmd.get_arguments() } fn get_env_vars(&self) -> Vec<(OsString, OsString)> { self.cmd.get_env_vars() } fn get_cwd(&self) -> PathBuf { self.cmd.get_cwd() } async fn execute( &self, service: &server::SccacheService, creator: &T, ) -> Result { self.cmd.execute(service, creator).await } } #[async_trait] pub trait CompileCommandImpl: Send + Sync + 'static { fn get_executable(&self) -> PathBuf; fn get_arguments(&self) -> Vec; fn get_env_vars(&self) -> Vec<(OsString, OsString)>; fn get_cwd(&self) -> PathBuf; async fn execute( &self, service: &server::SccacheService, creator: &T, ) -> Result where T: CommandCreatorSync; } #[derive(Debug)] pub struct SingleCompileCommand { pub executable: PathBuf, pub arguments: Vec, pub env_vars: Vec<(OsString, OsString)>, pub cwd: PathBuf, } #[async_trait] impl CompileCommandImpl for SingleCompileCommand { fn get_executable(&self) -> PathBuf { self.executable.clone() } fn get_arguments(&self) -> Vec { self.arguments.clone() } fn get_env_vars(&self) -> Vec<(OsString, OsString)> { self.env_vars.clone() } fn get_cwd(&self) -> PathBuf { self.cwd.clone() } async fn execute( &self, _: &server::SccacheService, creator: &T, ) -> Result where T: CommandCreatorSync, { let SingleCompileCommand { executable, arguments, env_vars, cwd, } = self; let mut cmd = creator.clone().new_command_sync(executable); cmd.args(arguments) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); run_input_output(cmd, None).await } } /// Supported compilers. #[derive(Debug, PartialEq, Eq, Clone)] pub enum CompilerKind { /// A C compiler. C(CCompilerKind), /// A Rust compiler. 
Rust, } #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum Language { C, Cxx, GenericHeader, CHeader, CxxHeader, ObjectiveC, ObjectiveCxx, ObjectiveCxxHeader, Cuda, CudaFE, Ptx, Cubin, Rust, Hip, } impl Language { pub fn from_file_name(file: &Path) -> Option { match file.extension().and_then(|e| e.to_str()) { // gcc: https://gcc.gnu.org/onlinedocs/gcc/Overall-Options.html Some("c") => Some(Language::C), // Could be C or C++ Some("h") => Some(Language::GenericHeader), // TODO i Some("C") | Some("cc") | Some("cp") | Some("cpp") | Some("CPP") | Some("cxx") | Some("c++") => Some(Language::Cxx), // TODO ii Some("H") | Some("hh") | Some("hp") | Some("hpp") | Some("HPP") | Some("hxx") | Some("h++") | Some("tcc") => Some(Language::CxxHeader), Some("m") => Some(Language::ObjectiveC), // TODO mi Some("M") | Some("mm") => Some(Language::ObjectiveCxx), // TODO mii Some("cu") => Some(Language::Cuda), Some("ptx") => Some(Language::Ptx), Some("cubin") => Some(Language::Cubin), // TODO cy Some("rs") => Some(Language::Rust), Some("hip") => Some(Language::Hip), e => { trace!("Unknown source extension: {}", e.unwrap_or("(None)")); None } } } pub fn as_str(self) -> &'static str { match self { Language::C => "c", Language::CHeader => "cHeader", Language::Cxx => "c++", Language::CxxHeader => "c++Header", Language::GenericHeader => "c/c++", Language::ObjectiveC => "objc", Language::ObjectiveCxx | Language::ObjectiveCxxHeader => "objc++", Language::Cuda => "cuda", Language::CudaFE => "cuda", Language::Ptx => "ptx", Language::Cubin => "cubin", Language::Rust => "rust", Language::Hip => "hip", } } } impl CompilerKind { pub fn lang_kind(&self, lang: &Language) -> String { match lang { Language::C | Language::CHeader | Language::Cxx | Language::CxxHeader | Language::GenericHeader | Language::ObjectiveC | Language::ObjectiveCxx | Language::ObjectiveCxxHeader => "C/C++", Language::Cuda => "CUDA", Language::CudaFE => "CUDA (Device code)", Language::Ptx => "PTX", Language::Cubin => "CUBIN", Language::Rust => "Rust", Language::Hip => "HIP", } .to_string() } pub fn lang_comp_kind(&self, lang: &Language) -> String { let textual_lang = lang.as_str().to_owned(); match self { CompilerKind::C(CCompilerKind::Clang) => textual_lang + " [clang]", CompilerKind::C(CCompilerKind::Diab) => textual_lang + " [diab]", CompilerKind::C(CCompilerKind::Gcc) => textual_lang + " [gcc]", CompilerKind::C(CCompilerKind::Msvc) => textual_lang + " [msvc]", CompilerKind::C(CCompilerKind::Nvcc) => textual_lang + " [nvcc]", CompilerKind::C(CCompilerKind::CudaFE) => textual_lang + " [cudafe++]", CompilerKind::C(CCompilerKind::Cicc) => textual_lang + " [cicc]", CompilerKind::C(CCompilerKind::Ptxas) => textual_lang + " [ptxas]", CompilerKind::C(CCompilerKind::Nvhpc) => textual_lang + " [nvhpc]", CompilerKind::C(CCompilerKind::TaskingVX) => textual_lang + " [taskingvx]", CompilerKind::Rust => textual_lang, } } } #[cfg(feature = "dist-client")] pub type DistPackagers = ( Box, Box, Box, ); enum CacheLookupResult { Success(CompileResult, process::Output), Miss(MissType), } /// An interface to a compiler for argument parsing. pub trait Compiler: Send + Sync + 'static where T: CommandCreatorSync, { /// Return the kind of compiler. fn kind(&self) -> CompilerKind; /// Retrieve a packager #[cfg(feature = "dist-client")] fn get_toolchain_packager(&self) -> Box; /// Determine whether `arguments` are supported by this compiler. 
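    /// Illustrative sketch (not from the original source): callers typically
    /// match on the three `CompilerArguments` variants defined later in this
    /// module, e.g.
    ///
    /// ```ignore
    /// match compiler.parse_arguments(&args, &cwd, &env_vars) {
    ///     CompilerArguments::Ok(hasher) => { /* hash inputs, consult the cache */ }
    ///     CompilerArguments::CannotCache(why, _extra) => { /* run the compiler directly */ }
    ///     CompilerArguments::NotCompilation => { /* not a compile; pass through */ }
    /// }
    /// ```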
    fn parse_arguments(
        &self,
        arguments: &[OsString],
        cwd: &Path,
        env_vars: &[(OsString, OsString)],
    ) -> CompilerArguments<Box<dyn CompilerHasher<T> + 'static>>;

    fn box_clone(&self) -> Box<dyn Compiler<T>>;
}

impl<T: CommandCreatorSync> Clone for Box<dyn Compiler<T>> {
    fn clone(&self) -> Box<dyn Compiler<T>> {
        self.box_clone()
    }
}

pub trait CompilerProxy<T>: Send + Sync + 'static
where
    T: CommandCreatorSync + Sized,
{
    /// Maps the executable to be used in `cwd` to the true, proxied compiler.
    ///
    /// Returns the absolute path to the true compiler and the timestamp of
    /// the true compiler. Iff the resolution fails,
    /// the returned future resolves to an error with more information.
    fn resolve_proxied_executable(
        &self,
        creator: T,
        cwd: PathBuf,
        env_vars: &[(OsString, OsString)],
    ) -> Pin<Box<dyn Future<Output = Result<(PathBuf, FileTime)>> + Send + 'static>>;

    /// Create a clone of `Self` and put it in a `Box`
    fn box_clone(&self) -> Box<dyn CompilerProxy<T>>;
}

impl<T: CommandCreatorSync> Clone for Box<dyn CompilerProxy<T>> {
    fn clone(&self) -> Box<dyn CompilerProxy<T>> {
        self.box_clone()
    }
}

/// An interface to a compiler for hash key generation, the result of
/// argument parsing.
#[async_trait]
pub trait CompilerHasher<T>: fmt::Debug + Send + 'static
where
    T: CommandCreatorSync,
{
    /// Given information about a compiler command, generate a hash key
    /// that can be used for cache lookups, as well as any additional
    /// information that can be reused for compilation if necessary.
    #[allow(clippy::too_many_arguments)]
    async fn generate_hash_key(
        self: Box<Self>,
        creator: &T,
        cwd: PathBuf,
        env_vars: Vec<(OsString, OsString)>,
        may_dist: bool,
        pool: &tokio::runtime::Handle,
        rewrite_includes_only: bool,
        storage: Arc<dyn Storage>,
        cache_control: CacheControl,
    ) -> Result<HashResult<T>>;

    /// Return the state of any `--color` option passed to the compiler.
    fn color_mode(&self) -> ColorMode;

    /// Look up a cached compile result in `storage`. If not found, run the
    /// compile and store the result.
    #[allow(clippy::too_many_arguments)]
    async fn get_cached_or_compile(
        self: Box<Self>,
        service: &server::SccacheService<T>,
        dist_client: Option<Arc<dyn dist::Client>>,
        creator: T,
        storage: Arc<dyn Storage>,
        arguments: Vec<OsString>,
        cwd: PathBuf,
        env_vars: Vec<(OsString, OsString)>,
        cache_control: CacheControl,
        pool: tokio::runtime::Handle,
    ) -> Result<(CompileResult, process::Output)> {
        let out_pretty = self.output_pretty().into_owned();
        debug!("[{}]: get_cached_or_compile: {:?}", out_pretty, arguments);
        let start = Instant::now();
        let may_dist = dist_client.is_some();
        let rewrite_includes_only = match dist_client {
            Some(ref client) => client.rewrite_includes_only(),
            _ => false,
        };
        let result = self
            .generate_hash_key(
                &creator,
                cwd.clone(),
                env_vars,
                may_dist,
                &pool,
                rewrite_includes_only,
                storage.clone(),
                cache_control,
            )
            .await;
        debug!(
            "[{}]: generate_hash_key took {}",
            out_pretty,
            fmt_duration_as_secs(&start.elapsed())
        );
        let (key, compilation, weak_toolchain_key) = match result {
            Err(e) => {
                return match e.downcast::<ProcessError>() {
                    Ok(ProcessError(output)) => Ok((CompileResult::Error, output)),
                    Err(e) => Err(e),
                };
            }
            Ok(HashResult {
                key,
                compilation,
                weak_toolchain_key,
            }) => (key, compilation, weak_toolchain_key),
        };
        debug!("[{}]: Hash key: {}", out_pretty, key);
        // If `ForceRecache` is enabled, we won't check the cache.
        let start = Instant::now();
        let cache_status = async {
            if cache_control == CacheControl::ForceNoCache {
                Ok(Cache::None)
            } else if cache_control == CacheControl::ForceRecache {
                Ok(Cache::Recache)
            } else {
                storage.get(&key).await
            }
        };

        // Set a maximum time limit for the cache to respond before we forge
        // ahead ourselves with a compilation.
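        // Note (illustrative): `tokio::time::timeout` wraps the lookup result
        // in another `Result`, so the match further below distinguishes:
        //     (Ok(Ok(Cache::Hit(..))), duration) -> storage answered in time
        //     (Ok(Err(e)), duration)             -> storage reported an error
        //     (Err(_), duration)                 -> the lookup timed out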
let timeout = Duration::new(60, 0); let cache_status = async { let res = tokio::time::timeout(timeout, cache_status).await; let duration = start.elapsed(); (res, duration) }; // Check the result of the cache lookup. let outputs = compilation .outputs() .map(|output| FileObjectSource { path: cwd.join(output.path), ..output }) .collect::>(); let lookup = match cache_status.await { (Ok(Ok(Cache::Hit(mut entry))), duration) => { debug!( "[{}]: Cache hit in {}", out_pretty, fmt_duration_as_secs(&duration) ); let stdout = entry.get_stdout(); let stderr = entry.get_stderr(); let output = process::Output { status: exit_status(0), stdout, stderr, }; let hit = CompileResult::CacheHit(duration); match entry.extract_objects(outputs.clone(), &pool).await { Ok(()) => Ok(CacheLookupResult::Success(hit, output)), Err(e) => { if e.downcast_ref::().is_some() { debug!("[{}]: Failed to decompress object", out_pretty); Ok(CacheLookupResult::Miss(MissType::CacheReadError)) } else { Err(e) } } } } (Ok(Ok(Cache::Miss)), duration) => { debug!( "[{}]: Cache miss in {}", out_pretty, fmt_duration_as_secs(&duration) ); Ok(CacheLookupResult::Miss(MissType::Normal)) } (Ok(Ok(Cache::None)), duration) => { debug!( "[{}]: Cache none in {}", out_pretty, fmt_duration_as_secs(&duration) ); Ok(CacheLookupResult::Miss(MissType::ForcedNoCache)) } (Ok(Ok(Cache::Recache)), duration) => { debug!( "[{}]: Cache recache in {}", out_pretty, fmt_duration_as_secs(&duration) ); Ok(CacheLookupResult::Miss(MissType::ForcedRecache)) } (Ok(Err(err)), duration) => { error!( "[{}]: Cache read error: {:?} in {}", out_pretty, err, fmt_duration_as_secs(&duration) ); Ok(CacheLookupResult::Miss(MissType::CacheReadError)) } (Err(_), duration) => { debug!( "[{}]: Cache timed out {}", out_pretty, fmt_duration_as_secs(&duration) ); Ok(CacheLookupResult::Miss(MissType::TimedOut)) } }?; match lookup { CacheLookupResult::Success(compile_result, output) => { Ok::<_, Error>((compile_result, output)) } CacheLookupResult::Miss(miss_type) => { // Cache miss, so compile it. 
let start = Instant::now(); let (cacheable, dist_type, compiler_result) = dist_or_local_compile( service, dist_client, creator, cwd, compilation, weak_toolchain_key, out_pretty.clone(), ) .await?; let duration_compilation = start.elapsed(); if !compiler_result.status.success() { debug!( "[{}]: Compiled in {}, but failed, not storing in cache", out_pretty, fmt_duration_as_secs(&duration_compilation) ); return Ok(( CompileResult::CompileFailed(dist_type, duration_compilation), compiler_result, )); } if miss_type == MissType::ForcedNoCache { // Do not cache debug!( "[{}]: Compiled in {}, but not caching", out_pretty, fmt_duration_as_secs(&duration_compilation) ); return Ok(( CompileResult::NotCached(dist_type, duration_compilation), compiler_result, )); } if cacheable != Cacheable::Yes { // Not cacheable debug!( "[{}]: Compiled in {}, but not cacheable", out_pretty, fmt_duration_as_secs(&duration_compilation) ); return Ok(( CompileResult::NotCacheable(dist_type, duration_compilation), compiler_result, )); } debug!( "[{}]: Compiled in {}, storing in cache", out_pretty, fmt_duration_as_secs(&duration_compilation) ); let start_create_artifact = Instant::now(); let mut entry = CacheWrite::from_objects(outputs, &pool) .await .context("failed to zip up compiler outputs")?; entry.put_stdout(&compiler_result.stdout)?; entry.put_stderr(&compiler_result.stderr)?; debug!( "[{}]: Created cache artifact in {}", out_pretty, fmt_duration_as_secs(&start_create_artifact.elapsed()) ); let out_pretty2 = out_pretty.clone(); // Try to finish storing the newly-written cache // entry. We'll get the result back elsewhere. let future = async move { let start = Instant::now(); match storage.put(&key, entry).await { Ok(_) => { debug!("[{}]: Stored in cache successfully!", out_pretty2); Ok(CacheWriteInfo { object_file_pretty: out_pretty2, duration: start.elapsed(), }) } Err(e) => Err(e), } }; let future = Box::pin(future); Ok(( CompileResult::CacheMiss(miss_type, dist_type, duration_compilation, future), compiler_result, )) } } .with_context(|| format!("failed to store `{}` to cache", out_pretty)) } /// A descriptive string about the file that we're going to be producing. /// /// This is primarily intended for debug logging and such, not for actual /// artifact generation. 
fn output_pretty(&self) -> Cow<'_, str>; fn box_clone(&self) -> Box>; fn language(&self) -> Language; } #[cfg(not(feature = "dist-client"))] async fn dist_or_local_compile( service: &server::SccacheService, _dist_client: Option>, creator: T, _cwd: PathBuf, compilation: Box>, _weak_toolchain_key: String, out_pretty: String, ) -> Result<(Cacheable, DistType, process::Output)> where T: CommandCreatorSync, { let mut path_transformer = dist::PathTransformer::new(); let (compile_cmd, _dist_compile_cmd, cacheable) = compilation .generate_compile_commands(&mut path_transformer, true) .context("Failed to generate compile commands")?; debug!("[{}]: Compiling locally", out_pretty); compile_cmd .execute(&service, &creator) .await .map(move |o| (cacheable, DistType::NoDist, o)) } #[cfg(feature = "dist-client")] async fn dist_or_local_compile( service: &server::SccacheService, dist_client: Option>, creator: T, cwd: PathBuf, compilation: Box>, weak_toolchain_key: String, out_pretty: String, ) -> Result<(Cacheable, DistType, process::Output)> where T: CommandCreatorSync, { use std::io; let rewrite_includes_only = match dist_client { Some(ref client) => client.rewrite_includes_only(), _ => false, }; let mut path_transformer = dist::PathTransformer::new(); let (compile_cmd, dist_compile_cmd, cacheable) = compilation .generate_compile_commands(&mut path_transformer, rewrite_includes_only) .context("Failed to generate compile commands")?; let dist_client = match dist_compile_cmd.clone().and(dist_client) { Some(dc) => dc, None => { debug!("[{}]: Compiling locally", out_pretty); return compile_cmd .execute(service, &creator) .await .map(move |o| (cacheable, DistType::NoDist, o)); } }; debug!("[{}]: Attempting distributed compilation", out_pretty); let out_pretty2 = out_pretty.clone(); let local_executable = compile_cmd.get_executable(); let local_executable2 = compile_cmd.get_executable(); let do_dist_compile = async move { let mut dist_compile_cmd = dist_compile_cmd.context("Could not create distributed compile command")?; debug!("[{}]: Creating distributed compile request", out_pretty); let dist_output_paths = compilation .outputs() .map(|output| path_transformer.as_dist_abs(&cwd.join(output.path))) .collect::>() .context("Failed to adapt an output path for distributed compile")?; let (inputs_packager, toolchain_packager, outputs_rewriter) = compilation.into_dist_packagers(path_transformer)?; debug!( "[{}]: Identifying dist toolchain for {:?}", out_pretty, local_executable ); let (dist_toolchain, maybe_dist_compile_executable) = dist_client .put_toolchain(local_executable, weak_toolchain_key, toolchain_packager) .await?; let mut tc_archive = None; if let Some((dist_compile_executable, archive_path)) = maybe_dist_compile_executable { dist_compile_cmd.executable = dist_compile_executable; tc_archive = Some(archive_path); } debug!("[{}]: Requesting allocation", out_pretty); let jares = dist_client.do_alloc_job(dist_toolchain.clone()).await?; let job_alloc = match jares { dist::AllocJobResult::Success { job_alloc, need_toolchain: true, } => { debug!( "[{}]: Sending toolchain {} for job {}", out_pretty, dist_toolchain.archive_id, job_alloc.job_id ); match dist_client .do_submit_toolchain(job_alloc.clone(), dist_toolchain) .await .map_err(|e| e.context("Could not submit toolchain"))? 
{ dist::SubmitToolchainResult::Success => Ok(job_alloc), dist::SubmitToolchainResult::JobNotFound => { bail!("Job {} not found on server", job_alloc.job_id) } dist::SubmitToolchainResult::CannotCache => bail!( "Toolchain for job {} could not be cached by server", job_alloc.job_id ), } } dist::AllocJobResult::Success { job_alloc, need_toolchain: false, } => Ok(job_alloc), dist::AllocJobResult::Fail { msg } => { Err(anyhow!("Failed to allocate job").context(msg)) } }?; let job_id = job_alloc.job_id; let server_id = job_alloc.server_id; debug!("[{}]: Running job", out_pretty); let ((job_id, server_id), (jres, path_transformer)) = dist_client .do_run_job( job_alloc, dist_compile_cmd, dist_output_paths, inputs_packager, ) .await .map(move |res| ((job_id, server_id), res)) .with_context(|| { format!( "could not run distributed compilation job on {:?}", server_id ) })?; let jc = match jres { dist::RunJobResult::Complete(jc) => jc, dist::RunJobResult::JobNotFound => bail!("Job {} not found on server", job_id), }; debug!( "fetched {:?}", jc.outputs .iter() .map(|(p, bs)| (p, bs.lens().to_string())) .collect::>() ); let mut output_paths: Vec = vec![]; macro_rules! try_or_cleanup { ($v:expr) => {{ match $v { Ok(v) => v, Err(e) => { // Do our best to clear up. We may end up deleting a file that we just wrote over // the top of, but it's better to clear up too much than too little for local_path in output_paths.iter() { if let Err(e) = fs::remove_file(local_path) { if e.kind() != io::ErrorKind::NotFound { warn!("{} while attempting to clear up {}", e, local_path.display()) } } } return Err(e) }, } }}; } for (path, output_data) in jc.outputs { let len = output_data.lens().actual; let local_path = try_or_cleanup!(path_transformer .to_local(&path) .with_context(|| format!("unable to transform output path {}", path))); output_paths.push(local_path); // Do this first so cleanup works correctly let local_path = output_paths.last().expect("nothing in vec after push"); let mut file = try_or_cleanup!(File::create(local_path) .with_context(|| format!("Failed to create output file {}", local_path.display()))); let count = try_or_cleanup!(io::copy(&mut output_data.into_reader(), &mut file) .with_context(|| format!("Failed to write output to {}", local_path.display()))); assert!(count == len); } let extra_inputs = match tc_archive { Some(p) => vec![p], None => vec![], }; try_or_cleanup!(outputs_rewriter .handle_outputs(&path_transformer, &output_paths, &extra_inputs) .with_context(|| "failed to rewrite outputs from compile")); Ok((DistType::Ok(server_id), jc.output.into())) }; use futures::TryFutureExt; do_dist_compile .or_else(move |e| async move { if let Some(HttpClientError(_)) = e.downcast_ref::() { Err(e) } else if let Some(lru_disk_cache::Error::FileTooLarge) = e.downcast_ref::() { Err(anyhow!( "Could not cache dist toolchain for {:?} locally. Increase `toolchain_cache_size` or decrease the toolchain archive size.", local_executable2 )) } else { // `{:#}` prints the error and the causes in a single line. let errmsg = format!("{:#}", e); warn!( "[{}]: Could not perform distributed compile, falling back to local: {}", out_pretty2, errmsg ); compile_cmd .execute(service, &creator) .await .map(|o| (DistType::Error, o)) } }) .map_ok(move |(dt, o)| (cacheable, dt, o)) .await } impl Clone for Box> { fn clone(&self) -> Box> { self.box_clone() } } /// An interface to a compiler for actually invoking compilation. 
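/// Illustrative sketch (mirroring `dist_or_local_compile` above): callers
/// destructure the triple returned by `generate_compile_commands`:
///
/// ```ignore
/// let (compile_cmd, dist_compile_cmd, cacheable) = compilation
///     .generate_compile_commands(&mut path_transformer, rewrite_includes_only)?;
/// // `dist_compile_cmd` is `None` when the invocation cannot be distributed.
/// ```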
pub trait Compilation: Send where T: CommandCreatorSync, { /// Given information about a compiler command, generate a command that can /// execute the compiler. fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )>; /// Create a function that will create the inputs used to perform a distributed compilation #[cfg(feature = "dist-client")] fn into_dist_packagers( self: Box, _path_transformer: dist::PathTransformer, ) -> Result; /// Returns an iterator over the results of this compilation. /// /// Each item is a descriptive (and unique) name of the output paired with /// the path where it'll show up. fn outputs<'a>(&'a self) -> Box + 'a>; } #[cfg(feature = "dist-client")] pub trait OutputsRewriter: Send { /// Perform any post-compilation handling of outputs, given a Vec of the dist_path and local_path fn handle_outputs( self: Box, path_transformer: &dist::PathTransformer, output_paths: &[PathBuf], extra_inputs: &[PathBuf], ) -> Result<()>; } #[cfg(feature = "dist-client")] pub struct NoopOutputsRewriter; #[cfg(feature = "dist-client")] impl OutputsRewriter for NoopOutputsRewriter { fn handle_outputs( self: Box, _path_transformer: &dist::PathTransformer, _output_paths: &[PathBuf], _extra_inputs: &[PathBuf], ) -> Result<()> { Ok(()) } } /// Result of generating a hash from a compiler command. pub struct HashResult where T: CommandCreatorSync, { /// The hash key of the inputs. pub key: String, /// An object to use for the actual compilation, if necessary. pub compilation: Box + 'static>, /// A weak key that may be used to identify the toolchain pub weak_toolchain_key: String, } /// Possible results of parsing compiler arguments. #[derive(Debug, PartialEq, Eq)] pub enum CompilerArguments { /// Commandline can be handled. Ok(T), /// Cannot cache this compilation. CannotCache(&'static str, Option), /// This commandline is not a compile. NotCompilation, } macro_rules! cannot_cache { ($why:expr) => { return CompilerArguments::CannotCache($why, None) }; ($why:expr, $extra_info:expr) => { return CompilerArguments::CannotCache($why, Some($extra_info)) }; } macro_rules! try_or_cannot_cache { ($arg:expr, $why:expr) => {{ match $arg { Ok(arg) => arg, Err(e) => cannot_cache!($why, e.to_string()), } }}; } /// Specifics about distributed compilation. #[derive(Debug, PartialEq, Eq)] pub enum DistType { /// Distribution was not enabled. NoDist, /// Distributed compile success. Ok(dist::ServerId), /// Distributed compile failed. Error, } /// Specifics about cache misses. #[derive(Debug, PartialEq, Eq)] pub enum MissType { /// The compilation was not found in the cache, nothing more. Normal, /// Do not cache the results of the compilation. ForcedNoCache, /// Cache lookup was overridden, recompilation was forced. ForcedRecache, /// Cache took too long to respond. TimedOut, /// Error reading from cache CacheReadError, } /// Information about a successful cache write. pub struct CacheWriteInfo { pub object_file_pretty: String, pub duration: Duration, } /// The result of a compilation or cache retrieval. pub enum CompileResult { /// An error made the compilation not possible. Error, /// Result was found in cache. CacheHit(Duration), /// Result was not found in cache. /// /// The `CacheWriteFuture` will resolve when the result is finished /// being stored in the cache. CacheMiss( MissType, DistType, Duration, // Compilation time Pin> + Send>>, ), /// Not in cache and do not cache the results of the compilation. 
    NotCached(DistType, Duration),
    /// Not in cache, but the compilation result was determined to be not cacheable.
    NotCacheable(DistType, Duration),
    /// Not in cache, but compilation failed.
    CompileFailed(DistType, Duration),
}

/// The state of `--color` options passed to a compiler.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ColorMode {
    Off,
    On,
    #[default]
    Auto,
}

/// Can't derive(Debug) because of `CacheWriteFuture`.
impl fmt::Debug for CompileResult {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            CompileResult::Error => write!(f, "CompileResult::Error"),
            CompileResult::CacheHit(ref d) => write!(f, "CompileResult::CacheHit({:?})", d),
            CompileResult::CacheMiss(ref m, ref dt, ref d, _) => {
                write!(f, "CompileResult::CacheMiss({:?}, {:?}, {:?}, _)", m, dt, d)
            }
            CompileResult::NotCached(ref dt, ref d) => {
                write!(f, "CompileResult::NotCached({:?}, {:?})", dt, d)
            }
            CompileResult::NotCacheable(ref dt, ref d) => {
                write!(f, "CompileResult::NotCacheable({:?}, {:?})", dt, d)
            }
            CompileResult::CompileFailed(ref dt, ref d) => {
                write!(f, "CompileResult::CompileFailed({:?}, {:?})", dt, d)
            }
        }
    }
}

/// Can't use derive(PartialEq) because of the `CacheWriteFuture`.
impl PartialEq for CompileResult {
    fn eq(&self, other: &CompileResult) -> bool {
        match (self, other) {
            (&CompileResult::Error, &CompileResult::Error) => true,
            (&CompileResult::CacheHit(_), &CompileResult::CacheHit(_)) => true,
            (CompileResult::CacheMiss(m, dt, _, _), CompileResult::CacheMiss(n, dt2, _, _)) => {
                m == n && dt == dt2
            }
            (CompileResult::NotCached(dt, _), CompileResult::NotCached(dt2, _)) => dt == dt2,
            (CompileResult::NotCacheable(dt, _), CompileResult::NotCacheable(dt2, _)) => dt == dt2,
            (CompileResult::CompileFailed(dt, _), CompileResult::CompileFailed(dt2, _)) => {
                dt == dt2
            }
            _ => false,
        }
    }
}

/// Can this result be stored in cache?
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Cacheable {
    Yes,
    No,
}

/// Control of caching behavior.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CacheControl {
    /// Default caching behavior.
    Default,
    /// Do not cache the results of the compilation.
    ForceNoCache,
    /// Ignore existing cache entries, force recompilation.
    ForceRecache,
}

/// Creates a future that will write `contents` to `path` inside of a temporary
/// directory.
///
/// The future will resolve to the temporary directory and an absolute path
/// inside that temporary directory to a file that has the same filename as
/// `path` and contains the specified `contents`.
///
/// Note that when the `TempDir` is dropped it will delete all of its contents
/// including the path returned.
pub async fn write_temp_file(
    pool: &tokio::runtime::Handle,
    path: &Path,
    contents: Vec<u8>,
) -> Result<(TempDir, PathBuf)> {
    let path = path.to_owned();
    pool.spawn_blocking(move || {
        let dir = tempfile::Builder::new().prefix("sccache").tempdir()?;
        let src = dir.path().join(path);
        let mut file = File::create(&src)?;
        file.write_all(&contents)?;
        Ok::<_, anyhow::Error>((dir, src))
    })
    .await?
    .context("failed to write temporary file")
}

/// Returns true if the given path looks like a program known to have
/// a rustc compatible interface.
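/// Matching is done on the lowercased file stem, so directory, case, and
/// extension are ignored: `rustc`, `rustc.exe`, `/PATH/TO/RUSTC.EXE`, and
/// `clippy-driver` all match, while `rust` does not (see the tests below).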
fn is_rustc_like<P: AsRef<Path>>(p: P) -> bool {
    matches!(
        p.as_ref()
            .file_stem()
            .map(|s| s.to_string_lossy().to_lowercase())
            .as_deref(),
        Some("rustc") | Some("clippy-driver")
    )
}

/// Returns true if the given path looks like cudafe++
fn is_nvidia_cudafe<P: AsRef<Path>>(p: P) -> bool {
    matches!(
        p.as_ref()
            .file_stem()
            .map(|s| s.to_string_lossy().to_lowercase())
            .as_deref(),
        Some("cudafe++")
    )
}

/// Returns true if the given path looks like cicc
fn is_nvidia_cicc<P: AsRef<Path>>(p: P) -> bool {
    matches!(
        p.as_ref()
            .file_stem()
            .map(|s| s.to_string_lossy().to_lowercase())
            .as_deref(),
        Some("cicc")
    )
}

/// Returns true if the given path looks like ptxas
fn is_nvidia_ptxas<P: AsRef<Path>>(p: P) -> bool {
    matches!(
        p.as_ref()
            .file_stem()
            .map(|s| s.to_string_lossy().to_lowercase())
            .as_deref(),
        Some("ptxas")
    )
}

/// Returns true if the given path looks like a C compiler program.
///
/// This is not an exhaustive check of C compilers; it only reports programs
/// that are definitely not rustc.
fn is_known_c_compiler<P: AsRef<Path>>(p: P) -> bool {
    matches!(
        p.as_ref()
            .file_stem()
            .map(|s| s.to_string_lossy().to_lowercase())
            .as_deref(),
        Some(
            "cc" | "c++" | "gcc" | "g++" | "clang" | "clang++" | "clang-cl" | "cl"
                | "nvc" | "nvc++" | "nvcc"
        )
    )
}

/// If `executable` is a known compiler, return `Some(Box<dyn Compiler>)`.
async fn detect_compiler<T>(
    creator: T,
    executable: &Path,
    cwd: &Path,
    args: &[OsString],
    env: &[(OsString, OsString)],
    pool: &tokio::runtime::Handle,
    dist_archive: Option<PathBuf>,
) -> Result<(Box<dyn Compiler<T>>, Option<Box<dyn CompilerProxy<T>>>)>
where
    T: CommandCreatorSync,
{
    trace!("detect_compiler: {}", executable.display());

    // First, see if this looks like rustc.
    let maybe_rustc_executable = if is_rustc_like(executable) {
        Some(executable.to_path_buf())
    } else if env.iter().any(|(k, _)| k == OsStr::new("CARGO")) {
        // If not, detect the scenario where cargo is configured to wrap rustc with something other than sccache.
        // This happens when sccache is used as a `RUSTC_WRAPPER` and another tool is used as a
        // `RUSTC_WORKSPACE_WRAPPER`. In that case rustc will be the first argument rather than the command.
        //
        // The check for the `CARGO` env acts as a guardrail against false positives.
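        // Illustrative invocation shapes (paths hypothetical):
        //   RUSTC_WRAPPER=sccache only:   `sccache /path/to/rustc --crate-name foo ...`
        //     -> `executable` is rustc itself, caught by `is_rustc_like` above.
        //   plus RUSTC_WORKSPACE_WRAPPER: `sccache /path/to/wrapper rustc --crate-name foo ...`
        //     -> `executable` is the wrapper and rustc is the first argument,
        //        which is the case handled here.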
        // https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-reads
        args.iter()
            .next()
            .filter(|arg1| is_rustc_like(arg1))
            .map(PathBuf::from)
    } else {
        None
    };

    let pool = pool.clone();

    let rustc_executable = if let Some(ref rustc_executable) = maybe_rustc_executable {
        rustc_executable
    } else if is_nvidia_cudafe(executable) {
        debug!("Found cudafe++");
        return CCompiler::new(
            CudaFE {
                // TODO: Use nvcc --version
                version: Some(String::new()),
            },
            executable.to_owned(),
            &pool,
        )
        .await
        .map(|c| (Box::new(c) as Box<dyn Compiler<T>>, None));
    } else if is_nvidia_cicc(executable) {
        debug!("Found cicc");
        return CCompiler::new(
            Cicc {
                // TODO: Use nvcc --version
                version: Some(String::new()),
            },
            executable.to_owned(),
            &pool,
        )
        .await
        .map(|c| (Box::new(c) as Box<dyn Compiler<T>>, None));
    } else if is_nvidia_ptxas(executable) {
        debug!("Found ptxas");
        return CCompiler::new(
            Ptxas {
                // TODO: Use nvcc --version
                version: Some(String::new()),
            },
            executable.to_owned(),
            &pool,
        )
        .await
        .map(|c| (Box::new(c) as Box<dyn Compiler<T>>, None));
    } else if is_known_c_compiler(executable) {
        let cc = detect_c_compiler(creator, executable, args, env.to_vec(), pool).await;
        return cc.map(|c| (c, None));
    } else {
        // Even if it does not look like rustc, it might still be a rustc driver,
        // so we do the full check.
        executable
    };

    match resolve_rust_compiler(
        creator.clone(),
        executable,
        rustc_executable.to_path_buf(),
        env,
        cwd.to_path_buf(),
        dist_archive,
        pool.clone(),
    )
    .await
    {
        Ok(res) => Ok(res),
        Err(e) => {
            // In case we attempted to test for rustc while it didn't look like it,
            // fall back to C compiler detection one last time.
            if maybe_rustc_executable.is_none() {
                let executable = executable.to_path_buf();
                let cc = detect_c_compiler(creator, executable, args, env.to_vec(), pool).await;
                cc.map(|c| (c, None))
            } else {
                Err(e)
            }
        }
    }
}

/// Tries to verify that the provided executable is really a rust compiler
/// or rust driver
async fn resolve_rust_compiler<T>(
    creator: T,
    executable: &Path,
    rustc_executable: PathBuf,
    env: &[(OsString, OsString)],
    cwd: PathBuf,
    dist_archive: Option<PathBuf>,
    pool: tokio::runtime::Handle,
) -> Result<(Box<dyn Compiler<T>>, Option<Box<dyn CompilerProxy<T>>>)>
where
    T: CommandCreatorSync,
{
    let mut child = creator.clone().new_command_sync(executable);

    // We're wrapping rustc if the executable doesn't match the detected rustc_executable. In this case the wrapper
    // expects rustc as the first argument.
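    // E.g. (paths hypothetical): probing a wrapper runs
    //     /path/to/wrapper /path/to/rustc -vV
    // while probing rustc directly runs just `/path/to/rustc -vV`.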
    if rustc_executable != executable {
        child.arg(&rustc_executable);
    }
    child.env_clear().envs(env.to_vec()).args(&["-vV"]);

    let rustc_vv = run_input_output(child, None).await.map(|output| {
        if let Ok(stdout) = String::from_utf8(output.stdout.clone()) {
            if stdout.starts_with("rustc ") {
                return Ok(stdout);
            }
        }
        Err(ProcessError(output))
    })?;

    // rustc -vV verification status
    match rustc_vv {
        Ok(rustc_verbose_version) => {
            let rustc_executable2 = rustc_executable.clone();

            let proxy = RustupProxy::find_proxy_executable::<T>(
                &rustc_executable2,
                "rustup",
                creator.clone(),
                env,
            );
            use futures::TryFutureExt;
            let creator1 = creator.clone();
            let res = proxy.and_then(move |proxy| async move {
                match proxy {
                    Ok(Some(proxy)) => {
                        trace!("Found rustup proxy executable");
                        // take the pathbuf for rustc as resolved by the proxy
                        match proxy.resolve_proxied_executable(creator1, cwd, env).await {
                            Ok((resolved_path, _time)) => {
                                trace!("Resolved path with rustup proxy {:?}", &resolved_path);
                                Ok((Some(proxy), resolved_path))
                            }
                            Err(e) => {
                                trace!("Could not resolve compiler with rustup proxy: {}", e);
                                Ok((None, rustc_executable))
                            }
                        }
                    }
                    Ok(None) => {
                        trace!("Did not find rustup");
                        Ok((None, rustc_executable))
                    }
                    Err(e) => {
                        trace!("Did not find rustup due to {}, compiling without proxy", e);
                        Ok((None, rustc_executable))
                    }
                }
            });

            let (proxy, resolved_rustc) = res
                .await
                .map(|(proxy, resolved_compiler_executable)| {
                    (
                        proxy
                            .map(Box::new)
                            .map(|x: Box<RustupProxy>| x as Box<dyn CompilerProxy<T>>),
                        resolved_compiler_executable,
                    )
                })
                .unwrap_or_else(|_e| {
                    trace!("Compiling rust without proxy");
                    (None, rustc_executable2)
                });

            debug!("Using rustc at path: {resolved_rustc:?}");
            Rust::new(
                creator,
                resolved_rustc,
                env,
                &rustc_verbose_version,
                dist_archive,
                pool,
            )
            .await
            .map(|c| {
                (
                    Box::new(c) as Box<dyn Compiler<T>>,
                    proxy as Option<Box<dyn CompilerProxy<T>>>,
                )
            })
        }
        Err(e) => Err(e).context("Failed to launch subprocess for compiler determination"),
    }
}

ArgData! {
    PassThrough(OsString),
}
use self::ArgData::PassThrough as Detect_PassThrough;

// Establish a set of compiler flags that are required for
// valid execution of the compiler even in preprocessor mode.
// If the requested compiler invocation has any of these arguments,
// propagate them when doing our compiler vendor detection.
//
// Current known required flags:
//  ccbin/compiler-bindir needed for nvcc
//  This flag specifies the host compiler to use otherwise
//  gcc is expected to exist on the PATH. So if gcc doesn't exist
//  compiler detection fails if we don't pass along the ccbin arg
counted_array!(static ARGS: [ArgInfo<ArgData>; _] = [
    take_arg!("--compiler-bindir", OsString, CanBeSeparated('='), Detect_PassThrough),
    take_arg!("-ccbin", OsString, CanBeSeparated('='), Detect_PassThrough),
]);

async fn detect_c_compiler<T, P>(
    creator: T,
    executable: P,
    arguments: &[OsString],
    env: Vec<(OsString, OsString)>,
    pool: tokio::runtime::Handle,
) -> Result<Box<dyn Compiler<T>>>
where
    T: CommandCreatorSync,
    P: AsRef<Path>,
{
    trace!("detect_c_compiler");

    // NVCC needs to be first as msvc, clang, or gcc could
    // be the underlying host compiler for nvcc
    // Both clang and clang-cl define _MSC_VER on Windows, so we first
    // check for MSVC, then check whether _MT is defined, which is the
    // difference between clang and clang-cl.
    // NVHPC needs a custom version line since `__VERSION__` evaluates
    // to the EDG version.
// // We prefix the information we need with `compiler_id` and `compiler_version` // so that we can support compilers that insert pre-amble code even in `-E` mode let test = b" #if defined(__NVCC__) && defined(__NVCOMPILER) compiler_id=nvcc-nvhpc compiler_version=__CUDACC_VER_MAJOR__.__CUDACC_VER_MINOR__.__CUDACC_VER_BUILD__ #elif defined(__NVCC__) && defined(_MSC_VER) compiler_id=nvcc-msvc compiler_version=__CUDACC_VER_MAJOR__.__CUDACC_VER_MINOR__.__CUDACC_VER_BUILD__ #elif defined(__NVCC__) compiler_id=nvcc compiler_version=__CUDACC_VER_MAJOR__.__CUDACC_VER_MINOR__.__CUDACC_VER_BUILD__ #elif defined(_MSC_VER) && !defined(__clang__) compiler_id=msvc #elif defined(_MSC_VER) && defined(_MT) compiler_id=msvc-clang #elif defined(__NVCOMPILER) compiler_id=nvhpc compiler_version=__NVCOMPILER_MAJOR__.__NVCOMPILER_MINOR__.__NVCOMPILER_PATCHLEVEL__ #elif defined(__clang__) && defined(__cplusplus) && defined(__apple_build_version__) compiler_id=apple-clang++ #elif defined(__clang__) && defined(__cplusplus) compiler_id=clang++ #elif defined(__clang__) && defined(__apple_build_version__) compiler_id=apple-clang #elif defined(__clang__) compiler_id=clang #elif defined(__GNUC__) && defined(__cplusplus) compiler_id=g++ #elif defined(__GNUC__) compiler_id=gcc #elif defined(__DCC__) compiler_id=diab #elif defined(__CTC__) compiler_id=tasking_vx #else compiler_id=unknown #endif compiler_version=__VERSION__ " .to_vec(); let (tempdir, src) = write_temp_file(&pool, "testfile.c".as_ref(), test).await?; let executable = executable.as_ref(); let mut cmd = creator.clone().new_command_sync(executable); cmd.stdout(Stdio::piped()) .stderr(Stdio::piped()) .envs(env.iter().map(|s| (&s.0, &s.1))); // Iterate over all the arguments for compilation and extract // any that are required for any valid execution of the compiler. // Allowing our compiler vendor detection to always properly execute for arg in ArgsIter::new(arguments.iter().cloned(), &ARGS[..]) { let arg = arg.unwrap_or_else(|_| Argument::Raw(OsString::from(""))); if let Some(Detect_PassThrough(_)) = arg.get_data() { let required_arg = arg.normalize(NormalizedDisposition::Concatenated); cmd.args(&Vec::from_iter(required_arg.iter_os_strings())); } } cmd.arg("-E").arg(src); trace!("compiler {:?}", cmd); let child = cmd.spawn().await?; let output = child .wait_with_output() .await .context("failed to read child output")?; drop(tempdir); let stdout = match str::from_utf8(&output.stdout) { Ok(s) => s, Err(_) => bail!("Failed to parse output"), }; let mut lines = stdout.lines().filter_map(|line| { let line = line.trim(); if line.starts_with("compiler_id=") { Some(line.strip_prefix("compiler_id=").unwrap()) } else if line.starts_with("compiler_version=") { Some(line.strip_prefix("compiler_version=").unwrap()) } else { None } }); if let Some(kind) = lines.next() { let executable = executable.to_owned(); let version = lines .next() // In case the compiler didn't expand the macro. 
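            // Illustrative: a successful probe of e.g. GCC leaves lines like
            //     compiler_id=gcc
            //     compiler_version="13.2.0"
            // in the preprocessed output, while a compiler that does not expand
            // the macro leaves a literal `compiler_version=__VERSION__`, which
            // the filter below discards.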
.filter(|&line| line != "__VERSION__") .map(str::to_owned); match kind { "clang" | "clang++" | "apple-clang" | "apple-clang++" => { debug!("Found {}", kind); return CCompiler::new( Clang { clangplusplus: kind.ends_with("++"), is_appleclang: kind.starts_with("apple-"), version: version.clone(), }, executable, &pool, ) .await .map(|c| Box::new(c) as Box>); } "diab" => { debug!("Found diab"); return CCompiler::new( Diab { version: version.clone(), }, executable, &pool, ) .await .map(|c| Box::new(c) as Box>); } "gcc" | "g++" => { debug!("Found {}", kind); return CCompiler::new( Gcc { gplusplus: kind == "g++", version: version.clone(), }, executable, &pool, ) .await .map(|c| Box::new(c) as Box>); } "msvc" | "msvc-clang" => { let is_clang = kind == "msvc-clang"; debug!("Found MSVC (is clang: {})", is_clang); let prefix = msvc::detect_showincludes_prefix( &creator, executable.as_ref(), is_clang, env, &pool, ) .await?; trace!("showIncludes prefix: '{}'", prefix); return CCompiler::new( Msvc { includes_prefix: prefix, is_clang, version: version.clone(), }, executable, &pool, ) .await .map(|c| Box::new(c) as Box>); } "nvcc" | "nvcc-msvc" | "nvcc-nvhpc" => { let host_compiler = match kind { "nvcc-nvhpc" => NvccHostCompiler::Nvhpc, "nvcc-msvc" => NvccHostCompiler::Msvc, "nvcc" => NvccHostCompiler::Gcc, &_ => NvccHostCompiler::Gcc, }; let host_compiler_version = lines .next() // In case the compiler didn't expand the macro. .filter(|&line| line != "__VERSION__") .map(str::to_owned); return CCompiler::new( Nvcc { host_compiler, version, host_compiler_version, }, executable, &pool, ) .await .map(|c| Box::new(c) as Box>); } "nvhpc" => { debug!("Found NVHPC"); return CCompiler::new( Nvhpc { nvcplusplus: kind == "nvc++", version: version.clone(), }, executable, &pool, ) .await .map(|c| Box::new(c) as Box>); } "tasking_vx" => { debug!("Found Tasking VX"); return CCompiler::new(TaskingVX, executable, &pool) .await .map(|c| Box::new(c) as Box>); } _ => (), } } let stderr = String::from_utf8_lossy(&output.stderr); debug!("nothing useful in detection output {:?}", stdout); debug!("compiler status: {}", output.status); debug!("compiler stderr:\n{}", stderr); bail!(stderr.into_owned()) } /// If `executable` is a known compiler, return a `Box` containing information about it. 
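/// Illustrative call (sketch): detection is driven with the real process
/// environment and a runtime handle, e.g.
///
/// ```ignore
/// let (compiler, proxy) =
///     get_compiler_info(creator, &exe, &cwd, &args, &env, &pool, None).await?;
/// trace!("detected compiler kind: {:?}", compiler.kind());
/// ```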
pub async fn get_compiler_info( creator: T, executable: &Path, cwd: &Path, args: &[OsString], env: &[(OsString, OsString)], pool: &tokio::runtime::Handle, dist_archive: Option, ) -> Result<(Box>, Option>>)> where T: CommandCreatorSync, { let pool = pool.clone(); detect_compiler(creator, executable, cwd, args, env, &pool, dist_archive).await } #[cfg(test)] mod test { use super::*; use crate::cache::disk::DiskCache; use crate::cache::{CacheMode, CacheRead, PreprocessorCacheModeConfig}; use crate::mock_command::*; use crate::test::mock_storage::MockStorage; use crate::test::utils::*; use fs::File; use std::io::{Cursor, Write}; use std::sync::Arc; use std::time::Duration; use test_case::test_case; use tokio::runtime::Runtime; #[test] fn test_detect_compiler_kind_gcc() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command( &creator, Ok(MockChild::new( exit_status(1), "", "gcc: error: unrecognized command-line option '-vV'; did you mean '-v'?", )), ); next_command( &creator, Ok(MockChild::new(exit_status(0), "\n\ncompiler_id=gcc", "")), ); let c = detect_compiler(creator, &f.bins[0], f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Gcc), c.kind()); } #[test] fn test_detect_compiler_kind_clang() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command( &creator, Ok(MockChild::new( exit_status(1), "", "clang: error: unknown argument: '-vV'", )), ); next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=clang\n", "")), ); let c = detect_compiler(creator, &f.bins[0], f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Clang), c.kind()); } #[test] fn test_detect_compiler_must_be_clang() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); let clang = f.mk_bin("clang").unwrap(); next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=clang\n", "")), ); let c = detect_compiler(creator, &clang, f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Clang), c.kind()); } #[test] fn test_detect_compiler_vv_clang() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command( &creator, Ok(MockChild::new(exit_status(0), "clang: 13\n", "")), ); next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=clang\n", "")), ); let c = detect_compiler(creator, &f.bins[0], f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Clang), c.kind()); } #[test] fn test_detect_compiler_kind_msvc() { drop(env_logger::try_init()); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); let f = TestFixture::new(); let srcfile = f.touch("test.h").unwrap(); let mut s = srcfile.to_str().unwrap(); if s.starts_with("\\\\?\\") { s = &s[4..]; } let prefix = String::from("blah: "); let stdout = format!("{}{}\r\n", prefix, s); // Compiler detection output next_command( &creator, Ok(MockChild::new( exit_status(1), "", "msvc-error: unknown argument: '-vV'", )), ); next_command( &creator, Ok(MockChild::new(exit_status(0), "\ncompiler_id=msvc\n", "")), ); // showincludes prefix detection output next_command( &creator, 
Ok(MockChild::new(exit_status(0), stdout, String::new())), ); let c = detect_compiler(creator, &f.bins[0], f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Msvc), c.kind()); } #[test] fn test_detect_compiler_kind_nvcc() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "no -vV"))); next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=nvcc\n", "")), ); let c = detect_compiler(creator, &f.bins[0], f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Nvcc), c.kind()); } #[test] fn test_detect_compiler_kind_nvhpc() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "no -vV"))); next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=nvhpc\n", "")), ); let c = detect_compiler(creator, &f.bins[0], f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Nvhpc), c.kind()); } #[test] fn test_detect_compiler_kind_rustc() { let f = TestFixture::new(); // Windows uses bin, everything else uses lib. Just create both. fs::create_dir(f.tempdir.path().join("lib")).unwrap(); fs::create_dir(f.tempdir.path().join("bin")).unwrap(); let rustc = f.mk_bin("rustc.exe").unwrap(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); populate_rustc_command_mock(&creator, &f); let c = detect_compiler(creator, &rustc, f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::Rust, c.kind()); } #[test] fn test_is_rustc_like() { assert!(is_rustc_like("rustc")); assert!(is_rustc_like("rustc.exe")); assert!(is_rustc_like("/path/to/rustc.exe")); assert!(is_rustc_like("/path/to/rustc")); assert!(is_rustc_like("/PATH/TO/RUSTC.EXE")); assert!(is_rustc_like("/Path/To/RustC.Exe")); assert!(is_rustc_like("/path/to/clippy-driver")); assert!(is_rustc_like("/path/to/clippy-driver.exe")); assert!(is_rustc_like("/PATH/TO/CLIPPY-DRIVER.EXE")); assert!(is_rustc_like("/Path/To/Clippy-Driver.Exe")); assert!(!is_rustc_like("rust")); assert!(!is_rustc_like("RUST")); } fn populate_rustc_command_mock( creator: &Arc>, f: &TestFixture, ) { // rustc --vV next_command( creator, Ok(MockChild::new( exit_status(0), "\ rustc 1.27.0 (3eda71b00 2018-06-19) binary: rustc commit-hash: 3eda71b00ad48d7bf4eef4c443e7f611fd061418 commit-date: 2018-06-19 host: x86_64-unknown-linux-gnu release: 1.27.0 LLVM version: 6.0", "", )), ); // rustc --print=sysroot let sysroot = f.tempdir.path().to_str().unwrap(); next_command(creator, Ok(MockChild::new(exit_status(0), sysroot, ""))); next_command(creator, Ok(MockChild::new(exit_status(0), sysroot, ""))); next_command(creator, Ok(MockChild::new(exit_status(0), sysroot, ""))); } #[test] fn test_detect_compiler_kind_rustc_workspace_wrapper() { let f = TestFixture::new(); // Windows uses bin, everything else uses lib. Just create both. 
fs::create_dir(f.tempdir.path().join("lib")).unwrap(); fs::create_dir(f.tempdir.path().join("bin")).unwrap(); let rustc = f.mk_bin("rustc-workspace-wrapper").unwrap(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); populate_rustc_command_mock(&creator, &f); let c = detect_compiler( creator, &rustc, f.tempdir.path(), // Specifying an extension tests the ignoring &[OsString::from("rustc.exe")], &[(OsString::from("CARGO"), OsString::from("CARGO"))], pool, None, ) .wait() .unwrap() .0; assert_eq!(CompilerKind::Rust, c.kind()); // Test we don't detect rustc if the first arg is not rustc let creator = new_creator(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "no -vV"))); populate_rustc_command_mock(&creator, &f); assert!(detect_compiler( creator, &rustc, f.tempdir.path(), &[OsString::from("not-rustc")], &[(OsString::from("CARGO"), OsString::from("CARGO"))], pool, None, ) .wait() .is_err()); // Test we detect rustc if the CARGO env is not defined let creator = new_creator(); populate_rustc_command_mock(&creator, &f); assert!(detect_compiler( creator, &rustc, f.tempdir.path(), &[OsString::from("rustc")], &[], pool, None, ) .wait() .is_ok()); } #[test] fn test_detect_compiler_kind_diab() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "no -vV"))); next_command( &creator, Ok(MockChild::new(exit_status(0), "\ncompiler_id=diab\n", "")), ); let c = detect_compiler(creator, &f.bins[0], f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; assert_eq!(CompilerKind::C(CCompilerKind::Diab), c.kind()); } #[test] fn test_detect_compiler_kind_unknown() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "no -vV"))); next_command( &creator, Ok(MockChild::new(exit_status(0), "something", "")), ); assert!(detect_compiler( creator, "/foo/bar".as_ref(), f.tempdir.path(), &[], &[], pool, None ) .wait() .is_err()); } #[test] fn test_detect_compiler_kind_process_fail() { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "no -vV"))); next_command(&creator, Ok(MockChild::new(exit_status(1), "", ""))); assert!(detect_compiler( creator, "/foo/bar".as_ref(), f.tempdir.path(), &[], &[], pool, None ) .wait() .is_err()); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_compiler_version_affects_hash(preprocessor_cache_mode: bool) { let f = TestFixture::new(); let clang = f.mk_bin("clang").unwrap(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let cwd = f.tempdir.path(); // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap(); let results: Vec<_> = [11, 12] .iter() .map(|version| { let output = format!("compiler_id=clang\ncompiler_version=\"{}.0.0\"", version); next_command(&creator, Ok(MockChild::new(exit_status(0), output, ""))); let c = detect_compiler( creator.clone(), &clang, f.tempdir.path(), &[], &[], pool, None, ) .wait() .unwrap() .0; next_command( &creator, Ok(MockChild::new(exit_status(0), 
"preprocessor output", "")), ); let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; hasher .generate_hash_key( &creator, cwd.to_path_buf(), vec![], false, pool, false, Arc::new(MockStorage::new(None, preprocessor_cache_mode)), CacheControl::Default, ) .wait() .unwrap() }) .collect(); assert_eq!(results.len(), 2); assert_ne!(results[0].key, results[1].key); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_common_args_affects_hash(preprocessor_cache_mode: bool) { let f = TestFixture::new(); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); let output = "compiler_id=clang\ncompiler_version=\"16.0.0\""; let arguments = [ ovec!["-c", "foo.c", "-o", "foo.o", "-DHELLO"], ovec!["-c", "foo.c", "-o", "foo.o", "-DHI"], ovec!["-c", "foo.c", "-o", "foo.o"], ]; let cwd = f.tempdir.path(); // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap(); let results: Vec<_> = arguments .iter() .map(|argument| { next_command( &creator, Ok(MockChild::new( exit_status(1), "", "clang: error: unknown argument: '-vV'", )), ); next_command(&creator, Ok(MockChild::new(exit_status(0), output, ""))); let c = detect_compiler( creator.clone(), &f.bins[0], f.tempdir.path(), &[], &[], pool, None, ) .wait() .unwrap() .0; next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), ); let hasher = match c.parse_arguments(argument, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; hasher .generate_hash_key( &creator, cwd.to_path_buf(), vec![], false, pool, false, Arc::new(MockStorage::new(None, preprocessor_cache_mode)), CacheControl::Default, ) .wait() .unwrap() }) .collect(); assert_eq!(results.len(), 3); assert_ne!(results[0].key, results[1].key); assert_ne!(results[1].key, results[2].key); assert_ne!(results[0].key, results[2].key); } #[test] fn test_get_compiler_info() { let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle(); let f = TestFixture::new(); // Pretend to be GCC. let gcc = f.mk_bin("gcc").unwrap(); next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")), ); let c = get_compiler_info(creator, &gcc, f.tempdir.path(), &[], &[], pool, None) .wait() .unwrap() .0; // digest of an empty file. assert_eq!(CompilerKind::C(CCompilerKind::Gcc), c.kind()); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_compiler_get_cached_or_compile(preprocessor_cache_mode: bool) { drop(env_logger::try_init()); let creator = new_creator(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let runtime = Runtime::new().unwrap(); let pool = runtime.handle().clone(); let storage = DiskCache::new( f.tempdir.path().join("cache"), u64::MAX, &pool, PreprocessorCacheModeConfig { use_preprocessor_cache_mode: preprocessor_cache_mode, ..Default::default() }, CacheMode::ReadWrite, ); // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap(); let storage = Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage.clone(), pool.clone()); // Pretend to be GCC. 
next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")), ); let c = get_compiler_info( creator.clone(), &gcc, f.tempdir.path(), &[], &[], &pool, None, ) .wait() .unwrap() .0; // The preprocessor invocation. next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), ); // The compiler invocation. const COMPILER_STDOUT: &[u8] = b"compiler stdout"; const COMPILER_STDERR: &[u8] = b"compiler stderr"; let obj = f.tempdir.path().join("foo.o"); let o = obj.clone(); next_command_calls(&creator, move |_| { // Pretend to compile something. let mut f = File::create(&o)?; f.write_all(b"file contents")?; Ok(MockChild::new( exit_status(0), COMPILER_STDOUT, COMPILER_STDERR, )) }); let cwd = f.tempdir.path(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; let hasher2 = hasher.clone(); let (cached, res) = runtime .block_on(async { hasher .get_cached_or_compile( &service, None, creator.clone(), storage.clone(), arguments.clone(), cwd.to_path_buf(), vec![], CacheControl::Default, pool.clone(), ) .await }) .unwrap(); // Ensure that the object file was created. assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); match cached { CompileResult::CacheMiss(MissType::Normal, DistType::NoDist, _, f) => { // wait on cache write future so we don't race with it! f.wait().unwrap(); } _ => panic!("Unexpected compile result: {:?}", cached), } assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice()); // Now compile again, which should be a cache hit. fs::remove_file(&obj).unwrap(); // The preprocessor invocation. next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), ); // There should be no actual compiler invocation. let (cached, res) = runtime .block_on(async { hasher2 .get_cached_or_compile( &service, None, creator, storage, arguments, cwd.to_path_buf(), vec![], CacheControl::Default, pool, ) .await }) .unwrap(); // Ensure that the object file was created. assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); assert_eq!(CompileResult::CacheHit(Duration::new(0, 0)), cached); assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice()); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] #[cfg(feature = "dist-client")] fn test_compiler_get_cached_or_compile_dist(preprocessor_cache_mode: bool) { drop(env_logger::try_init()); let creator = new_creator(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let runtime = Runtime::new().unwrap(); let pool = runtime.handle().clone(); let storage = DiskCache::new( f.tempdir.path().join("cache"), u64::MAX, &pool, PreprocessorCacheModeConfig { use_preprocessor_cache_mode: preprocessor_cache_mode, ..Default::default() }, CacheMode::ReadWrite, ); // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap(); let storage = Arc::new(storage); // Pretend to be GCC. next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")), ); let c = get_compiler_info( creator.clone(), &gcc, f.tempdir.path(), &[], &[], &pool, None, ) .wait() .unwrap() .0; // The preprocessor invocation. 
next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), );
// The compiler invocation.
const COMPILER_STDOUT: &[u8] = b"compiler stdout"; const COMPILER_STDERR: &[u8] = b"compiler stderr"; let obj = f.tempdir.path().join("foo.o");
// Dist client will do the compilation
let dist_client = test_dist::OneshotClient::new( 0, COMPILER_STDOUT.to_owned(), COMPILER_STDERR.to_owned(), ); let service = server::SccacheService::mock_with_dist_client( dist_client.clone(), storage.clone(), pool.clone(), ); let cwd = f.tempdir.path(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; let hasher2 = hasher.clone();
let (cached, res) = runtime .block_on(async { hasher .get_cached_or_compile( &service, Some(dist_client.clone()), creator.clone(), storage.clone(), arguments.clone(), cwd.to_path_buf(), vec![], CacheControl::Default, pool.clone(), ) .await }) .unwrap();
// Ensure that the object file was created.
assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); match cached { CompileResult::CacheMiss(MissType::Normal, DistType::Ok(_), _, f) => { // wait on cache write future so we don't race with it! f.wait().unwrap(); } _ => panic!("Unexpected compile result: {:?}", cached), } assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice());
// Now compile again, which should be a cache hit.
fs::remove_file(&obj).unwrap();
// The preprocessor invocation.
next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), );
// There should be no actual compiler invocation.
let (cached, res) = runtime .block_on(async { hasher2 .get_cached_or_compile( &service, Some(dist_client.clone()), creator, storage, arguments, cwd.to_path_buf(), vec![], CacheControl::Default, pool, ) .await }) .unwrap();
// Ensure that the object file was created.
assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); assert_eq!(CompileResult::CacheHit(Duration::new(0, 0)), cached); assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice()); }

#[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")]
/// Test that a cache read that results in an error is treated as a cache
/// miss.
fn test_compiler_get_cached_or_compile_cache_error(preprocessor_cache_mode: bool) { drop(env_logger::try_init()); let creator = new_creator(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let runtime = Runtime::new().unwrap(); let pool = runtime.handle().clone(); let storage = MockStorage::new(None, preprocessor_cache_mode); let storage: Arc<MockStorage> = Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage.clone(), pool.clone());
// Write a dummy input file so the preprocessor cache mode can work
std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap();
// Pretend to be GCC.
next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")), ); let c = get_compiler_info( creator.clone(), &gcc, f.tempdir.path(), &[], &[], &pool, None, ) .wait() .unwrap() .0;
// The preprocessor invocation.
next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), );
// The compiler invocation.
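// `next_command_calls` (below) registers a closure instead of a canned
// child: the mock "compiler" writes foo.o to disk as a side effect before
// returning its canned stdout/stderr.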
const COMPILER_STDOUT: &[u8] = b"compiler stdout"; const COMPILER_STDERR: &[u8] = b"compiler stderr"; let obj = f.tempdir.path().join("foo.o"); let o = obj.clone(); next_command_calls(&creator, move |_| { // Pretend to compile something. let mut f = File::create(&o)?; f.write_all(b"file contents")?; Ok(MockChild::new( exit_status(0), COMPILER_STDOUT, COMPILER_STDERR, )) }); let cwd = f.tempdir.path(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), };
// The cache will return an error.
storage.next_get(Err(anyhow!("Some Error")));
let (cached, res) = runtime .block_on(hasher.get_cached_or_compile( &service, None, creator, storage, arguments.clone(), cwd.to_path_buf(), vec![], CacheControl::Default, pool, )) .unwrap();
// Ensure that the object file was created.
assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); match cached { CompileResult::CacheMiss(MissType::CacheReadError, DistType::NoDist, _, f) => { // wait on cache write future so we don't race with it! let _ = f.wait(); } _ => panic!("Unexpected compile result: {:?}", cached), } assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice()); }

#[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")]
/// Test that cache read timing is recorded.
fn test_compiler_get_cached_or_compile_cache_get_timing(preprocessor_cache_mode: bool) { drop(env_logger::try_init()); let creator = new_creator(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let runtime = Runtime::new().unwrap(); let pool = runtime.handle().clone();
// Write a dummy input file so the preprocessor cache mode can work
std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap();
// Make our storage wait 2ms for each get/put operation.
let storage_delay = Duration::from_millis(2); let storage = MockStorage::new(Some(storage_delay), preprocessor_cache_mode); let storage: Arc<MockStorage> = Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage.clone(), pool.clone());
// Pretend to be GCC.
next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")), ); let c = get_compiler_info( creator.clone(), &gcc, f.tempdir.path(), &[], &[], &pool, None, ) .wait() .unwrap() .0;
// The preprocessor invocation.
next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), );
// The compiler invocation.
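// No mock compiler is registered here: the test assembles a complete cache
// entry below and hands it to the (artificially delayed) storage mock, so
// the request is served as a pure cache hit.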
const COMPILER_STDOUT: &[u8] = b"compiler stdout"; const COMPILER_STDERR: &[u8] = b"compiler stderr"; let obj_file: &[u8] = &[1, 2, 3, 4]; // A cache entry to hand out let mut cachewrite = CacheWrite::new(); cachewrite .put_stdout(COMPILER_STDOUT) .expect("Failed to store stdout"); cachewrite .put_stderr(COMPILER_STDERR) .expect("Failed to store stderr"); cachewrite .put_object("obj", &mut Cursor::new(obj_file), None) .expect("Failed to store cache object"); let entry = cachewrite.finish().expect("Failed to finish cache entry"); let entry = CacheRead::from(Cursor::new(entry)).expect("Failed to re-read cache entry"); let cwd = f.tempdir.path(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; storage.next_get(Ok(Cache::Hit(entry))); let (cached, _res) = runtime .block_on(hasher.get_cached_or_compile( &service, None, creator, storage, arguments.clone(), cwd.to_path_buf(), vec![], CacheControl::Default, pool, )) .unwrap(); match cached { CompileResult::CacheHit(duration) => { assert!(duration >= storage_delay); } _ => panic!("Unexpected compile result: {:?}", cached), } } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_compiler_get_cached_or_compile_force_recache(preprocessor_cache_mode: bool) { drop(env_logger::try_init()); let creator = new_creator(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let runtime = single_threaded_runtime(); let pool = runtime.handle().clone(); let storage = DiskCache::new( f.tempdir.path().join("cache"), u64::MAX, &pool, PreprocessorCacheModeConfig { use_preprocessor_cache_mode: preprocessor_cache_mode, ..Default::default() }, CacheMode::ReadWrite, ); let storage = Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage.clone(), pool.clone()); // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap(); // Pretend to be GCC. next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")), ); let c = get_compiler_info( creator.clone(), &gcc, f.tempdir.path(), &[], &[], &pool, None, ) .wait() .unwrap() .0; const COMPILER_STDOUT: &[u8] = b"compiler stdout"; const COMPILER_STDERR: &[u8] = b"compiler stderr"; // The compiler should be invoked twice, since we're forcing // recaching. let obj = f.tempdir.path().join("foo.o"); for _ in 0..2 { // The preprocessor invocation. next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), ); // The compiler invocation. let o = obj.clone(); next_command_calls(&creator, move |_| { // Pretend to compile something. let mut f = File::create(&o)?; f.write_all(b"file contents")?; Ok(MockChild::new( exit_status(0), COMPILER_STDOUT, COMPILER_STDERR, )) }); } let cwd = f.tempdir.path(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; let hasher2 = hasher.clone(); let (cached, res) = runtime .block_on(async { hasher .get_cached_or_compile( &service, None, creator.clone(), storage.clone(), arguments.clone(), cwd.to_path_buf(), vec![], CacheControl::Default, pool.clone(), ) .await }) .unwrap(); // Ensure that the object file was created. 
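// (foo.o was written by the first of the two mock compiler closures
// registered in the loop above)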
assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); match cached { CompileResult::CacheMiss(MissType::Normal, DistType::NoDist, _, f) => { // wait on cache write future so we don't race with it! f.wait().unwrap(); } _ => panic!("Unexpected compile result: {:?}", cached), } assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice()); // Now compile again, but force recaching. fs::remove_file(&obj).unwrap(); let (cached, res) = hasher2 .get_cached_or_compile( &service, None, creator, storage, arguments, cwd.to_path_buf(), vec![], CacheControl::ForceRecache, pool, ) .wait() .unwrap(); // Ensure that the object file was created. assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); match cached { CompileResult::CacheMiss(MissType::ForcedRecache, DistType::NoDist, _, f) => { // wait on cache write future so we don't race with it! f.wait().unwrap(); } _ => panic!("Unexpected compile result: {:?}", cached), } assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice()); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_compiler_get_cached_or_compile_preprocessor_error(preprocessor_cache_mode: bool) { drop(env_logger::try_init()); let creator = new_creator(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let runtime = single_threaded_runtime(); let pool = runtime.handle().clone(); let storage = DiskCache::new( f.tempdir.path().join("cache"), u64::MAX, &pool, PreprocessorCacheModeConfig { use_preprocessor_cache_mode: preprocessor_cache_mode, ..Default::default() }, CacheMode::ReadWrite, ); let storage = Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage.clone(), pool.clone()); // Pretend to be GCC. Also inject a fake object file that the subsequent // preprocessor failure should remove. let obj = f.tempdir.path().join("foo.o"); // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap(); let o = obj.clone(); next_command_calls(&creator, move |_| { let mut f = File::create(&o)?; f.write_all(b"file contents")?; Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")) }); let c = get_compiler_info( creator.clone(), &gcc, f.tempdir.path(), &[], &[], &pool, None, ) .wait() .unwrap() .0; // We should now have a fake object file. assert!(fs::metadata(&obj).is_ok()); // The preprocessor invocation. const PREPROCESSOR_STDERR: &[u8] = b"something went wrong"; next_command( &creator, Ok(MockChild::new( exit_status(1), b"preprocessor output", PREPROCESSOR_STDERR, )), ); let cwd = f.tempdir.path(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; let (cached, res) = runtime .block_on(async { hasher .get_cached_or_compile( &service, None, creator, storage, arguments, cwd.to_path_buf(), vec![], CacheControl::Default, pool, ) .await }) .unwrap(); assert_eq!(cached, CompileResult::Error); assert_eq!(exit_status(1), res.status); // Shouldn't get anything on stdout, since that would just be preprocessor spew! assert_eq!(b"", res.stdout.as_slice()); assert_eq!(PREPROCESSOR_STDERR, res.stderr.as_slice()); // Errors in preprocessing should remove the object file. 
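// A stale foo.o left behind after a failed preprocess could be mistaken for
// a valid, up-to-date output by the calling build system, so sccache is
// expected to have deleted it.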
assert!(fs::metadata(&obj).is_err()); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] #[cfg(feature = "dist-client")] fn test_compiler_get_cached_or_compile_dist_error(preprocessor_cache_mode: bool) { drop(env_logger::try_init()); let creator = new_creator(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let runtime = Runtime::new().unwrap(); let pool = runtime.handle().clone(); let dist_clients = vec![ test_dist::ErrorPutToolchainClient::new(), test_dist::ErrorAllocJobClient::new(), test_dist::ErrorSubmitToolchainClient::new(), test_dist::ErrorRunJobClient::new(), ]; // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("foo.c"), "whatever").unwrap(); let storage = DiskCache::new( f.tempdir.path().join("cache"), u64::MAX, &pool, PreprocessorCacheModeConfig { use_preprocessor_cache_mode: preprocessor_cache_mode, ..Default::default() }, CacheMode::ReadWrite, ); let storage = Arc::new(storage); // Pretend to be GCC. next_command( &creator, Ok(MockChild::new(exit_status(0), "compiler_id=gcc", "")), ); let c = get_compiler_info( creator.clone(), &gcc, f.tempdir.path(), &[], &[], &pool, None, ) .wait() .unwrap() .0; const COMPILER_STDOUT: &[u8] = b"compiler stdout"; const COMPILER_STDERR: &[u8] = b"compiler stderr"; // The compiler should be invoked twice, since we're forcing // recaching. let obj = f.tempdir.path().join("foo.o"); for _ in dist_clients.iter() { // The preprocessor invocation. next_command( &creator, Ok(MockChild::new(exit_status(0), "preprocessor output", "")), ); // The compiler invocation. let o = obj.clone(); next_command_calls(&creator, move |_| { // Pretend to compile something. let mut f = File::create(&o)?; f.write_all(b"file contents")?; Ok(MockChild::new( exit_status(0), COMPILER_STDOUT, COMPILER_STDERR, )) }); } let cwd = f.tempdir.path(); let arguments = ovec!["-c", "foo.c", "-o", "foo.o"]; let hasher = match c.parse_arguments(&arguments, ".".as_ref(), &[]) { CompilerArguments::Ok(h) => h, o => panic!("Bad result from parse_arguments: {:?}", o), }; // All these dist clients will fail, but should still result in successful compiles for dist_client in dist_clients { let service = server::SccacheService::mock_with_dist_client( dist_client.clone(), storage.clone(), pool.clone(), ); if obj.is_file() { fs::remove_file(&obj).unwrap(); } let hasher = hasher.clone(); let (cached, res) = hasher .get_cached_or_compile( &service, Some(dist_client.clone()), creator.clone(), storage.clone(), arguments.clone(), cwd.to_path_buf(), vec![], CacheControl::ForceRecache, pool.clone(), ) .wait() .expect("Does not error if storage put fails. qed"); // Ensure that the object file was created. assert!(fs::metadata(&obj).map(|m| m.len() > 0).unwrap()); match cached { CompileResult::CacheMiss(MissType::ForcedRecache, DistType::Error, _, f) => { // wait on cache write future so we don't race with it! 
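// (the write runs on a background future; waiting avoids tearing down the
// tempdir while the write is still in flight)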
f.wait().unwrap(); } _ => panic!("Unexpected compile result: {:?}", cached), } assert_eq!(exit_status(0), res.status); assert_eq!(COMPILER_STDOUT, res.stdout.as_slice()); assert_eq!(COMPILER_STDERR, res.stderr.as_slice()); } } }

#[cfg(test)] #[cfg(feature = "dist-client")] mod test_dist { use crate::dist::pkg; use crate::dist::{ self, AllocJobResult, CompileCommand, JobAlloc, JobComplete, JobId, OutputData, PathTransformer, ProcessOutput, RunJobResult, SchedulerStatusResult, ServerId, SubmitToolchainResult, Toolchain, }; use async_trait::async_trait; use std::path::{Path, PathBuf}; use std::sync::{atomic::AtomicBool, Arc}; use crate::errors::*;

pub struct ErrorPutToolchainClient; impl ErrorPutToolchainClient { #[allow(clippy::new_ret_no_self)] pub fn new() -> Arc<dyn dist::Client> { Arc::new(ErrorPutToolchainClient) } }

#[async_trait] impl dist::Client for ErrorPutToolchainClient { async fn do_alloc_job(&self, _: Toolchain) -> Result<AllocJobResult> { unreachable!() } async fn do_get_status(&self) -> Result<SchedulerStatusResult> { unreachable!() } async fn do_submit_toolchain( &self, _: JobAlloc, _: Toolchain, ) -> Result<SubmitToolchainResult> { unreachable!() } async fn do_run_job( &self, _: JobAlloc, _: CompileCommand, _: Vec<String>, _: Box<dyn pkg::InputsPackager>, ) -> Result<(RunJobResult, PathTransformer)> { unreachable!() } async fn put_toolchain( &self, _: PathBuf, _: String, _: Box<dyn pkg::ToolchainPackager>, ) -> Result<(Toolchain, Option<(String, PathBuf)>)> { Err(anyhow!("MOCK: put toolchain failure")) } fn rewrite_includes_only(&self) -> bool { false } fn get_custom_toolchain(&self, _exe: &Path) -> Option<PathBuf> { None } }

pub struct ErrorAllocJobClient { tc: Toolchain, } impl ErrorAllocJobClient { #[allow(clippy::new_ret_no_self)] pub fn new() -> Arc<dyn dist::Client> { Arc::new(Self { tc: Toolchain { archive_id: "somearchiveid".to_owned(), }, }) } }

#[async_trait] impl dist::Client for ErrorAllocJobClient { async fn do_alloc_job(&self, tc: Toolchain) -> Result<AllocJobResult> { assert_eq!(self.tc, tc); Err(anyhow!("MOCK: alloc job failure")) } async fn do_get_status(&self) -> Result<SchedulerStatusResult> { unreachable!() } async fn do_submit_toolchain( &self, _: JobAlloc, _: Toolchain, ) -> Result<SubmitToolchainResult> { unreachable!() } async fn do_run_job( &self, _: JobAlloc, _: CompileCommand, _: Vec<String>, _: Box<dyn pkg::InputsPackager>, ) -> Result<(RunJobResult, PathTransformer)> { unreachable!() } async fn put_toolchain( &self, _: PathBuf, _: String, _: Box<dyn pkg::ToolchainPackager>, ) -> Result<(Toolchain, Option<(String, PathBuf)>)> { Ok((self.tc.clone(), None)) } fn rewrite_includes_only(&self) -> bool { false } fn get_custom_toolchain(&self, _exe: &Path) -> Option<PathBuf> { None } }

pub struct ErrorSubmitToolchainClient { has_started: AtomicBool, tc: Toolchain, } impl ErrorSubmitToolchainClient { #[allow(clippy::new_ret_no_self)] pub fn new() -> Arc<dyn dist::Client> { Arc::new(Self { has_started: AtomicBool::default(), tc: Toolchain { archive_id: "somearchiveid".to_owned(), }, }) } }

#[async_trait] impl dist::Client for ErrorSubmitToolchainClient { async fn do_alloc_job(&self, tc: Toolchain) -> Result<AllocJobResult> { assert!(!self .has_started .swap(true, std::sync::atomic::Ordering::AcqRel)); assert_eq!(self.tc, tc); Ok(AllocJobResult::Success { job_alloc: JobAlloc { auth: "abcd".to_owned(), job_id: JobId(0), server_id: ServerId::new(([0, 0, 0, 0], 1).into()), }, need_toolchain: true, }) } async fn do_get_status(&self) -> Result<SchedulerStatusResult> { unreachable!("fn do_get_status is not used for this test. qed") } async fn do_submit_toolchain( &self, job_alloc: JobAlloc, tc: Toolchain, ) -> Result<SubmitToolchainResult> { assert_eq!(job_alloc.job_id, JobId(0)); assert_eq!(self.tc, tc); Err(anyhow!("MOCK: submit toolchain failure")) } async fn do_run_job( &self, _: JobAlloc, _: CompileCommand, _: Vec<String>, _: Box<dyn pkg::InputsPackager>, ) -> Result<(RunJobResult, PathTransformer)> { unreachable!("fn do_run_job is not used for this test. qed") } async fn put_toolchain( &self, _: PathBuf, _: String, _: Box<dyn pkg::ToolchainPackager>, ) -> Result<(Toolchain, Option<(String, PathBuf)>)> { Ok((self.tc.clone(), None)) } fn rewrite_includes_only(&self) -> bool { false } fn get_custom_toolchain(&self, _exe: &Path) -> Option<PathBuf> { None } }

pub struct ErrorRunJobClient { has_started: AtomicBool, tc: Toolchain, } impl ErrorRunJobClient { #[allow(clippy::new_ret_no_self)] pub fn new() -> Arc<dyn dist::Client> { Arc::new(Self { has_started: AtomicBool::default(), tc: Toolchain { archive_id: "somearchiveid".to_owned(), }, }) } }

#[async_trait] impl dist::Client for ErrorRunJobClient { async fn do_alloc_job(&self, tc: Toolchain) -> Result<AllocJobResult> { assert!(!self .has_started .swap(true, std::sync::atomic::Ordering::AcqRel)); assert_eq!(self.tc, tc); Ok(AllocJobResult::Success { job_alloc: JobAlloc { auth: "abcd".to_owned(), job_id: JobId(0), server_id: ServerId::new(([0, 0, 0, 0], 1).into()), }, need_toolchain: true, }) } async fn do_get_status(&self) -> Result<SchedulerStatusResult> { unreachable!() } async fn do_submit_toolchain( &self, job_alloc: JobAlloc, tc: Toolchain, ) -> Result<SubmitToolchainResult> { assert_eq!(job_alloc.job_id, JobId(0)); assert_eq!(self.tc, tc); Ok(SubmitToolchainResult::Success) } async fn do_run_job( &self, job_alloc: JobAlloc, command: CompileCommand, _: Vec<String>, _: Box<dyn pkg::InputsPackager>, ) -> Result<(RunJobResult, PathTransformer)> { assert_eq!(job_alloc.job_id, JobId(0)); assert_eq!(command.executable, "/overridden/compiler"); Err(anyhow!("MOCK: run job failure")) } async fn put_toolchain( &self, _: PathBuf, _: String, _: Box<dyn pkg::ToolchainPackager>, ) -> Result<(Toolchain, Option<(String, PathBuf)>)> { Ok(( self.tc.clone(), Some(( "/overridden/compiler".to_owned(), PathBuf::from("somearchiveid"), )), )) } fn rewrite_includes_only(&self) -> bool { false } fn get_custom_toolchain(&self, _exe: &Path) -> Option<PathBuf> { None } }

pub struct OneshotClient { has_started: AtomicBool, tc: Toolchain, output: ProcessOutput, } impl OneshotClient { #[allow(clippy::new_ret_no_self)] pub fn new(code: i32, stdout: Vec<u8>, stderr: Vec<u8>) -> Arc<dyn dist::Client> { Arc::new(Self { has_started: AtomicBool::default(), tc: Toolchain { archive_id: "somearchiveid".to_owned(), }, output: ProcessOutput::fake_output(code, stdout, stderr), }) } }

#[async_trait] impl dist::Client for OneshotClient { async fn do_alloc_job(&self, tc: Toolchain) -> Result<AllocJobResult> { assert!(!self .has_started .swap(true, std::sync::atomic::Ordering::AcqRel)); assert_eq!(self.tc, tc); Ok(AllocJobResult::Success { job_alloc: JobAlloc { auth: "abcd".to_owned(), job_id: JobId(0), server_id: ServerId::new(([0, 0, 0, 0], 1).into()), }, need_toolchain: true, }) } async fn do_get_status(&self) -> Result<SchedulerStatusResult> { unreachable!("fn do_get_status is not used for this test. qed")
qed") } async fn do_submit_toolchain( &self, job_alloc: JobAlloc, tc: Toolchain, ) -> Result { assert_eq!(job_alloc.job_id, JobId(0)); assert_eq!(self.tc, tc); Ok(SubmitToolchainResult::Success) } async fn do_run_job( &self, job_alloc: JobAlloc, command: CompileCommand, outputs: Vec, inputs_packager: Box, ) -> Result<(RunJobResult, PathTransformer)> { assert_eq!(job_alloc.job_id, JobId(0)); assert_eq!(command.executable, "/overridden/compiler"); let mut inputs = vec![]; let path_transformer = inputs_packager.write_inputs(&mut inputs).unwrap(); let outputs = outputs .into_iter() .map(|name| { let data = format!("some data in {}", name); let data = OutputData::try_from_reader(data.as_bytes()).unwrap(); (name, data) }) .collect(); let result = RunJobResult::Complete(JobComplete { output: self.output.clone(), outputs, }); Ok((result, path_transformer)) } async fn put_toolchain( &self, _: PathBuf, _: String, _: Box, ) -> Result<(Toolchain, Option<(String, PathBuf)>)> { Ok(( self.tc.clone(), Some(( "/overridden/compiler".to_owned(), PathBuf::from("somearchiveid"), )), )) } fn rewrite_includes_only(&self) -> bool { false } fn get_custom_toolchain(&self, _exe: &Path) -> Option { None } } } mozilla-sccache-40c3d6b/src/compiler/counted_array.rs000066400000000000000000000014661475712407500230100ustar00rootroot00000000000000/// Helper macro to create fixed-length arrays without specifying a fixed size #[macro_export] macro_rules! counted_array { ($v:vis static $name:ident : [ $t:ty ; _ ] = [$($value:expr),* $(,)?] ) => { $v static $name : [ $t; counted_array!(@count $($value,)*) ] = [ $( $value ),* ]; }; // The best way to count variadic args // according to (@count ) => { 0usize }; (@count $($arg:expr,)*) => { <[()]>::len(&[ $( counted_array!( @nil $arg ), )*]) }; (@nil $orig:expr) => { () }; } #[cfg(test)] mod test { #[test] fn counted_array_macro() { counted_array!(static ARR_QUAD: [u8;_] = [1,2,3,4,]); assert_eq!(ARR_QUAD.len(), 4); } } mozilla-sccache-40c3d6b/src/compiler/cudafe.rs000066400000000000000000000136461475712407500214030ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(unused_imports, dead_code, unused_variables)] use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::cicc; use crate::compiler::{ CCompileCommand, Cacheable, ColorMode, CompileCommand, CompilerArguments, Language, SingleCompileCommand, }; use crate::{counted_array, dist}; use crate::mock_command::{CommandCreator, CommandCreatorSync, RunCommand}; use async_trait::async_trait; use std::collections::HashMap; use std::ffi::OsString; use std::fs; use std::path::{Path, PathBuf}; use std::process; use crate::errors::*; /// A unit struct on which to implement `CCompilerImpl`. 
#[derive(Clone, Debug)] pub struct CudaFE { pub version: Option<String>, }

#[async_trait] impl CCompilerImpl for CudaFE { fn kind(&self) -> CCompilerKind { CCompilerKind::CudaFE } fn plusplus(&self) -> bool { true } fn version(&self) -> Option<String> { self.version.clone() } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments<ParsedArguments> { cicc::parse_arguments(arguments, cwd, Language::CudaFE, &ARGS[..], 1) } #[allow(clippy::too_many_arguments)] async fn preprocess<T>( &self, _creator: &T, _executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, _env_vars: &[(OsString, OsString)], _may_dist: bool, _rewrite_includes_only: bool, _preprocessor_cache_mode: bool, ) -> Result<process::Output> where T: CommandCreatorSync, { cicc::preprocess(cwd, parsed_args).await } fn generate_compile_commands<T>( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], _rewrite_includes_only: bool, ) -> Result<( Box<dyn CompileCommand<T>>, Option<dist::CompileCommand>, Cacheable, )> where T: CommandCreatorSync, { generate_compile_commands(path_transformer, executable, parsed_args, cwd, env_vars).map( |(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }, ) } }

pub fn generate_compile_commands( path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], ) -> Result<( SingleCompileCommand, Option<dist::CompileCommand>, Cacheable, )> {
// Unused arguments
#[cfg(not(feature = "dist-client"))] { let _ = path_transformer; }
let lang_str = &parsed_args.language.as_str(); let out_file = match parsed_args.outputs.get("obj") { Some(obj) => &obj.path, None => return Err(anyhow!("Missing {:?} file output", lang_str)), }; let mut arguments: Vec<OsString> = vec![]; arguments.extend_from_slice(&parsed_args.common_args); arguments.extend_from_slice(&parsed_args.unhashed_args); arguments.extend(vec![ "--module_id_file_name".into(), out_file.into(), (&parsed_args.input).into(), ]);
if log_enabled!(log::Level::Trace) { trace!( "[{}]: {} command: {:?}", out_file.file_name().unwrap().to_string_lossy(), executable.file_name().unwrap().to_string_lossy(), [ &[format!("cd {} &&", cwd.to_string_lossy()).to_string()], &[executable.to_str().unwrap_or_default().to_string()][..], &dist::osstrings_to_strings(&arguments).unwrap_or_default()[..] ] .concat() .join(" ") ); }
let command = SingleCompileCommand { executable: executable.to_owned(), arguments, env_vars: env_vars.to_owned(), cwd: cwd.to_owned(), };
#[cfg(not(feature = "dist-client"))] let dist_command = None;
#[cfg(feature = "dist-client")] let dist_command = (|| { let mut arguments: Vec<String> = vec![]; arguments.extend(dist::osstrings_to_strings(&parsed_args.common_args)?); arguments.extend(dist::osstrings_to_strings(&parsed_args.unhashed_args)?); arguments.extend(vec![ "--module_id_file_name".into(), path_transformer.as_dist(out_file)?, path_transformer.as_dist(&parsed_args.input)?, ]); Some(dist::CompileCommand { executable: path_transformer.as_dist(executable.canonicalize().unwrap().as_path())?, arguments, env_vars: dist::osstring_tuples_to_strings(env_vars)?, cwd: path_transformer.as_dist_abs(cwd)?, }) })();
Ok((command, dist_command, Cacheable::Yes)) }

use cicc::ArgData::*;

counted_array!(pub static ARGS: [ArgInfo<ArgData>; _] = [ take_arg!("--gen_c_file_name", PathBuf, Separated, UnhashedOutput), flag!("--gen_module_id_file", GenModuleIdFileFlag), take_arg!("--module_id_file_name", PathBuf, Separated, Output), take_arg!("--stub_file_name", OsString, Separated, UnhashedPassThrough), ]);
mozilla-sccache-40c3d6b/src/compiler/diab.rs000066400000000000000000000640351475712407500210510ustar00rootroot00000000000000// Copyright 2018 Mozilla Foundation
// Copyright 2018 Felix Obenhuber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
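// Illustrative sketch (not from the original source): a cacheable Diab
// driver invocation handled by this module looks like
//
//     dcc -c foo.c -o foo.o -Iinclude -DDEBUG
//
// while preprocess-only or dry-run modes (`-E`, `-P`, `-S`, `-##`) and `-@`
// arguments that cannot be expanded are rejected during argument parsing as
// CannotCache, and command lines without `-c` as NotCompilation.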
use crate::compiler::args::{ ArgDisposition, ArgInfo, ArgToStringResult, ArgsIter, Argument, FromArg, IntoArg, NormalizedDisposition, PathTransformerFn, SearchableArgInfo, }; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::{ CCompileCommand, Cacheable, ColorMode, CompileCommand, CompilerArguments, Language, SingleCompileCommand, }; use crate::errors::*; use crate::mock_command::{CommandCreatorSync, RunCommand}; use crate::util::{run_input_output, OsStrExt}; use crate::{counted_array, dist}; use async_trait::async_trait; use fs::File; use fs_err as fs; use log::Level::Trace; use std::collections::HashMap; use std::ffi::OsString; use std::io::Read; use std::path::{Path, PathBuf}; use std::process;

#[derive(Clone, Debug)] pub struct Diab { pub version: Option<String>, }

#[async_trait] impl CCompilerImpl for Diab { fn kind(&self) -> CCompilerKind { CCompilerKind::Diab } fn plusplus(&self) -> bool { false } fn version(&self) -> Option<String> { self.version.clone() } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments<ParsedArguments> { parse_arguments(arguments, cwd, &ARGS[..]) } #[allow(clippy::too_many_arguments)] async fn preprocess<T>( &self, creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, _rewrite_includes_only: bool, _preprocessor_cache_mode: bool, ) -> Result<process::Output> where T: CommandCreatorSync, { preprocess(creator, executable, parsed_args, cwd, env_vars, may_dist).await } fn generate_compile_commands<T>( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], _rewrite_includes_only: bool, ) -> Result<( Box<dyn CompileCommand<T>>, Option<dist::CompileCommand>, Cacheable, )> where T: CommandCreatorSync, { generate_compile_commands(path_transformer, executable, parsed_args, cwd, env_vars).map( |(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }, ) } }

ArgData! { pub DoCompilation, Output(PathBuf), PassThrough(OsString), PreprocessorArgument(OsString), PreprocessorArgumentPath(PathBuf), DepArgumentFlag, DepArgument(OsString), DepArgumentPath(PathBuf), TooHardFlag, TooHard(OsString), }

use self::ArgData::*;

counted_array!(pub static ARGS: [ArgInfo<ArgData>; _] = [ flag!("-", TooHardFlag), flag!("-##", TooHardFlag), flag!("-###", TooHardFlag), take_arg!("-@", OsString, Concatenated, TooHard), take_arg!("-D", OsString, CanBeSeparated, PreprocessorArgument), flag!("-E", TooHardFlag), take_arg!("-I", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-L", OsString, Separated, PassThrough), flag!("-P", TooHardFlag), flag!("-S", TooHardFlag), take_arg!("-U", OsString, CanBeSeparated, PreprocessorArgument), flag!("-V", TooHardFlag), flag!("-VV", TooHardFlag), take_arg!("-W", OsString, Separated, PassThrough), flag!("-Xmake-dependency", DepArgumentFlag), flag!( "-Xmake-dependency-canonicalize-path-off", DepArgumentFlag ), take_arg!( "-Xmake-dependency-savefile", PathBuf, Concatenated('='), DepArgumentPath ), take_arg!( "-Xmake-dependency-target", OsString, Concatenated('='), DepArgument ), flag!("-c", DoCompilation), take_arg!( "-include", PathBuf, CanBeSeparated, PreprocessorArgumentPath ), take_arg!("-l", OsString, Separated, PassThrough), take_arg!("-o", PathBuf, Separated, Output), take_arg!("-t", OsString, Separated, PassThrough), ]);

/// Parse `arguments`, determining whether it is supported.
///
/// If any of the entries in `arguments` result in a compilation that
/// cannot be cached, return `CompilerArguments::CannotCache`.
/// If the commandline described by `arguments` is not compilation,
/// return `CompilerArguments::NotCompilation`.
/// Otherwise, return `CompilerArguments::Ok(ParsedArguments)`, with
/// the `ParsedArguments` struct containing information parsed from
/// `arguments`.
pub fn parse_arguments<S>( arguments: &[OsString], cwd: &Path, arg_info: S, ) -> CompilerArguments<ParsedArguments> where S: SearchableArgInfo<ArgData>, { let mut common_args = vec![]; let mut compilation = false; let mut compilation_flag = OsString::new(); let mut input_arg = None; let mut multiple_input = false; let mut output_arg = None; let mut preprocessor_args = vec![]; let mut dependency_args = vec![];
// Custom iterator to expand `@` arguments which stand for reading a file
// and interpreting it as a list of more arguments.
let it = ExpandAtArgs::new(cwd, arguments);
for arg in ArgsIter::new(it, arg_info) { let arg = try_or_cannot_cache!(arg, "argument parse");
// Check if the value part of this argument begins with '@'. If so, we either
// failed to expand it, or it was a concatenated argument - either way, bail.
// We refuse to cache concatenated arguments (like "-include@foo") because they're a
// mess. See https://github.com/mozilla/sccache/issues/150#issuecomment-318586953
match arg { Argument::WithValue(_, ref v, ArgDisposition::Separated) | Argument::WithValue(_, ref v, ArgDisposition::CanBeConcatenated(_)) | Argument::WithValue(_, ref v, ArgDisposition::CanBeSeparated(_)) => { if v.clone().into_arg_os_string().starts_with("@") { cannot_cache!("@"); } }
// Empirically, concatenated arguments appear not to interpret '@' as
// an include directive, so just continue.
Argument::WithValue(_, _, ArgDisposition::Concatenated(_)) | Argument::Raw(_) | Argument::UnknownFlag(_) | Argument::Flag(_, _) => {} }
match arg.get_data() { Some(TooHardFlag) | Some(TooHard(_)) => { cannot_cache!(arg.flag_str().expect("Can't be Argument::Raw/UnknownFlag",)) } Some(DepArgument(_)) | Some(DepArgumentFlag) | Some(DepArgumentPath(_)) => {} Some(DoCompilation) => { compilation = true; compilation_flag = OsString::from(arg.flag_str().expect("Compilation flag expected")); } Some(Output(p)) => output_arg = Some(p.clone()), Some(PreprocessorArgument(_)) | Some(PreprocessorArgumentPath(_)) | Some(PassThrough(_)) => {} None => match arg { Argument::Raw(ref val) => { if input_arg.is_some() { multiple_input = true; } input_arg = Some(val.clone()); } Argument::UnknownFlag(_) => {} _ => unreachable!(), }, }
let args = match arg.get_data() { Some(PassThrough(_)) => &mut common_args, Some(DepArgument(_)) | Some(DepArgumentFlag) | Some(DepArgumentPath(_)) => { &mut dependency_args } Some(PreprocessorArgument(_)) | Some(PreprocessorArgumentPath(_)) => { &mut preprocessor_args } Some(DoCompilation) | Some(Output(_)) => continue, Some(TooHardFlag) | Some(TooHard(_)) => unreachable!(), None => match arg { Argument::Raw(_) => continue, Argument::UnknownFlag(_) => &mut common_args, _ => unreachable!(), }, };
// Normalize attributes such as "-I foo", "-D FOO=bar", as
// "-Ifoo", "-DFOO=bar", etc. and "-includefoo", "-idirafterbar" as
// "-include foo", "-idirafter bar", etc.
let norm = match arg.flag_str() { Some(s) if s.len() == 2 => NormalizedDisposition::Concatenated, _ => NormalizedDisposition::Separated, }; args.extend(arg.normalize(norm).iter_os_strings()); }
// We only support compilation.
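// Anything else (a link step, a preprocess-only run, and so on) is reported
// as NotCompilation below, in which case sccache simply runs the real
// compiler untouched.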
if !compilation { return CompilerArguments::NotCompilation; }
// Can't cache compilations with multiple inputs.
if multiple_input { cannot_cache!("multiple input files"); }
let input = match input_arg { Some(i) => i, // We can't cache compilation without an input. None => cannot_cache!("no input file"), };
let language = match Language::from_file_name(Path::new(&input)) { Some(l) => l, None => cannot_cache!("unknown source language"), };
let output = output_arg .map(PathBuf::from) .unwrap_or_else(|| Path::new(&input).with_extension("o"));
let mut outputs = HashMap::new(); outputs.insert( "obj", ArtifactDescriptor { path: output, optional: false, }, );
CompilerArguments::Ok(ParsedArguments { input: input.into(), double_dash_input: false, language, compilation_flag, depfile: None, outputs, dependency_args, preprocessor_args, common_args, arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, // FIXME: Implement me. color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }) }

pub async fn preprocess<T>( creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], _may_dist: bool, ) -> Result<process::Output> where T: CommandCreatorSync, { let mut cmd = creator.clone().new_command_sync(executable); cmd.arg("-E") .arg(&parsed_args.input) .args(&parsed_args.dependency_args) .args(&parsed_args.preprocessor_args) .args(&parsed_args.common_args) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); if log_enabled!(Trace) { trace!("preprocess: {:?}", cmd); } run_input_output(cmd, None).await }

pub fn generate_compile_commands( _path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], ) -> Result<( SingleCompileCommand, Option<dist::CompileCommand>, Cacheable, )> { trace!("compile"); let out_file = match parsed_args.outputs.get("obj") { Some(obj) => &obj.path, None => return Err(anyhow!("Missing object file output")), }; let mut arguments: Vec<OsString> = vec![ parsed_args.compilation_flag.clone(), parsed_args.input.clone().into(), "-o".into(), out_file.into(), ]; arguments.extend_from_slice(&parsed_args.preprocessor_args); arguments.extend_from_slice(&parsed_args.unhashed_args); arguments.extend_from_slice(&parsed_args.common_args); let command = SingleCompileCommand { executable: executable.to_owned(), arguments, env_vars: env_vars.to_owned(), cwd: cwd.to_owned(), }; Ok((command, None, Cacheable::Yes)) }

pub struct ExpandAtArgs<'a> { cwd: &'a Path, stack: Vec<OsString>, } impl<'a> ExpandAtArgs<'a> { pub fn new(cwd: &'a Path, args: &[OsString]) -> Self { ExpandAtArgs { stack: args.iter().rev().map(|a| a.to_owned()).collect(), cwd, } } }

impl<'a> Iterator for ExpandAtArgs<'a> { type Item = OsString; fn next(&mut self) -> Option<OsString> { loop { let arg = self.stack.pop()?;
// Just return non @ arguments
if !arg.starts_with("-@") { return Some(arg); }
let value = match arg.split_prefix("-@") { Some(arg) => arg, None => return Some(arg), };
// Return options that produce additional output and are not cacheable
if value.starts_with("E") || value.starts_with("O") || value.starts_with("@") { return Some(arg); }
// According to diab [1], @file means:
// Read command line options from either a file or an environment
// variable. When -@name is encountered on the command line, the
// driver first looks for an environment variable with the given
// name and substitutes its value. If an environment variable is
// not found then the driver tries to open a file with given name
// and substitutes the contents of the file. If neither an
// environment variable nor a file can be found, an error message
// is issued and the driver terminates.
//
// [1]: http://www.vxdev.com/docs/vx55man/diab5.0ppc/c-invoke.htm#3000619
//
// The environment variable feature is *not* supported by sccache
// since this would raise the need for the client's environment
// and not just env::var. This is technically possible, but
// considered an unneeded edge case for now.
let mut contents = String::new(); let file = self.cwd.join(&value); let res = File::open(file).and_then(|mut f| f.read_to_string(&mut contents)); if res.is_err() { // Failed to read the file, so return the argument as it is. // This will result in a CannotCache. return Some(arg); } if contents.contains('"') || contents.contains('\'') { return Some(arg); } let new_args = contents.split_whitespace().collect::<Vec<_>>(); self.stack.extend(new_args.iter().rev().map(|s| s.into())); } } }

#[cfg(test)] mod test { use super::{ dist, fs, generate_compile_commands, parse_arguments, Language, OsString, ParsedArguments, ARGS, }; use crate::compiler::c::ArtifactDescriptor; use crate::compiler::*; use crate::mock_command::*; use crate::server; use crate::test::mock_storage::MockStorage; use crate::test::utils::*; use fs::File; use std::io::Write;

fn parse_arguments_(arguments: Vec<String>) -> CompilerArguments<ParsedArguments> { let args = arguments.iter().map(OsString::from).collect::<Vec<OsString>>(); parse_arguments(&args, ".".as_ref(), &ARGS[..]) }

#[test] fn test_parse_arguments_simple() { let args = stringvec!["-c", "foo.c", "-o", "foo.o"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); }

#[test] fn test_parse_arguments_default_name() { let args = stringvec!["-c", "foo.c"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); }

#[test] fn test_parse_arguments_extra() { let args = stringvec!["-c", "foo.cc", "-fabc", "-o", "foo.o", "-mxyz"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, ..
} = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cc"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fabc", "-mxyz"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_values() { let args = stringvec![ "-c", "foo.cxx", "-fabc", "-I", "include", "-o", "foo.o", "-include", "file" ]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cxx"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!(ovec!["-Iinclude", "-include", "file"], preprocessor_args); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_preprocessor_args() { let args = stringvec![ "-c", "foo.c", "-fabc", "-Xmake-dependency", "-Xmake-dependency-canonicalize-path-off", "-Xmake-dependency-savefile=bar", "-Xmake-dependency-target=foo", "-o", "foo.o" ]; let ParsedArguments { input, language, outputs, dependency_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec![ "-Xmake-dependency", "-Xmake-dependency-canonicalize-path-off", "-Xmake-dependency-savefile=bar", "-Xmake-dependency-target=foo" ], dependency_args ); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_empty_args() { assert_eq!(CompilerArguments::NotCompilation, parse_arguments_(vec![])); } #[test] fn test_parse_arguments_not_compile() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_(stringvec!["-o", "foo"]) ); } #[test] fn test_parse_arguments_too_many_inputs() { assert_eq!( CompilerArguments::CannotCache("multiple input files", None), parse_arguments_(stringvec!["-c", "foo.c", "-o", "foo.o", "bar.c"]) ); } #[test] fn test_parse_arguments_link() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_(stringvec!["-shared", "foo.o", "-o", "foo.so", "bar.o"]) ); } #[test] fn test_parse_dry_run() { assert_eq!( CompilerArguments::CannotCache("-##", None), parse_arguments_(stringvec!["-##", "-c", "foo.c"]) ); assert_eq!( CompilerArguments::CannotCache("-###", None), parse_arguments_(stringvec!["-###", "-c", "foo.c"]) ); } #[test] fn test_at_signs() { let cannot_cache = CompilerArguments::CannotCache("-@", None); assert_eq!(parse_arguments_(vec!["-@@foo".into()]), cannot_cache); assert_eq!(parse_arguments_(vec!["-@E=foo".into()]), cannot_cache); assert_eq!(parse_arguments_(vec!["-@E+foo".into()]), cannot_cache); assert_eq!(parse_arguments_(vec!["-@O=foo".into()]), cannot_cache); assert_eq!(parse_arguments_(vec!["-@O+foo".into()]), cannot_cache); } #[test] fn test_at_signs_file_not_readable() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); let arg = format!("-@{}", td.path().join("foo").display()); 
// File foo doesn't exist.
assert_eq!( parse_arguments_(vec![arg]), CompilerArguments::CannotCache("-@", None) ); }

#[test] fn test_at_signs_file() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); File::create(td.path().join("foo")) .unwrap() .write_all(b"-c foo.c -o foo.o") .unwrap(); let arg = format!("-@{}", td.path().join("foo").display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(vec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); }

#[test] fn test_compile_simple() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.c".into(), double_dash_input: false, language: Language::C, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec![], arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc<MockStorage> = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0];
// Compiler invocation.
next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, _, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], ) .unwrap(); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable);
// Ensure that we ran all processes.
assert_eq!(0, creator.lock().unwrap().children.len()); } }
mozilla-sccache-40c3d6b/src/compiler/gcc.rs000066400000000000000000002454661475712407500207140ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
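// A rough reading guide for the argument tables in this file (illustrative,
// not exhaustive):
//
//   flag!("-c", DoCompilation)
//       matches the bare flag `-c` and carries no value;
//   take_arg!("-I", PathBuf, CanBeSeparated, PreprocessorArgumentPath)
//       consumes a value and accepts both `-Iinclude` and `-I include`.
//
// The ArgData variant attached to each entry decides which bucket (common,
// preprocessor, dependency, unhashed, ...) the normalized argument lands in
// during parse_arguments.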
use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::{ clang, CCompileCommand, Cacheable, ColorMode, CompileCommand, CompilerArguments, Language, SingleCompileCommand, }; use crate::mock_command::{CommandCreatorSync, RunCommand}; use crate::util::{run_input_output, OsStrExt}; use crate::{counted_array, dist}; use async_trait::async_trait; use fs::File; use fs_err as fs; use log::Level::Trace; use std::collections::HashMap; use std::env; use std::ffi::{OsStr, OsString}; use std::io::Read; use std::path::{Path, PathBuf}; use std::process; use crate::errors::*;

/// A struct on which to implement `CCompilerImpl`.
#[derive(Clone, Debug)] pub struct Gcc { pub gplusplus: bool, pub version: Option<String>, }

#[async_trait] impl CCompilerImpl for Gcc { fn kind(&self) -> CCompilerKind { CCompilerKind::Gcc } fn plusplus(&self) -> bool { self.gplusplus } fn version(&self) -> Option<String> { self.version.clone() } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments<ParsedArguments> { parse_arguments(arguments, cwd, &ARGS[..], self.gplusplus, self.kind()) } #[allow(clippy::too_many_arguments)] async fn preprocess<T>( &self, creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, rewrite_includes_only: bool, preprocessor_cache_mode: bool, ) -> Result<process::Output> where T: CommandCreatorSync, { let ignorable_whitespace_flags = if preprocessor_cache_mode { vec![] } else { vec!["-P".to_string()] }; preprocess( creator, executable, parsed_args, cwd, env_vars, may_dist, self.kind(), rewrite_includes_only, ignorable_whitespace_flags, language_to_gcc_arg, ) .await } fn generate_compile_commands<T>( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], rewrite_includes_only: bool, ) -> Result<( Box<dyn CompileCommand<T>>, Option<dist::CompileCommand>, Cacheable, )> where T: CommandCreatorSync, { generate_compile_commands( path_transformer, executable, parsed_args, cwd, env_vars, self.kind(), rewrite_includes_only, language_to_gcc_arg, ) .map(|(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }) } }

ArgData! { pub TooHardFlag, TooHard(OsString), DiagnosticsColor(OsString), DiagnosticsColorFlag, NoDiagnosticsColorFlag,
// Should only be necessary for -Xclang flags - unknown flags not hidden behind
// that are assumed to not affect compilation
PassThroughFlag, PassThrough(OsString), PassThroughPath(PathBuf), PreprocessorArgumentFlag, PreprocessorArgument(OsString), PreprocessorArgumentPath(PathBuf),
// Used for arguments that shouldn't affect the computed hash
UnhashedFlag, Unhashed(OsString), DoCompilation, Output(PathBuf), NeedDepTarget,
// Though you might think this should be a path as it's a Makefile target,
// it's not treated as a path by the compiler - it's just written wholesale
// (including any funny make syntax) into the dep file.
DepTarget(OsString), DepArgumentPath(PathBuf), Language(OsString), SplitDwarf, ProfileGenerate, ClangProfileUse(PathBuf), TestCoverage, Coverage, ExtraHashFile(PathBuf),
// Only valid for clang, but this needs to be here since clang shares gcc's arg parsing.
XClang(OsString), Arch(OsString), PedanticFlag, Standard(OsString), SerializeDiagnostics(PathBuf), }

use self::ArgData::*;

const ARCH_FLAG: &str = "-arch";

// Mostly taken from https://github.com/ccache/ccache/blob/master/src/compopt.cpp#L52-L172
counted_array!(pub static ARGS: [ArgInfo<ArgData>; _] = [ flag!("-", TooHardFlag), flag!("--coverage", Coverage), take_arg!("--param", OsString, Separated, PassThrough), flag!("--save-temps", TooHardFlag), take_arg!("--serialize-diagnostics", PathBuf, Separated, SerializeDiagnostics), take_arg!("--sysroot", PathBuf, Separated, PassThroughPath), take_arg!("-A", OsString, Separated, PassThrough), take_arg!("-B", PathBuf, CanBeSeparated, PassThroughPath), take_arg!("-D", OsString, CanBeSeparated, PassThrough), flag!("-E", TooHardFlag), take_arg!("-F", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-G", OsString, Separated, PassThrough), take_arg!("-I", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-L", OsString, Separated, PassThrough), flag!("-M", TooHardFlag), flag!("-MD", NeedDepTarget), take_arg!("-MF", PathBuf, Separated, DepArgumentPath), flag!("-MM", TooHardFlag), flag!("-MMD", NeedDepTarget), flag!("-MP", NeedDepTarget), take_arg!("-MQ", OsString, Separated, DepTarget), take_arg!("-MT", OsString, Separated, DepTarget), flag!("-P", TooHardFlag), take_arg!("-U", OsString, CanBeSeparated, PassThrough), take_arg!("-V", OsString, Separated, PassThrough), flag!("-Werror=pedantic", PedanticFlag), take_arg!("-Wp", OsString, Concatenated(','), PreprocessorArgument), flag!("-Wpedantic", PedanticFlag), take_arg!("-Xassembler", OsString, Separated, PassThrough), take_arg!("-Xlinker", OsString, Separated, PassThrough), take_arg!("-Xpreprocessor", OsString, Separated, PreprocessorArgument), take_arg!(ARCH_FLAG, OsString, Separated, Arch), take_arg!("-aux-info", OsString, Separated, PassThrough), take_arg!("-b", OsString, Separated, PassThrough), flag!("-c", DoCompilation), take_arg!("-fdiagnostics-color", OsString, Concatenated('='), DiagnosticsColor), flag!("-fno-diagnostics-color", NoDiagnosticsColorFlag), flag!("-fno-profile-generate", TooHardFlag), flag!("-fno-profile-use", TooHardFlag), flag!("-fno-working-directory", PreprocessorArgumentFlag), flag!("-fplugin=libcc1plugin", TooHardFlag), flag!("-fprofile-arcs", ProfileGenerate), flag!("-fprofile-generate", ProfileGenerate), take_arg!("-fprofile-use", OsString, Concatenated, TooHard), flag!("-frepo", TooHardFlag), flag!("-fsyntax-only", TooHardFlag), flag!("-ftest-coverage", TestCoverage), flag!("-fworking-directory", PreprocessorArgumentFlag), flag!("-gsplit-dwarf", SplitDwarf), take_arg!("-idirafter", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-iframework", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-imacros", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-imultilib", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-include", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-index-store-path", OsString, Separated, TooHard), take_arg!("-install_name", OsString, Separated, PassThrough), take_arg!("-iprefix", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-iquote", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-isysroot", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-isystem", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-ivfsstatcache", PathBuf, CanBeSeparated, PassThroughPath), take_arg!("-iwithprefix", PathBuf,
CanBeSeparated, PreprocessorArgumentPath), take_arg!("-iwithprefixbefore", PathBuf, CanBeSeparated, PreprocessorArgumentPath), flag!("-nostdinc", PreprocessorArgumentFlag), flag!("-nostdinc++", PreprocessorArgumentFlag), take_arg!("-o", PathBuf, CanBeSeparated, Output), flag!("-pedantic", PedanticFlag), flag!("-pedantic-errors", PedanticFlag), flag!("-remap", PreprocessorArgumentFlag), flag!("-save-temps", TooHardFlag), take_arg!("-std", OsString, Concatenated('='), Standard), take_arg!("-stdlib", OsString, Concatenated('='), PreprocessorArgument), flag!("-trigraphs", PreprocessorArgumentFlag), take_arg!("-u", OsString, CanBeSeparated, PassThrough), take_arg!("-x", OsString, CanBeSeparated, Language), take_arg!("-z", OsString, CanBeSeparated, PassThrough), take_arg!("@", OsString, Concatenated, TooHard), ]); /// Parse `arguments`, determining whether it is supported. /// /// If any of the entries in `arguments` result in a compilation that /// cannot be cached, return `CompilerArguments::CannotCache`. /// If the commandline described by `arguments` is not compilation, /// return `CompilerArguments::NotCompilation`. /// Otherwise, return `CompilerArguments::Ok(ParsedArguments)`, with /// the `ParsedArguments` struct containing information parsed from /// `arguments`. pub fn parse_arguments<S>( arguments: &[OsString], cwd: &Path, arg_info: S, plusplus: bool, kind: CCompilerKind, ) -> CompilerArguments<ParsedArguments> where S: SearchableArgInfo<ArgData>, { let mut output_arg = None; let mut input_arg = None; let mut double_dash_input = false; let mut dep_target = None; let mut dep_flag = OsString::from("-MT"); let mut common_args = vec![]; let mut arch_args = vec![]; let mut unhashed_args = vec![]; let mut preprocessor_args = vec![]; let mut dependency_args = vec![]; let mut extra_hash_files = vec![]; let mut compilation = false; let mut multiple_input = false; let mut multiple_input_files = Vec::new(); let mut pedantic_flag = false; let mut language_extensions = true; // by default, GCC allows extensions let mut split_dwarf = false; let mut need_explicit_dep_target = false; enum DepArgumentRequirePath { NotNeeded, Missing, Provided, } let mut need_explicit_dep_argument_path = DepArgumentRequirePath::NotNeeded; let mut language = None; let mut compilation_flag = OsString::new(); let mut profile_generate = false; let mut outputs_gcno = false; let mut xclangs: Vec<OsString> = vec![]; let mut color_mode = ColorMode::Auto; let mut seen_arch = None; let mut serialize_diagnostics = None; let dont_cache_multiarch = env::var("SCCACHE_CACHE_MULTIARCH").is_err(); // Custom iterator to expand `@` arguments which stand for reading a file // and interpreting it as a list of more arguments. let it = ExpandIncludeFile::new(cwd, arguments); let mut too_hard_for_preprocessor_cache_mode = None; let mut args_iter = ArgsIter::new(it, arg_info); if kind == CCompilerKind::Clang { args_iter = args_iter.with_double_dashes(); } for arg in args_iter { let arg = try_or_cannot_cache!(arg, "argument parse"); // Check if the value part of this argument begins with '@'. If so, we either // failed to expand it, or it was a concatenated argument - either way, bail. // We refuse to cache concatenated arguments (like "-include@foo") because they're a // mess.
See https://github.com/mozilla/sccache/issues/150#issuecomment-318586953 match arg { Argument::WithValue(_, ref v, ArgDisposition::Separated) | Argument::WithValue(_, ref v, ArgDisposition::CanBeConcatenated(_)) | Argument::WithValue(_, ref v, ArgDisposition::CanBeSeparated(_)) => { if v.clone().into_arg_os_string().starts_with("@") { cannot_cache!("@"); } } // Empirically, concatenated arguments appear not to interpret '@' as // an include directive, so just continue. Argument::WithValue(_, _, ArgDisposition::Concatenated(_)) | Argument::Raw(_) | Argument::UnknownFlag(_) | Argument::Flag(_, _) => {} } match arg.get_data() { Some(TooHardFlag) | Some(TooHard(_)) => { cannot_cache!(arg.flag_str().expect("Can't be Argument::Raw/UnknownFlag",)) } Some(PedanticFlag) => pedantic_flag = true, // standard values vary, but extension values all start with "gnu" Some(Standard(version)) => language_extensions = version.starts_with("gnu"), Some(SplitDwarf) => split_dwarf = true, Some(DoCompilation) => { compilation = true; compilation_flag = OsString::from(arg.flag_str().expect("Compilation flag expected")); } Some(ProfileGenerate) => profile_generate = true, Some(ClangProfileUse(path)) => { extra_hash_files.push(clang::resolve_profile_use_path(path, cwd)); } Some(TestCoverage) => outputs_gcno = true, Some(Coverage) => { outputs_gcno = true; profile_generate = true; } Some(DiagnosticsColorFlag) => color_mode = ColorMode::On, Some(NoDiagnosticsColorFlag) => color_mode = ColorMode::Off, Some(DiagnosticsColor(value)) => { color_mode = match value.to_str().unwrap_or("auto") { "" | "always" => ColorMode::On, "never" => ColorMode::Off, _ => ColorMode::Auto, }; } Some(Output(p)) => output_arg = Some(p.clone()), Some(NeedDepTarget) => { too_hard_for_preprocessor_cache_mode = Some(arg.to_os_string()); need_explicit_dep_target = true; if let DepArgumentRequirePath::NotNeeded = need_explicit_dep_argument_path { need_explicit_dep_argument_path = DepArgumentRequirePath::Missing; } } Some(DepTarget(s)) => { dep_flag = OsString::from(arg.flag_str().expect("Dep target flag expected")); dep_target = Some(s.clone()); } Some(DepArgumentPath(_)) => { need_explicit_dep_argument_path = DepArgumentRequirePath::Provided; } Some(SerializeDiagnostics(path)) => { serialize_diagnostics = Some(path.clone()); } Some(ExtraHashFile(_)) | Some(PassThroughFlag) | Some(PreprocessorArgumentFlag) | Some(PreprocessorArgument(_)) | Some(PreprocessorArgumentPath(_)) | Some(PassThrough(_)) | Some(PassThroughPath(_)) | Some(UnhashedFlag) | Some(Unhashed(_)) => {} Some(Language(lang)) => { language = match lang.to_string_lossy().as_ref() { "c" => Some(Language::C), "c-header" => Some(Language::CHeader), "c++" => Some(Language::Cxx), "c++-header" => Some(Language::CxxHeader), "objective-c" => Some(Language::ObjectiveC), "objective-c++" => Some(Language::ObjectiveCxx), "objective-c++-header" => Some(Language::ObjectiveCxxHeader), "cu" => Some(Language::Cuda), "rs" => Some(Language::Rust), "cuda" => Some(Language::Cuda), "hip" => Some(Language::Hip), _ => cannot_cache!("-x"), }; } Some(Arch(arch)) => { match seen_arch { Some(s) if &s != arch && dont_cache_multiarch => { cannot_cache!( "multiple different -arch, and SCCACHE_CACHE_MULTIARCH not set" ) } _ => {} }; seen_arch = Some(arch.clone()); } Some(XClang(s)) => xclangs.push(s.clone()), None => match arg { Argument::Raw(ref val) if val == "--" => { if input_arg.is_none() { double_dash_input = true; } } Argument::Raw(ref val) => { if input_arg.is_some() { multiple_input = true; 
multiple_input_files.push(val.clone()); } input_arg = Some(val.clone()); } Argument::UnknownFlag(_) => {} _ => unreachable!(), }, } let args = match arg.get_data() { Some(SplitDwarf) | Some(PedanticFlag) | Some(Standard(_)) | Some(ProfileGenerate) | Some(ClangProfileUse(_)) | Some(TestCoverage) | Some(Coverage) | Some(DiagnosticsColor(_)) | Some(DiagnosticsColorFlag) | Some(NoDiagnosticsColorFlag) | Some(PassThroughFlag) | Some(PassThrough(_)) | Some(PassThroughPath(_)) => &mut common_args, Some(UnhashedFlag) | Some(Unhashed(_)) => &mut unhashed_args, Some(Arch(_)) => &mut arch_args, Some(ExtraHashFile(path)) => { extra_hash_files.push(cwd.join(path)); &mut common_args } Some(PreprocessorArgument(_)) => { too_hard_for_preprocessor_cache_mode = match arg.flag_str() { Some(s) if s == "-Xpreprocessor" || s == "-Wp" => Some(arg.to_os_string()), _ => None, }; &mut preprocessor_args } Some(PreprocessorArgumentFlag) | Some(PreprocessorArgumentPath(_)) => { &mut preprocessor_args } Some(DepArgumentPath(_)) | Some(NeedDepTarget) => &mut dependency_args, Some(DoCompilation) | Some(Language(_)) | Some(Output(_)) | Some(XClang(_)) | Some(DepTarget(_)) | Some(SerializeDiagnostics(_)) => continue, Some(TooHardFlag) | Some(TooHard(_)) => unreachable!(), None => match arg { Argument::Raw(_) => continue, Argument::UnknownFlag(_) => &mut common_args, _ => unreachable!(), }, }; // Normalize attributes such as "-I foo", "-D FOO=bar", as // "-Ifoo", "-DFOO=bar", etc. and "-includefoo", "idirafterbar" as // "-include foo", "-idirafter bar", etc. let norm = match arg.flag_str() { Some(s) if s.len() == 2 => NormalizedDisposition::Concatenated, _ => NormalizedDisposition::Separated, }; args.extend(arg.normalize(norm).iter_os_strings()); } let xclang_it = ExpandIncludeFile::new(cwd, &xclangs); let mut follows_plugin_arg = false; for arg in ArgsIter::new(xclang_it, (&ARGS[..], &clang::ARGS[..])) { let arg = try_or_cannot_cache!(arg, "argument parse"); let args = match arg.get_data() { Some(SplitDwarf) | Some(PedanticFlag) | Some(Standard(_)) | Some(ProfileGenerate) | Some(ClangProfileUse(_)) | Some(TestCoverage) | Some(Coverage) | Some(DoCompilation) | Some(Language(_)) | Some(Output(_)) | Some(TooHardFlag) | Some(XClang(_)) | Some(TooHard(_)) => cannot_cache!(arg .flag_str() .unwrap_or("Can't handle complex arguments through clang",)), None => match arg { Argument::Raw(_) if follows_plugin_arg => &mut common_args, Argument::Raw(flag) => cannot_cache!( "Can't handle Raw arguments with -Xclang", flag.to_str().unwrap_or("").to_string() ), Argument::UnknownFlag(flag) => { cannot_cache!( "Can't handle UnknownFlag arguments with -Xclang", flag.to_str().unwrap_or("").to_string() ) } _ => unreachable!(), }, Some(DiagnosticsColor(_)) | Some(DiagnosticsColorFlag) | Some(NoDiagnosticsColorFlag) | Some(Arch(_)) | Some(PassThrough(_)) | Some(PassThroughFlag) | Some(PassThroughPath(_)) | Some(SerializeDiagnostics(_)) => &mut common_args, Some(UnhashedFlag) | Some(Unhashed(_)) => &mut unhashed_args, Some(ExtraHashFile(path)) => { extra_hash_files.push(cwd.join(path)); &mut common_args } Some(PreprocessorArgumentFlag) | Some(PreprocessorArgument(_)) | Some(PreprocessorArgumentPath(_)) => &mut preprocessor_args, Some(DepTarget(_)) | Some(DepArgumentPath(_)) | Some(NeedDepTarget) => { &mut dependency_args } }; follows_plugin_arg = match arg.flag_str() { Some(s) => s == "-plugin-arg", _ => false, }; // Normalize attributes such as "-I foo", "-D FOO=bar", as // "-Ifoo", "-DFOO=bar", etc. 
and "-includefoo", "idirafterbar" as // "-include foo", "-idirafter bar", etc. let norm = match arg.flag_str() { Some(s) if s.len() == 2 => NormalizedDisposition::Concatenated, _ => NormalizedDisposition::Separated, }; for arg in arg.normalize(norm).iter_os_strings() { args.push("-Xclang".into()); args.push(arg) } } // We only support compilation. if !compilation { return CompilerArguments::NotCompilation; } // Can't cache compilations with multiple inputs. if multiple_input { cannot_cache!( "multiple input files", format!("{:?}", multiple_input_files) ); } let input = match input_arg { Some(i) => i, // We can't cache compilation without an input. None => cannot_cache!("no input file"), }; let language = match language { None => { let mut lang = Language::from_file_name(Path::new(&input)); if let (Some(Language::C), true) = (lang, plusplus) { lang = Some(Language::Cxx); } lang } l => l, }; let language = match language { Some(l) => l, None => cannot_cache!("unknown source language"), }; let mut outputs = HashMap::new(); let output = match output_arg { // We can't cache compilation that doesn't go to a file None => PathBuf::from(Path::new(&input).with_extension("o").file_name().unwrap()), Some(o) => o, }; if split_dwarf { let dwo = output.with_extension("dwo"); common_args.push(OsString::from( "-D_gsplit_dwarf_path=".to_owned() + dwo.to_str().unwrap(), )); // -gsplit-dwarf doesn't guarantee .dwo file if no -g is specified outputs.insert( "dwo", ArtifactDescriptor { path: dwo, optional: true, }, ); } let suppress_rewrite_includes_only = match kind { CCompilerKind::Gcc => language_extensions && pedantic_flag, _ => false, }; if outputs_gcno { let gcno = output.with_extension("gcno"); outputs.insert( "gcno", ArtifactDescriptor { path: gcno, optional: false, }, ); profile_generate = true; } if need_explicit_dep_target { dependency_args.push(dep_flag); dependency_args.push(dep_target.unwrap_or_else(|| output.clone().into_os_string())); } if let DepArgumentRequirePath::Missing = need_explicit_dep_argument_path { dependency_args.push(OsString::from("-MF")); dependency_args.push(Path::new(&output).with_extension("d").into_os_string()); } if let Some(path) = serialize_diagnostics { outputs.insert( "dia", ArtifactDescriptor { path: path.clone(), optional: false, }, ); } outputs.insert( "obj", ArtifactDescriptor { path: output, optional: false, }, ); CompilerArguments::Ok(ParsedArguments { input: input.into(), double_dash_input, language, compilation_flag, depfile: None, outputs, dependency_args, preprocessor_args, common_args, arch_args, unhashed_args, extra_dist_files: vec![], extra_hash_files, msvc_show_includes: false, profile_generate, color_mode, suppress_rewrite_includes_only, too_hard_for_preprocessor_cache_mode, }) } pub fn language_to_gcc_arg(lang: Language) -> Option<&'static str> { match lang { Language::C => Some("c"), Language::CHeader => Some("c-header"), Language::Cxx => Some("c++"), Language::CxxHeader => Some("c++-header"), Language::ObjectiveC => Some("objective-c"), Language::ObjectiveCxx => Some("objective-c++"), Language::ObjectiveCxxHeader => Some("objective-c++-header"), Language::Cuda => Some("cu"), Language::CudaFE => None, Language::Ptx => None, Language::Cubin => None, Language::Rust => None, // Let the compiler decide Language::Hip => Some("hip"), Language::GenericHeader => None, // Let the compiler decide } } #[allow(clippy::too_many_arguments)] fn preprocess_cmd( cmd: &mut T, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, 
kind: CCompilerKind, rewrite_includes_only: bool, ignorable_whitespace_flags: Vec<String>, language_to_arg: F, ) where F: Fn(Language) -> Option<&'static str>, T: RunCommand, { let language = language_to_arg(parsed_args.language); if let Some(lang) = &language { cmd.arg("-x").arg(lang); } cmd.arg("-E"); // When performing distributed compilation, line number info is important for error // reporting and to not cause spurious compilation failure (e.g. no exceptions build // fails due to exceptions transitively included in the stdlib). // With -fprofile-generate line number information is important, so don't use -P. if !may_dist && !parsed_args.profile_generate { cmd.args(&ignorable_whitespace_flags); } if rewrite_includes_only { if parsed_args.suppress_rewrite_includes_only { if log_enabled!(Trace) { trace!("preprocess: pedantic arguments disable rewrite_includes_only"); } } else { match kind { CCompilerKind::Clang => { cmd.arg("-frewrite-includes"); } CCompilerKind::Gcc => { cmd.arg("-fdirectives-only"); } _ => {} } } } // Explicitly rewrite the -arch args to be preprocessor defines of the form // __arch__ so that they affect the preprocessor output but don't cause // clang to error. let rewritten_arch_args = parsed_args .arch_args .iter() .filter(|&arg| arg.ne(ARCH_FLAG)) .filter_map(|arg| { arg.to_str() .map(|arg_string| format!("-D__{}__=1", arg_string).into()) }) .collect::<Vec<_>>(); let mut arch_args_to_use = &rewritten_arch_args; let mut unique_rewritten = rewritten_arch_args.clone(); unique_rewritten.sort(); unique_rewritten.dedup(); if unique_rewritten.len() <= 1 { // don't use rewritten arch args if there is only one arch arch_args_to_use = &parsed_args.arch_args; } else { debug!("-arch args before rewrite: {:?}", parsed_args.arch_args); debug!("-arch args after rewrite: {:?}", arch_args_to_use); } cmd.args(&parsed_args.preprocessor_args) .args(&parsed_args.dependency_args) .args(&parsed_args.common_args) .args(arch_args_to_use); if parsed_args.double_dash_input { cmd.arg("--"); } cmd.arg(&parsed_args.input) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); } #[allow(clippy::too_many_arguments)] pub async fn preprocess<T, F>( creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, kind: CCompilerKind, rewrite_includes_only: bool, ignorable_whitespace_flags: Vec<String>, language_to_arg: F, ) -> Result<process::Output> where F: Fn(Language) -> Option<&'static str>, T: CommandCreatorSync, { trace!("preprocess"); let mut cmd = creator.clone().new_command_sync(executable); preprocess_cmd( &mut cmd, parsed_args, cwd, env_vars, may_dist, kind, rewrite_includes_only, ignorable_whitespace_flags, language_to_arg, ); if log_enabled!(Trace) { trace!("preprocess: {:?}", cmd); } run_input_output(cmd, None).await } #[allow(clippy::too_many_arguments)] pub fn generate_compile_commands<F>( path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], kind: CCompilerKind, rewrite_includes_only: bool, language_to_arg: F, ) -> Result<( SingleCompileCommand, Option<dist::CompileCommand>, Cacheable, )> where F: Fn(Language) -> Option<&'static str>, { // Unused arguments #[cfg(not(feature = "dist-client"))] { let _ = path_transformer; let _ = kind; let _ = rewrite_includes_only; } let out_file = match parsed_args.outputs.get("obj") { Some(obj) => &obj.path, None => return Err(anyhow!("Missing object file output")), }; // Pass the language explicitly as we might have gotten it from the // command line.
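// Illustrative sketch (hypothetical invocation, not taken from the sources): for // `cc -c foo.c -o foo.o` parsed as C, the command assembled below comes out // roughly as `cc -x c -c -o foo.o foo.c`, with any preprocessor, dependency, // unhashed, common and arch args spliced in before the input.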
let language = language_to_arg(parsed_args.language); let mut arguments: Vec<OsString> = vec![]; if let Some(lang) = &language { arguments.extend(vec!["-x".into(), lang.into()]) } arguments.extend(vec![ parsed_args.compilation_flag.clone(), "-o".into(), out_file.into(), ]); arguments.extend_from_slice(&parsed_args.preprocessor_args); arguments.extend_from_slice(&parsed_args.dependency_args); arguments.extend_from_slice(&parsed_args.unhashed_args); arguments.extend_from_slice(&parsed_args.common_args); arguments.extend_from_slice(&parsed_args.arch_args); if parsed_args.double_dash_input { arguments.push("--".into()); } arguments.push(parsed_args.input.clone().into()); trace!( "compile: {} {}", executable.to_string_lossy(), arguments.join(OsStr::new(" ")).to_string_lossy() ); #[cfg(feature = "dist-client")] let has_verbose_flag = arguments.contains(&OsString::from("-v")) || arguments.contains(&OsString::from("--verbose")); let command = SingleCompileCommand { executable: executable.to_owned(), arguments, env_vars: env_vars.to_owned(), cwd: cwd.to_owned(), }; #[cfg(not(feature = "dist-client"))] let dist_command = None; #[cfg(feature = "dist-client")] // 1. Compilations with -v|--verbose must be run locally, since the verbose // output is parsed by tools like CMake and must reflect the local toolchain // 2. ClangCUDA cannot be dist-compiled because Clang has separate host and // device preprocessor outputs and cannot compile preprocessed CUDA files. let dist_command = if has_verbose_flag || parsed_args.language == Language::Cuda { None } else { (|| { // https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Overall-Options.html let mut language: Option<String> = language_to_arg(parsed_args.language).map(|lang| lang.into()); if !rewrite_includes_only { match parsed_args.language { Language::C => language = Some("cpp-output".into()), Language::GenericHeader | Language::CHeader | Language::CxxHeader => {} _ => language.as_mut()?.push_str("-cpp-output"), } } let mut arguments: Vec<String> = vec![]; // Language needs to be before input if let Some(lang) = &language { arguments.extend(vec!["-x".into(), lang.into()]) } arguments.extend(vec![ parsed_args.compilation_flag.clone().into_string().ok()?, path_transformer.as_dist(&parsed_args.input)?, "-o".into(), path_transformer.as_dist(out_file)?, ]); if let CCompilerKind::Gcc = kind { // From https://gcc.gnu.org/onlinedocs/gcc/Preprocessor-Options.html: // // -fdirectives-only // // [...] // // With -fpreprocessed, predefinition of command line and most // builtin macros is disabled. Macros such as __LINE__, which // are contextually dependent, are handled normally. This // enables compilation of files previously preprocessed with -E // -fdirectives-only.
// // Which is exactly what we do :-) if rewrite_includes_only && !parsed_args.suppress_rewrite_includes_only { arguments.push("-fdirectives-only".into()); } arguments.push("-fpreprocessed".into()); } arguments.extend(dist::osstrings_to_strings(&parsed_args.common_args)?); Some(dist::CompileCommand { executable: path_transformer.as_dist(executable)?, arguments, env_vars: dist::osstring_tuples_to_strings(env_vars)?, cwd: path_transformer.as_dist_abs(cwd)?, }) })() }; Ok((command, dist_command, Cacheable::Yes)) } pub struct ExpandIncludeFile<'a> { cwd: &'a Path, stack: Vec<OsString>, } impl<'a> ExpandIncludeFile<'a> { pub fn new(cwd: &'a Path, args: &[OsString]) -> Self { ExpandIncludeFile { stack: args.iter().rev().map(|a| a.to_owned()).collect(), cwd, } } } impl<'a> Iterator for ExpandIncludeFile<'a> { type Item = OsString; fn next(&mut self) -> Option<Self::Item> { loop { let arg = match self.stack.pop() { Some(arg) => arg, None => return None, }; let file = match arg.split_prefix("@") { Some(arg) => self.cwd.join(arg), None => return Some(arg), }; // According to gcc [1], @file means: // // Read command-line options from file. The options read are // inserted in place of the original @file option. If file does // not exist, or cannot be read, then the option will be // treated literally, and not removed. // // Options in file are separated by whitespace. A // whitespace character may be included in an option by // surrounding the entire option in either single or double // quotes. Any character (including a backslash) may be // included by prefixing the character to be included with // a backslash. The file may itself contain additional // @file options; any such options will be processed // recursively. // // So here we interpret any I/O errors as "just return this // argument". Currently we don't implement handling of arguments // with quotes, so if those are encountered we just pass the option // through literally anyway. // // At this time we interpret all `@` arguments above as non // cacheable, so if we fail to interpret this we'll just call the // compiler anyway. // // [1]: https://gcc.gnu.org/onlinedocs/gcc/Overall-Options.html#Overall-Options let mut contents = String::new(); let res = File::open(&file).and_then(|mut f| f.read_to_string(&mut contents)); if let Err(e) = res { debug!("failed to read @-file `{}`: {}", file.display(), e); return Some(arg); } if contents.contains('"') || contents.contains('\'') { return Some(arg); } let new_args = contents.split_whitespace().collect::<Vec<_>>(); self.stack.extend(new_args.iter().rev().map(|s| s.into())); } } } #[cfg(test)] mod test { use fs::File; use std::io::Write; use super::*; use crate::compiler::*; use crate::mock_command::*; use crate::server; use crate::test::mock_storage::MockStorage; use crate::test::utils::*; use temp_env::{with_var, with_var_unset}; fn parse_arguments_( arguments: Vec<String>, plusplus: bool, ) -> CompilerArguments<ParsedArguments> { let args = arguments.iter().map(OsString::from).collect::<Vec<_>>(); parse_arguments(&args, ".".as_ref(), &ARGS[..], plusplus, CCompilerKind::Gcc) } fn parse_arguments_clang( arguments: Vec<String>, plusplus: bool, ) -> CompilerArguments<ParsedArguments> { let args = arguments.iter().map(OsString::from).collect::<Vec<_>>(); parse_arguments( &args, ".".as_ref(), &ARGS[..], plusplus, CCompilerKind::Clang, ) } #[test] fn test_parse_arguments_simple() { let args = stringvec!["-c", "foo.c", "-o", "foo.o"]; let ParsedArguments { input, language, compilation_flag, outputs, preprocessor_args, msvc_show_includes, common_args, ..
} = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_eq!(Some("-c"), compilation_flag.to_str()); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_default_name() { let args = stringvec!["-c", "foo.c"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_default_outputdir() { let args = stringvec!["-c", "/tmp/foo.c"]; let ParsedArguments { outputs, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); } #[test] fn test_parse_arguments_split_dwarf() { let args = stringvec!["-gsplit-dwarf", "-c", "foo.cpp", "-o", "foo.o"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut common_and_arch_args = common_args.clone(); common_and_arch_args.extend(common_args.to_vec()); debug!("common_and_arch_args: {:?}", common_and_arch_args); assert_eq!(Some("foo.cpp"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ), ( "dwo", ArtifactDescriptor { path: "foo.dwo".into(), optional: true } ) ); assert!(preprocessor_args.is_empty()); assert!( common_args.contains(&"-gsplit-dwarf".into()) && common_args.contains(&"-D_gsplit_dwarf_path=foo.dwo".into()) ); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_linker_options() { let args = stringvec![ // is basically the same as `-z deps` "-Wl,--unresolved-symbols=report-all", "-z", "call-nop=suffix-nop", "-z", "deps", "-c", "foo.c", "-o", "foo.o" ]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(3, common_args.len()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_coverage_outputs_gcno() { let args = stringvec!["--coverage", "-c", "foo.cpp", "-o", "foo.o"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, profile_generate, .. 
} = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cpp"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ), ( "gcno", ArtifactDescriptor { path: PathBuf::from("foo.gcno"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["--coverage"], common_args); assert!(!msvc_show_includes); assert!(profile_generate); } #[test] fn test_parse_arguments_test_coverage_outputs_gcno() { let args = stringvec!["-ftest-coverage", "-c", "foo.cpp", "-o", "foo.o"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, profile_generate, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cpp"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ), ( "gcno", ArtifactDescriptor { path: PathBuf::from("foo.gcno"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-ftest-coverage"], common_args); assert!(!msvc_show_includes); assert!(profile_generate); } #[test] fn test_parse_arguments_profile_generate() { let args = stringvec!["-fprofile-generate", "-c", "foo.cpp", "-o", "foo.o"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, profile_generate, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cpp"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fprofile-generate"], common_args); assert!(!msvc_show_includes); assert!(profile_generate); } #[test] fn test_parse_arguments_extra() { let args = stringvec!["-c", "foo.cc", "-fabc", "-o", "foo.o", "-mxyz"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cc"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fabc", "-mxyz"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_values() { let args = stringvec![ "-c", "foo.cxx", "-fabc", "-I", "include", "-o", "foo.o", "-include", "file" ]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cxx"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!(ovec!["-Iinclude", "-include", "file"], preprocessor_args); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_preprocessor_args() { let args = stringvec![ "-c", "foo.c", "-fabc", "-MF", "foo.o.d", "-o", "foo.o", "-MQ", "abc", "-nostdinc" ]; let ParsedArguments { input, language, outputs, dependency_args, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!(ovec!["-MF", "foo.o.d"], dependency_args); assert_eq!(ovec!["-nostdinc"], preprocessor_args); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_double_dash() { let args = stringvec!["-c", "-o", "foo.o", "--", "foo.c"]; let ParsedArguments { input, double_dash_input, common_args, .. } = match parse_arguments_(args.clone(), false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); // GCC doesn't support double dashes. If we got one, we'll pass them // through to GCC for it to error out. assert!(!double_dash_input); assert_eq!(ovec!["--"], common_args); let ParsedArguments { input, double_dash_input, common_args, .. } = match parse_arguments_clang(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert!(double_dash_input); assert!(common_args.is_empty()); let args = stringvec!["-c", "-o", "foo.o", "foo.c", "--"]; let ParsedArguments { input, double_dash_input, common_args, .. } = match parse_arguments_clang(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); // Double dash after input file is ignored. assert!(!double_dash_input); assert!(common_args.is_empty()); let args = stringvec!["-c", "-o", "foo.o", "foo.c", "--", "bar.c"]; assert_eq!( CompilerArguments::CannotCache("multiple input files", Some("[\"bar.c\"]".to_string())), parse_arguments_clang(args, false) ); let args = stringvec!["-c", "-o", "foo.o", "foo.c", "--", "-fPIC"]; assert_eq!( CompilerArguments::CannotCache("multiple input files", Some("[\"-fPIC\"]".to_string())), parse_arguments_clang(args, false) ); } #[test] fn test_parse_arguments_explicit_dep_target() { let args = stringvec!["-c", "foo.c", "-MT", "depfile", "-fabc", "-MF", "foo.o.d", "-o", "foo.o"]; let ParsedArguments { input, language, outputs, dependency_args, msvc_show_includes, common_args, .. 
} = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!(ovec!["-MF", "foo.o.d"], dependency_args); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_explicit_dep_target_needed() { let args = stringvec![ "-c", "foo.c", "-MT", "depfile", "-fabc", "-MF", "foo.o.d", "-o", "foo.o", "-MD" ]; let ParsedArguments { input, language, outputs, dependency_args, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec!["-MF", "foo.o.d", "-MD", "-MT", "depfile"], dependency_args ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_explicit_mq_dep_target_needed() { let args = stringvec![ "-c", "foo.c", "-MQ", "depfile", "-fabc", "-MF", "foo.o.d", "-o", "foo.o", "-MD" ]; let ParsedArguments { input, language, outputs, dependency_args, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec!["-MF", "foo.o.d", "-MD", "-MQ", "depfile"], dependency_args ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_diagnostics_color() { fn get_color_mode(color_flag: &str) -> ColorMode { let args = stringvec!["-c", "foo.c", color_flag]; match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args.color_mode, o => panic!("Got unexpected parse result: {:?}", o), } } assert_eq!(get_color_mode("-fdiagnostics-color=always"), ColorMode::On); assert_eq!(get_color_mode("-fdiagnostics-color=never"), ColorMode::Off); assert_eq!(get_color_mode("-fdiagnostics-color=auto"), ColorMode::Auto); assert_eq!(get_color_mode("-fno-diagnostics-color"), ColorMode::Off); assert_eq!(get_color_mode("-fdiagnostics-color"), ColorMode::On); } #[test] fn color_mode_preprocess() { let args = stringvec!["-c", "foo.c", "-fdiagnostics-color"]; let args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert!(args.common_args.contains(&"-fdiagnostics-color".into())); } #[test] fn test_preprocess_cmd_rewrites_archs() { with_var("SCCACHE_CACHE_MULTIARCH", Some("1"), || { let args = stringvec!["-arch", "arm64", "-arch", "i386", "-c", "foo.cc"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); // make sure the architectures 
were rewritten to preprocessor defines let expected_args = ovec![ "-x", "c++", "-E", "-fdirectives-only", "-D__arm64__=1", "-D__i386__=1", "foo.cc" ]; assert_eq!(cmd.args, expected_args); }); } #[test] fn test_preprocess_cmd_doesnt_rewrite_single_arch() { let args = stringvec!["-arch", "arm64", "-c", "foo.cc"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); // make sure the single -arch arg was passed through unmodified let expected_args = ovec![ "-x", "c++", "-E", "-fdirectives-only", "-arch", "arm64", "foo.cc" ]; assert_eq!(cmd.args, expected_args); } #[test] fn test_preprocess_double_dash_input() { let args = stringvec!["-c", "-o", "foo.o", "--", "foo.c"]; let parsed_args = match parse_arguments_clang(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Clang, true, vec![], language_to_gcc_arg, ); let expected_args = ovec!["-x", "c", "-E", "-frewrite-includes", "--", "foo.c"]; assert_eq!(cmd.args, expected_args); } #[test] fn pedantic_default() { let args = stringvec!["-pedantic", "-c", "foo.cc"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); // rewrite is disabled: -pedantic with (default) GNU extensions assert!(!cmd.args.contains(&"-fdirectives-only".into())); } #[test] fn pedantic_std() { let args = stringvec!["-pedantic-errors", "-c", "-std=c++14", "foo.cc"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); // no reason to disable it with no extensions enabled assert!(cmd.args.contains(&"-fdirectives-only".into())); } #[test] fn pedantic_gnu() { let args = stringvec!["-pedantic-errors", "-c", "-std=gnu++14", "foo.cc"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); // rewrite is disabled again: GNU extensions explicitly enabled assert!(!cmd.args.contains(&"-fdirectives-only".into())); } #[test] fn test_parse_arguments_dep_target_needed() { let args = stringvec!["-c", "foo.c", "-fabc", "-MF", "foo.o.d", "-o", "foo.o", "-MD"]; let ParsedArguments { input, language, outputs, dependency_args, msvc_show_includes, common_args, ..
} = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec!["-MF", "foo.o.d", "-MD", "-MT", "foo.o"], dependency_args ); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_dep_target_and_file_needed() { let args = stringvec!["-c", "foo/bar.c", "-fabc", "-o", "foo/bar.o", "-MMD"]; let ParsedArguments { input, language, outputs, dependency_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo/bar.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo/bar.o"), optional: false } ) ); assert_eq!( ovec!["-MMD", "-MT", "foo/bar.o", "-MF", "foo/bar.d"], dependency_args ); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_empty_args() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_(vec!(), false) ); } #[test] fn test_parse_arguments_not_compile() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_(stringvec!["-o", "foo"], false) ); } #[test] fn test_parse_arguments_too_many_inputs_single() { assert_eq!( CompilerArguments::CannotCache("multiple input files", Some("[\"bar.c\"]".to_string())), parse_arguments_(stringvec!["-c", "foo.c", "-o", "foo.o", "bar.c"], false) ); } #[test] fn test_parse_arguments_too_many_inputs_multiple() { assert_eq!( CompilerArguments::CannotCache( "multiple input files", Some("[\"bar.c\", \"baz.c\"]".to_string()) ), parse_arguments_( stringvec!["-c", "foo.c", "-o", "foo.o", "bar.c", "baz.c"], false ) ); } #[test] fn test_parse_arguments_link() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_( stringvec!["-shared", "foo.o", "-o", "foo.so", "bar.o"], false ) ); } #[test] fn test_parse_arguments_pgo() { assert_eq!( CompilerArguments::CannotCache("-fprofile-use", None), parse_arguments_( stringvec!["-c", "foo.c", "-fprofile-use", "-o", "foo.o"], false ) ); assert_eq!( CompilerArguments::CannotCache("-fprofile-use", None), parse_arguments_( stringvec!["-c", "foo.c", "-fprofile-use=file", "-o", "foo.o"], false ) ); } #[test] fn test_parse_arguments_response_file() { assert_eq!( CompilerArguments::CannotCache("@", None), parse_arguments_(stringvec!["-c", "foo.c", "@foo", "-o", "foo.o"], false) ); assert_eq!( CompilerArguments::CannotCache("@", None), parse_arguments_(stringvec!["-c", "foo.c", "-o", "@foo"], false) ); } #[test] fn test_parse_index_store_path() { assert_eq!( CompilerArguments::CannotCache("-index-store-path", None), parse_arguments_( stringvec![ "-c", "foo.c", "-index-store-path", "index.store", "-o", "foo.o" ], false ) ); } #[test] fn test_parse_arguments_multiarch_cache_disabled() { with_var_unset("SCCACHE_CACHE_MULTIARCH", || { assert_eq!( CompilerArguments::CannotCache( "multiple different -arch, and SCCACHE_CACHE_MULTIARCH not set", None ), parse_arguments_( stringvec![ "-fPIC", "-arch", "arm64", "-arch", "i386", "-o", "foo.o", "-c", "foo.cpp" ], false ) ) }); } #[test] fn test_parse_arguments_multiple_arch() { match parse_arguments_( stringvec!["-arch", "arm64", "-o", "foo.o", "-c", "foo.cpp"], false, ) { 
CompilerArguments::Ok(_) => {} o => panic!("Got unexpected parse result: {:?}", o), } with_var("SCCACHE_CACHE_MULTIARCH", Some("1"), || { match parse_arguments_( stringvec!["-arch", "arm64", "-arch", "arm64", "-o", "foo.o", "-c", "foo.cpp"], false, ) { CompilerArguments::Ok(_) => {} o => panic!("Got unexpected parse result: {:?}", o), } let args = stringvec![ "-fPIC", "-arch", "arm64", "-arch", "i386", "-o", "foo.o", "-c", "foo.cpp" ]; let ParsedArguments { input, language, compilation_flag, outputs, preprocessor_args, msvc_show_includes, common_args, arch_args, .. } = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cpp"), input.to_str()); assert_eq!(Language::Cxx, language); assert_eq!(Some("-c"), compilation_flag.to_str()); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fPIC"], common_args); assert_eq!(ovec!["-arch", "arm64", "-arch", "i386"], arch_args); assert!(!msvc_show_includes); }); } #[test] fn at_signs() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); File::create(td.path().join("foo")) .unwrap() .write_all( b"\ -c foo.c -o foo.o\ ", ) .unwrap(); let arg = format!("@{}", td.path().join("foo").display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(vec![arg], false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_compile_simple() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.c".into(), double_dash_input: false, language: Language::C, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec![], arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc<MockStorage> = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, dist_command, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], CCompilerKind::Gcc, false, language_to_gcc_arg, ) .unwrap(); #[cfg(feature = "dist-client")] assert!(dist_command.is_some()); #[cfg(not(feature = "dist-client"))] assert!(dist_command.is_none()); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable); // Ensure that we ran all processes.
assert_eq!(0, creator.lock().unwrap().children.len()); } #[test] fn test_compile_simple_verbose_short() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.c".into(), double_dash_input: false, language: Language::C, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec!["-v".into()], arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc<MockStorage> = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, dist_command, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], CCompilerKind::Gcc, false, language_to_gcc_arg, ) .unwrap(); // -v should never generate a dist_command assert!(dist_command.is_none()); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable); // Ensure that we ran all processes. assert_eq!(0, creator.lock().unwrap().children.len()); } #[test] fn test_compile_simple_verbose_long() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.c".into(), double_dash_input: false, language: Language::C, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec!["--verbose".into()], arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc<MockStorage> = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, dist_command, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], CCompilerKind::Gcc, false, language_to_gcc_arg, ) .unwrap(); // --verbose should never generate a dist_command assert!(dist_command.is_none()); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable); // Ensure that we ran all processes.
assert_eq!(0, creator.lock().unwrap().children.len()); } #[test] fn test_compile_double_dash_input() { let args = stringvec!["-c", "-o", "foo.o", "--", "foo.c"]; let parsed_args = match parse_arguments_clang(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let f = TestFixture::new(); let compiler = &f.bins[0]; let mut path_transformer = dist::PathTransformer::new(); let (command, _, _) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], CCompilerKind::Clang, false, language_to_gcc_arg, ) .unwrap(); let expected_args = ovec!["-x", "c", "-c", "-o", "foo.o", "--", "foo.c"]; assert_eq!(command.get_arguments(), expected_args); } #[test] fn test_parse_arguments_plusplus() { let args = stringvec!["-c", "foo.c", "-o", "foo.o"]; let ParsedArguments { input, language, compilation_flag, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args, true) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::Cxx, language); assert_eq!(Some("-c"), compilation_flag.to_str()); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_pch_explicit() { let args = stringvec!["-c", "-x", "c++-header", "pch.h", "-o", "pch.pch"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); assert!(cmd.args.contains(&"-x".into()) && cmd.args.contains(&"c++-header".into())); } #[test] fn test_pch_implicit() { let args = stringvec!["-c", "pch.hpp", "-o", "pch.pch"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); assert!(cmd.args.contains(&"-x".into()) && cmd.args.contains(&"c++-header".into())); } #[test] fn test_pch_generic() { let args = stringvec!["-c", "pch.h", "-o", "pch.pch"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd( &mut cmd, &parsed_args, Path::new(""), &[], true, CCompilerKind::Gcc, true, vec![], language_to_gcc_arg, ); assert!(!cmd.args.contains(&"-x".into())); } #[test] fn test_too_hard_for_preprocessor_cache_mode() { let args = stringvec!["-c", "foo.c", "-o", "foo.o"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert!(parsed_args.too_hard_for_preprocessor_cache_mode.is_none()); let args = stringvec!["-c", "foo.c", "-o", "foo.o", "-Xpreprocessor", "-M"]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!( parsed_args.too_hard_for_preprocessor_cache_mode, 
Some("-Xpreprocessor".into()) ); let args = stringvec!["-c", "foo.c", "-o", "foo.o", r#"-Wp,-DFOO="something""#]; let parsed_args = match parse_arguments_(args, false) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!( parsed_args.too_hard_for_preprocessor_cache_mode, Some("-Wp".into()) ); } } mozilla-sccache-40c3d6b/src/compiler/mod.rs000066400000000000000000000017671475712407500207340ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #[macro_use] mod args; mod c; mod cicc; mod clang; #[macro_use] #[allow(clippy::module_inception)] mod compiler; mod cudafe; mod diab; mod gcc; mod msvc; mod nvcc; mod nvhpc; mod preprocessor_cache; mod ptxas; mod rust; mod tasking_vx; #[macro_use] mod counted_array; pub use crate::compiler::c::CCompilerKind; pub use crate::compiler::compiler::*; pub use crate::compiler::preprocessor_cache::PreprocessorCacheEntry; mozilla-sccache-40c3d6b/src/compiler/msvc.rs000066400000000000000000002716651475712407500211330ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::{ clang, gcc, write_temp_file, CCompileCommand, Cacheable, ColorMode, CompileCommand, CompilerArguments, Language, SingleCompileCommand, }; use crate::mock_command::{CommandCreatorSync, RunCommand}; use crate::util::{encode_path, run_input_output, OsStrExt}; use crate::{counted_array, dist}; use async_trait::async_trait; use fs::File; use fs_err as fs; use log::Level::Debug; use std::collections::{HashMap, HashSet}; use std::ffi::{OsStr, OsString}; use std::io::{self, BufWriter, Read, Write}; use std::path::{Path, PathBuf}; use std::process::{self, Stdio}; use crate::errors::*; /// A struct on which to implement `CCompilerImpl`. /// /// Needs a little bit of state just to persist `includes_prefix`. #[derive(Debug, PartialEq, Eq, Clone)] pub struct Msvc { /// The prefix used in the output of `-showIncludes`. 
    pub includes_prefix: String,
    pub is_clang: bool,
    pub version: Option<String>,
}

#[async_trait]
impl CCompilerImpl for Msvc {
    fn kind(&self) -> CCompilerKind {
        CCompilerKind::Msvc
    }
    fn plusplus(&self) -> bool {
        false
    }
    fn version(&self) -> Option<String> {
        self.version.clone()
    }
    fn parse_arguments(
        &self,
        arguments: &[OsString],
        cwd: &Path,
        _env_vars: &[(OsString, OsString)],
    ) -> CompilerArguments<ParsedArguments> {
        parse_arguments(arguments, cwd, self.is_clang)
    }
    #[allow(clippy::too_many_arguments)]
    async fn preprocess<T>(
        &self,
        creator: &T,
        executable: &Path,
        parsed_args: &ParsedArguments,
        cwd: &Path,
        env_vars: &[(OsString, OsString)],
        may_dist: bool,
        rewrite_includes_only: bool,
        _preprocessor_cache_mode: bool,
    ) -> Result<process::Output>
    where
        T: CommandCreatorSync,
    {
        preprocess(
            creator,
            executable,
            parsed_args,
            cwd,
            env_vars,
            may_dist,
            &self.includes_prefix,
            rewrite_includes_only,
            self.is_clang,
        )
        .await
    }
    fn generate_compile_commands<T>(
        &self,
        path_transformer: &mut dist::PathTransformer,
        executable: &Path,
        parsed_args: &ParsedArguments,
        cwd: &Path,
        env_vars: &[(OsString, OsString)],
        _rewrite_includes_only: bool,
    ) -> Result<(
        Box<dyn CompileCommand<T>>,
        Option<dist::CompileCommand>,
        Cacheable,
    )>
    where
        T: CommandCreatorSync,
    {
        generate_compile_commands(path_transformer, executable, parsed_args, cwd, env_vars).map(
            |(command, dist_command, cacheable)| {
                (CCompileCommand::new(command), dist_command, cacheable)
            },
        )
    }
}

#[cfg(not(windows))]
fn from_local_codepage(multi_byte_str: &[u8]) -> io::Result<String> {
    String::from_utf8(multi_byte_str.to_vec())
        .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
}

#[cfg(windows)]
pub fn from_local_codepage(multi_byte_str: &[u8]) -> io::Result<String> {
    use windows_sys::Win32::Globalization::{MultiByteToWideChar, CP_OEMCP, MB_ERR_INVALID_CHARS};
    let codepage = CP_OEMCP;
    let flags = MB_ERR_INVALID_CHARS;
    // Empty string
    if multi_byte_str.is_empty() {
        return Ok(String::new());
    }
    unsafe {
        // Get length of UTF-16 string
        let len = MultiByteToWideChar(
            codepage,
            flags,
            multi_byte_str.as_ptr() as _,
            multi_byte_str.len() as i32,
            std::ptr::null_mut(),
            0,
        );
        if len > 0 {
            // Convert to UTF-16
            let mut wstr: Vec<u16> = Vec::with_capacity(len as usize);
            let len = MultiByteToWideChar(
                codepage,
                flags,
                multi_byte_str.as_ptr() as _,
                multi_byte_str.len() as i32,
                wstr.as_mut_ptr() as _,
                len,
            );
            if len > 0 {
                // wstr's contents have now been initialized
                wstr.set_len(len as usize);
                return String::from_utf16(&wstr[0..(len as usize)])
                    .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e));
            }
        }
        Err(io::Error::last_os_error())
    }
}

/// Detect the prefix included in the output of MSVC's -showIncludes.
pub async fn detect_showincludes_prefix<T>(
    creator: &T,
    exe: &OsStr,
    is_clang: bool,
    env: Vec<(OsString, OsString)>,
    pool: &tokio::runtime::Handle,
) -> Result<String>
where
    T: CommandCreatorSync,
{
    let (tempdir, input) =
        write_temp_file(pool, "test.c".as_ref(), b"#include \"test.h\"\n".to_vec()).await?;

    let exe = exe.to_os_string();
    let mut creator = creator.clone();
    let pool = pool.clone();

    let header = tempdir.path().join("test.h");
    let tempdir = pool
        .spawn_blocking(move || {
            let mut file = File::create(&header)?;
            file.write_all(b"/* empty */\n")?;
            Ok::<_, std::io::Error>(tempdir)
        })
        .await?
        .context("Failed to write temporary file")?;

    let mut cmd = creator.new_command_sync(&exe);
    // clang.exe on Windows reports the same set of built-in preprocessor defines as clang-cl,
    // but it doesn't accept MSVC commandline arguments unless you pass --driver-mode=cl.
    // clang-cl.exe will accept this argument as well, so always add it in this case.
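    // Illustration (explanatory note, not upstream documentation): with the
    // temporary files created above, cl.exe's -showIncludes output contains a
    // line such as
    //
    //     Note: including file: C:\Users\...\Temp\sccache\test.h
    //
    // (hypothetical path shown). The loop below takes everything up to and
    // including the last space before an existing path as the locale-specific
    // prefix.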
if is_clang { cmd.arg("--driver-mode=cl"); } cmd.args(&["-nologo", "-showIncludes", "-c", "-Fonul", "-I."]) .arg(&input) .current_dir(tempdir.path()) // The MSDN docs say the -showIncludes output goes to stderr, // but that's not true unless running with -E. .stdout(Stdio::piped()) .stderr(Stdio::null()); for (k, v) in env { cmd.env(k, v); } trace!("detect_showincludes_prefix: {:?}", cmd); let output = run_input_output(cmd, None).await?; if !output.status.success() { bail!("Failed to detect showIncludes prefix") } let process::Output { stdout: stdout_bytes, .. } = output; let stdout = from_local_codepage(&stdout_bytes) .context("Failed to convert compiler stdout while detecting showIncludes prefix")?; for line in stdout.lines() { if !line.ends_with("test.h") { continue; } for (i, c) in line.char_indices().rev() { if c != ' ' { continue; } let path = tempdir.path().join(&line[i + 1..]); // See if the rest of this line is a full pathname. if path.exists() { // Everything from the beginning of the line // to this index is the prefix. return Ok(line[..=i].to_owned()); } } } drop(tempdir); debug!( "failed to detect showIncludes prefix with output: {}", stdout ); bail!("Failed to detect showIncludes prefix") } ArgData! { TooHardFlag, TooHard(OsString), TooHardPath(PathBuf), PreprocessorArgument(OsString), PreprocessorArgumentPath(PathBuf), SuppressCompilation, DoCompilation, ShowIncludes, Output(PathBuf), DepFile(PathBuf), ProgramDatabase(PathBuf), DebugInfo, PassThrough, // Miscellaneous flags that don't prevent caching. PassThroughWithPath(PathBuf), // As above, recognised by prefix. PassThroughWithSuffix(OsString), // As above, recognised by prefix. Ignore, // The flag is not passed to the compiler. IgnoreWithSuffix(OsString), // As above, recognized by prefix. ExtraHashFile(PathBuf), XClang(OsString), // -Xclang ... Clang(OsString), // -clang:... ExternalIncludePath(PathBuf), } use self::ArgData::*; macro_rules! msvc_args { (static ARGS: [$t:ty; _] = [$($macro:ident ! ($($v:tt)*),)*]) => { counted_array!(static ARGS: [$t; _] = [$(msvc_args!(@one "-", $macro!($($v)*)),)*]); counted_array!(static SLASH_ARGS: [$t; _] = [$(msvc_args!(@one "/", $macro!($($v)*)),)*]); }; (@one $prefix:expr, msvc_take_arg!($s:expr, $($t:tt)*)) => { take_arg!(concat!($prefix, $s), $($t)+) }; (@one $prefix:expr, msvc_flag!($s:expr, $($t:tt)+)) => { flag!(concat!($prefix, $s), $($t)+) }; (@one $prefix:expr, $other:expr) => { $other }; } // Reference: // https://docs.microsoft.com/en-us/cpp/build/reference/compiler-options-listed-alphabetically?view=vs-2019 msvc_args!(static ARGS: [ArgInfo; _] = [ msvc_flag!("?", SuppressCompilation), msvc_flag!("Brepro", PassThrough), msvc_flag!("C", PassThrough), // Ignored unless a preprocess-only flag is specified. msvc_take_arg!("D", OsString, CanBeSeparated, PreprocessorArgument), msvc_flag!("E", SuppressCompilation), msvc_take_arg!("EH", OsString, Concatenated, PassThroughWithSuffix), // /EH[acsr\-]+ - TODO: use a regex? 
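    // How these tables are read (an explanatory note, not upstream documentation):
    // msvc_take_arg!("D", OsString, CanBeSeparated, PreprocessorArgument) above
    // accepts both the concatenated form `-DFOO` and the separated form `-D FOO`,
    // and the msvc_args! wrapper registers every entry under both a `-` and a `/`
    // prefix.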
msvc_flag!("EP", SuppressCompilation), msvc_take_arg!("F", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("FA", OsString, Concatenated, TooHard), msvc_flag!("FC", PassThrough), // Use absolute paths in error messages, does not affect caching, only the debug output of the build msvc_take_arg!("FI", PathBuf, CanBeSeparated, PreprocessorArgumentPath), msvc_take_arg!("FR", PathBuf, Concatenated, TooHardPath), msvc_flag!("FS", Ignore), msvc_take_arg!("FU", PathBuf, CanBeSeparated, TooHardPath), msvc_take_arg!("Fa", PathBuf, Concatenated, TooHardPath), msvc_take_arg!("Fd", PathBuf, Concatenated, ProgramDatabase), msvc_take_arg!("Fe", PathBuf, Concatenated, TooHardPath), msvc_take_arg!("Fi", PathBuf, Concatenated, TooHardPath), msvc_take_arg!("Fm", PathBuf, Concatenated, PassThroughWithPath), // No effect if /c is specified. msvc_take_arg!("Fo", PathBuf, Concatenated, Output), msvc_take_arg!("Fp", PathBuf, Concatenated, TooHardPath), // allows users to specify the name for a PCH (when using /Yu or /Yc), PCHs are not supported in sccache. msvc_take_arg!("Fr", PathBuf, Concatenated, TooHardPath), msvc_flag!("Fx", TooHardFlag), msvc_flag!("GA", PassThrough), msvc_flag!("GF", PassThrough), msvc_flag!("GH", PassThrough), msvc_flag!("GL", PassThrough), msvc_flag!("GL-", PassThrough), msvc_flag!("GR", PassThrough), msvc_flag!("GR-", PassThrough), msvc_flag!("GS", PassThrough), msvc_flag!("GS-", PassThrough), msvc_flag!("GT", PassThrough), msvc_flag!("GX", PassThrough), msvc_flag!("GZ", PassThrough), msvc_flag!("Gd", PassThrough), msvc_flag!("Ge", PassThrough), msvc_flag!("Gh", PassThrough), msvc_flag!("Gm", TooHardFlag), // enable minimal rebuild, we do not support this msvc_flag!("Gm-", PassThrough), // disable minimal rebuild; we prefer no minimal rebuild, so marking it as disabled is fine msvc_flag!("Gr", PassThrough), msvc_take_arg!("Gs", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("Gv", PassThrough), msvc_flag!("Gw", PassThrough), msvc_flag!("Gw-", PassThrough), msvc_flag!("Gy", PassThrough), msvc_flag!("Gy-", PassThrough), msvc_flag!("Gz", PassThrough), msvc_take_arg!("H", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("HELP", SuppressCompilation), msvc_take_arg!("I", PathBuf, CanBeSeparated, PreprocessorArgumentPath), msvc_flag!("J", PassThrough), msvc_flag!("JMC", PassThrough), msvc_flag!("JMC-", PassThrough), msvc_flag!("LD", PassThrough), msvc_flag!("LDd", PassThrough), msvc_flag!("MD", PassThrough), msvc_flag!("MDd", PassThrough), msvc_take_arg!("MP", OsString, Concatenated, IgnoreWithSuffix), msvc_flag!("MT", PassThrough), msvc_flag!("MTd", PassThrough), msvc_flag!("O1", PassThrough), msvc_flag!("O2", PassThrough), msvc_flag!("Ob0", PassThrough), msvc_flag!("Ob1", PassThrough), msvc_flag!("Ob2", PassThrough), msvc_flag!("Ob3", PassThrough), msvc_flag!("Od", PassThrough), msvc_flag!("Og", PassThrough), msvc_flag!("Oi", PassThrough), msvc_flag!("Oi-", PassThrough), msvc_flag!("Os", PassThrough), msvc_flag!("Ot", PassThrough), msvc_flag!("Ox", PassThrough), msvc_flag!("Oy", PassThrough), msvc_flag!("Oy-", PassThrough), msvc_flag!("P", SuppressCompilation), msvc_flag!("QIfist", PassThrough), msvc_flag!("QIntel-jcc-erratum", PassThrough), msvc_flag!("Qfast_transcendentals", PassThrough), msvc_flag!("Qimprecise_fwaits", PassThrough), msvc_flag!("Qpar", PassThrough), msvc_flag!("Qpar-", PassThrough), msvc_flag!("Qsafe_fp_loads", PassThrough), msvc_flag!("Qspectre", PassThrough), msvc_flag!("Qspectre-load", PassThrough), msvc_flag!("Qspectre-load-cf", 
PassThrough), msvc_flag!("Qvec-report:1", PassThrough), msvc_flag!("Qvec-report:2", PassThrough), msvc_take_arg!("RTC", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("TC", PassThrough), // TODO: disable explicit language check, hope for the best for now? Also, handle /Tc & /Tp. msvc_flag!("TP", PassThrough), // As above. msvc_take_arg!("U", OsString, Concatenated, PreprocessorArgument), msvc_take_arg!("V", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("W0", PassThrough), msvc_flag!("W1", PassThrough), msvc_flag!("W2", PassThrough), msvc_flag!("W3", PassThrough), msvc_flag!("W4", PassThrough), msvc_flag!("WL", PassThrough), msvc_flag!("WX", PassThrough), msvc_flag!("WX-", PassThrough), msvc_flag!("Wall", PassThrough), msvc_take_arg!("Wv:", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("X", PassThrough), msvc_take_arg!("Xclang", OsString, Separated, XClang), msvc_take_arg!("Yc", PathBuf, Concatenated, TooHardPath), // Compile PCH - not yet supported. msvc_flag!("Yd", PassThrough), msvc_flag!("Z7", PassThrough), // Add debug info to .obj files. msvc_take_arg!("ZH:", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("ZI", DebugInfo), // Implies /FC, which puts absolute paths in error messages -> TooHardFlag? msvc_flag!("ZW", PassThrough), msvc_flag!("Za", PassThrough), msvc_take_arg!("Zc:", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("Ze", PassThrough), msvc_flag!("Zi", DebugInfo), msvc_take_arg!("Zm", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("Zo", PassThrough), msvc_flag!("Zo-", PassThrough), msvc_flag!("Zp1", PassThrough), msvc_flag!("Zp16", PassThrough), msvc_flag!("Zp2", PassThrough), msvc_flag!("Zp4", PassThrough), msvc_flag!("Zp8", PassThrough), msvc_flag!("Zs", SuppressCompilation), msvc_flag!("analyze", PassThrough), msvc_flag!("analyze-", PassThrough), msvc_take_arg!("analyze:", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("arch:", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("await", PassThrough), msvc_flag!("bigobj", PassThrough), msvc_flag!("c", DoCompilation), msvc_take_arg!("cgthreads", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("clang:", OsString, Concatenated, Clang), msvc_flag!("clr", PassThrough), msvc_take_arg!("clr:", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("constexpr:", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("deps", PathBuf, Concatenated, DepFile), msvc_take_arg!("diagnostics:", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("doc", PathBuf, Concatenated, TooHardPath), // Creates an .xdc file. msvc_take_arg!("errorReport:", OsString, Concatenated, PassThroughWithSuffix), // Deprecated. msvc_take_arg!("execution-charset:", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("experimental:deterministic", PassThrough), msvc_flag!("experimental:external", PassThrough), msvc_flag!("experimental:module", TooHardFlag), msvc_flag!("experimental:module-", PassThrough), // Explicitly disabled modules. 
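    // Note (explanatory, not from upstream): entries mapped to TooHardFlag/TooHard*
    // make parse_arguments() bail out via cannot_cache!, so the whole invocation is
    // forwarded to the real compiler uncached (e.g. /experimental:module above),
    // while PassThrough* entries are hashed and forwarded unchanged
    // (e.g. /experimental:module-).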
msvc_take_arg!("experimental:preprocessor", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("external:I", PathBuf, CanBeSeparated, ExternalIncludePath), msvc_flag!("external:W0", PassThrough), msvc_flag!("external:W1", PassThrough), msvc_flag!("external:W2", PassThrough), msvc_flag!("external:W3", PassThrough), msvc_flag!("external:W4", PassThrough), msvc_flag!("external:anglebrackets", PassThrough), msvc_take_arg!("favor:", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("fp:", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("fsanitize-blacklist", PathBuf, Concatenated('='), ExtraHashFile), msvc_flag!("fsanitize=address", PassThrough), msvc_flag!("fsyntax-only", SuppressCompilation), msvc_take_arg!("guard:cf", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("homeparams", PassThrough), msvc_flag!("hotpatch", PassThrough), msvc_take_arg!("imsvc", PathBuf, CanBeSeparated, PreprocessorArgumentPath), msvc_flag!("kernel", PassThrough), msvc_flag!("kernel-", PassThrough), msvc_flag!("nologo", PassThrough), msvc_take_arg!("o", PathBuf, Separated, Output), // Deprecated but valid msvc_flag!("openmp", PassThrough), msvc_flag!("openmp-", PassThrough), msvc_flag!("openmp:experimental", PassThrough), msvc_flag!("permissive", PassThrough), msvc_flag!("permissive-", PassThrough), msvc_flag!("sdl", PassThrough), msvc_flag!("sdl-", PassThrough), msvc_flag!("showIncludes", ShowIncludes), msvc_take_arg!("source-charset:", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("sourceDependencies", PathBuf, CanBeSeparated, DepFile), msvc_take_arg!("std:", OsString, Concatenated, PassThroughWithSuffix), msvc_flag!("u", PassThrough), msvc_flag!("utf-8", PassThrough), msvc_flag!("validate-charset", PassThrough), msvc_flag!("validate-charset-", PassThrough), msvc_flag!("vd0", PassThrough), msvc_flag!("vd1", PassThrough), msvc_flag!("vd2", PassThrough), msvc_flag!("vmb", PassThrough), msvc_flag!("vmg", PassThrough), msvc_flag!("vmm", PassThrough), msvc_flag!("vms", PassThrough), msvc_flag!("vmv", PassThrough), msvc_flag!("volatile:iso", PassThrough), msvc_flag!("volatile:ms", PassThrough), msvc_flag!("w", PassThrough), msvc_take_arg!("w1", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("w2", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("w3", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("w4", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("wd", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("we", OsString, Concatenated, PassThroughWithSuffix), msvc_take_arg!("winsysroot", PathBuf, CanBeSeparated, PassThroughWithPath), msvc_take_arg!("wo", OsString, Concatenated, PassThroughWithSuffix), take_arg!("@", PathBuf, Concatenated, TooHardPath), ]); // TODO: what do do with precompiled header flags? 
// eg: /Y-, /Yc, /YI, /Yu, /Zf, /Zm

pub fn parse_arguments(
    arguments: &[OsString],
    cwd: &Path,
    is_clang: bool,
) -> CompilerArguments<ParsedArguments> {
    let mut output_arg = None;
    let mut input_arg = None;
    let mut double_dash_input = false;
    let mut common_args = vec![];
    let mut unhashed_args = vec![];
    let mut preprocessor_args = vec![];
    let mut dependency_args = vec![];
    let mut extra_hash_files = vec![];
    let mut compilation = false;
    let mut compilation_flag = OsString::new();
    let mut debug_info = false;
    let mut pdb = None;
    let mut depfile = None;
    let mut show_includes = false;
    let mut xclangs: Vec<OsString> = vec![];
    let mut clangs: Vec<OsString> = vec![];
    let mut profile_generate = false;
    let mut multiple_input = false;
    let mut multiple_input_files = Vec::new();

    // Custom iterator to expand `@` arguments which stand for reading a file
    // and interpreting it as a list of more arguments.
    let it = ExpandIncludeFile::new(cwd, arguments);
    let mut it = ArgsIter::new(it, (&ARGS[..], &SLASH_ARGS[..]));
    if is_clang {
        it = it.with_double_dashes();
    }
    for arg in it {
        let arg = try_or_cannot_cache!(arg, "argument parse");
        match arg.get_data() {
            Some(PassThrough) | Some(PassThroughWithPath(_)) | Some(PassThroughWithSuffix(_)) => {}
            Some(TooHardFlag) | Some(TooHard(_)) | Some(TooHardPath(_)) => {
                cannot_cache!(arg.flag_str().expect("Can't be Argument::Raw/UnknownFlag"))
            }
            Some(DoCompilation) => {
                compilation = true;
                compilation_flag =
                    OsString::from(arg.flag_str().expect("Compilation flag expected"));
            }
            Some(ShowIncludes) => {
                show_includes = true;
                dependency_args.push(arg.to_os_string());
            }
            Some(Output(out)) => {
                output_arg = Some(out.clone());
                // Can't usefully cache output that goes to nul anyway,
                // and it breaks reading entries from cache.
                if out.as_os_str() == "nul" {
                    cannot_cache!("output to nul")
                }
            }
            Some(DepFile(p)) => depfile = Some(p.clone()),
            Some(ProgramDatabase(p)) => pdb = Some(p.clone()),
            Some(DebugInfo) => debug_info = true,
            Some(PreprocessorArgument(_))
            | Some(PreprocessorArgumentPath(_))
            | Some(ExtraHashFile(_))
            | Some(Ignore)
            | Some(IgnoreWithSuffix(_))
            | Some(ExternalIncludePath(_)) => {}
            Some(SuppressCompilation) => {
                return CompilerArguments::NotCompilation;
            }
            Some(XClang(s)) => xclangs.push(s.clone()),
            Some(Clang(s)) => clangs.push(s.clone()),
            None => {
                match arg {
                    Argument::Raw(ref val) if val == "--" => {
                        if input_arg.is_none() {
                            double_dash_input = true;
                        }
                    }
                    Argument::Raw(ref val) => {
                        if input_arg.is_some() {
                            // Can't cache compilations with multiple inputs.
                            multiple_input = true;
                            multiple_input_files.push(val.clone());
                        }
                        input_arg = Some(val.clone());
                    }
                    Argument::UnknownFlag(ref flag) => common_args.push(flag.clone()),
                    _ => unreachable!(),
                }
            }
        }
        match arg.get_data() {
            Some(PreprocessorArgument(_)) | Some(PreprocessorArgumentPath(_)) => preprocessor_args
                .extend(
                    arg.normalize(NormalizedDisposition::Concatenated)
                        .iter_os_strings(),
                ),
            Some(ProgramDatabase(_))
            | Some(DebugInfo)
            | Some(PassThrough)
            | Some(PassThroughWithPath(_))
            | Some(PassThroughWithSuffix(_)) => common_args.extend(
                arg.normalize(NormalizedDisposition::Concatenated)
                    .iter_os_strings(),
            ),
            Some(ExtraHashFile(path)) => {
                extra_hash_files.push(cwd.join(path));
                common_args.extend(
                    arg.normalize(NormalizedDisposition::Concatenated)
                        .iter_os_strings(),
                )
            }
            Some(ExternalIncludePath(_)) => common_args.extend(
                arg.normalize(NormalizedDisposition::Separated)
                    .iter_os_strings(),
            ),
            // We ignore -MP and -FS and never pass them down to the compiler.
            //
            // -MP tells the compiler to build with multiple processes and is used
            // to spread multiple compilations when there are multiple inputs.
            // Either we have multiple inputs on the command line, and we're going
            // to bail out and not cache, or -MP is not going to be useful.
            // -MP also implies -FS.
            //
            // -FS forces synchronous access to PDB files via a MSPDBSRV process.
            // This option is only useful when multiple compiler invocations are going
            // to share the same PDB file, which is not supported by sccache. So either
            // -Fd was passed with a pdb that is not shared and sccache is going to
            // handle the compile, in which case -FS is not needed, or -Fd was not passed
            // and we're going to bail out and not cache.
            //
            // In both cases, the flag is not going to be useful if we are going to cache,
            // so we just skip them entirely. -FS may also have a side effect of creating
            // race conditions in which we may try to read the PDB before MSPDBSRV is done
            // writing it, so we're better off ignoring the flags.
            Some(Ignore) | Some(IgnoreWithSuffix(_)) => {}
            _ => {}
        }
    }

    // TODO: doing this here reorders the arguments, hopefully that doesn't affect the meaning
    fn xclang_append(arg: OsString, args: &mut Vec<OsString>) {
        args.push("-Xclang".into());
        args.push(arg);
    }

    fn dash_clang_append(arg: OsString, args: &mut Vec<OsString>) {
        let mut a = OsString::from("-clang:");
        a.push(arg);
        args.push(a);
    }

    for (args, append_fn) in Iterator::zip(
        [xclangs, clangs].iter(),
        &[xclang_append, dash_clang_append],
    ) {
        let it = gcc::ExpandIncludeFile::new(cwd, args);
        for arg in ArgsIter::new(it, (&gcc::ARGS[..], &clang::ARGS[..])) {
            let arg = try_or_cannot_cache!(arg, "argument parse");
            // Eagerly bail if it looks like we need to do more complicated work
            use crate::compiler::gcc::ArgData::*;
            let args = match arg.get_data() {
                Some(SplitDwarf)
                | Some(TestCoverage)
                | Some(Coverage)
                | Some(DoCompilation)
                | Some(Language(_))
                | Some(Output(_))
                | Some(TooHardFlag)
                | Some(XClang(_))
                | Some(TooHard(_)) => cannot_cache!(arg
                    .flag_str()
                    .unwrap_or("Can't handle complex arguments through clang",)),
                None => match arg {
                    Argument::Raw(_) | Argument::UnknownFlag(_) => &mut common_args,
                    _ => unreachable!(),
                },
                Some(DiagnosticsColor(_))
                | Some(DiagnosticsColorFlag)
                | Some(NoDiagnosticsColorFlag)
                | Some(Arch(_))
                | Some(PassThroughFlag)
                | Some(PassThrough(_))
                | Some(PassThroughPath(_))
                | Some(PedanticFlag)
                | Some(Standard(_))
                | Some(SerializeDiagnostics(_)) => &mut common_args,
                Some(UnhashedFlag) | Some(Unhashed(_)) => &mut unhashed_args,
                Some(ProfileGenerate) => {
                    profile_generate = true;
                    &mut common_args
                }
                Some(ClangProfileUse(path)) => {
                    extra_hash_files.push(clang::resolve_profile_use_path(path, cwd));
                    &mut common_args
                }
                Some(ExtraHashFile(path)) => {
                    extra_hash_files.push(cwd.join(path));
                    &mut common_args
                }
                Some(PreprocessorArgumentFlag)
                | Some(PreprocessorArgument(_))
                | Some(PreprocessorArgumentPath(_)) => &mut preprocessor_args,
                Some(DepArgumentPath(_)) | Some(DepTarget(_)) | Some(NeedDepTarget) => {
                    &mut dependency_args
                }
            };
            // Normalize attributes such as "-I foo", "-D FOO=bar", as
            // "-Ifoo", "-DFOO=bar", etc. and "-includefoo", "idirafterbar" as
            // "-include foo", "-idirafter bar", etc.
            let norm = match arg.flag_str() {
                Some(s) if s.len() == 2 => NormalizedDisposition::Concatenated,
                _ => NormalizedDisposition::Separated,
            };
            for arg in arg.normalize(norm).iter_os_strings() {
                append_fn(arg, args);
            }
        }
    }

    // We only support compilation.
    if !compilation {
        return CompilerArguments::NotCompilation;
    }
    // Can't cache compilations with multiple inputs.
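    // For example, `cl /c foo.c bar.c` compiles two translation units in one
    // invocation; that doesn't map onto sccache's one-input/one-output cache
    // entries, so such calls are forwarded to the compiler uncached.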
if multiple_input { cannot_cache!( "multiple input files", format!("{:?}", multiple_input_files) ); } let (input, language) = match input_arg { Some(i) => match Language::from_file_name(Path::new(&i)) { Some(l) => (i.to_owned(), l), None => cannot_cache!("unknown source language"), }, // We can't cache compilation without an input. None => cannot_cache!("no input file"), }; let mut outputs = HashMap::new(); match output_arg { // If output file name is not given, use default naming rule None => { outputs.insert( "obj", ArtifactDescriptor { path: Path::new(&input).with_extension("obj"), optional: false, }, ); } Some(o) => { if o.extension().is_none() && compilation { outputs.insert( "obj", ArtifactDescriptor { path: o.with_extension("obj"), optional: false, }, ); } else { outputs.insert( "obj", ArtifactDescriptor { path: o, optional: false, }, ); } } } if language == Language::Cxx { if let Some(obj) = outputs.get("obj") { // MSVC can produce "type library headers"[1], with the extensions "tlh" and "tli". // These files can be used in later compilation steps to interact with COM interfaces. // // These files are only created when the `#import` directive is used. // Figuring out if an import directive is used would require parsing C++, which would be a lot of work. // To avoid that problem, we just optionally cache these headers if they happen to be produced. // This isn't perfect, but it is easy! // // [1]: https://learn.microsoft.com/en-us/cpp/preprocessor/hash-import-directive-cpp?view=msvc-170#_predir_the_23import_directive_header_files_created_by_import let tlh = obj.path.with_extension("tlh"); let tli = obj.path.with_extension("tli"); // Primary type library header outputs.insert( "tlh", ArtifactDescriptor { path: tlh, optional: true, }, ); // Secondary type library header outputs.insert( "tli", ArtifactDescriptor { path: tli, optional: true, }, ); } } // -Fd is not taken into account unless -Zi or -ZI are given // Clang is currently unable to generate PDB files if debug_info && !is_clang { match pdb { Some(p) => outputs.insert( "pdb", ArtifactDescriptor { path: p, optional: false, }, ), None => { // -Zi and -ZI without -Fd defaults to vcxxx.pdb (where xxx depends on the // MSVC version), and that's used for all compilations with the same // working directory. We can't cache such a pdb. cannot_cache!("shared pdb"); } }; } CompilerArguments::Ok(ParsedArguments { input: input.into(), double_dash_input, language, compilation_flag, depfile, outputs, dependency_args, preprocessor_args, common_args, arch_args: vec![], unhashed_args, extra_dist_files: vec![], extra_hash_files, msvc_show_includes: show_includes, profile_generate, // FIXME: implement color_mode for msvc. color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }) } #[cfg(windows)] fn normpath(path: &str) -> String { use std::os::windows::ffi::OsStringExt; use std::os::windows::io::AsRawHandle; use std::ptr; use windows_sys::Win32::Storage::FileSystem::GetFinalPathNameByHandleW; File::open(path) .and_then(|f| { let handle = f.as_raw_handle() as _; let size = unsafe { GetFinalPathNameByHandleW(handle, ptr::null_mut(), 0, 0) }; if size == 0 { return Err(io::Error::last_os_error()); } let mut wchars = vec![0; size as usize]; if unsafe { GetFinalPathNameByHandleW(handle, wchars.as_mut_ptr(), wchars.len() as u32, 0) } == 0 { return Err(io::Error::last_os_error()); } // The return value of GetFinalPathNameByHandleW uses the // '\\?\' prefix. 
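            // Worked example (hypothetical path): GetFinalPathNameByHandleW returns
            // `\\?\C:\src\foo.h` plus a trailing NUL; dropping the first 4 UTF-16
            // units and the final NUL below leaves `C:\src\foo.h`, which is then
            // rewritten with forward slashes as `C:/src/foo.h`.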
            let o = OsString::from_wide(&wchars[4..wchars.len() - 1]);
            o.into_string()
                .map(|s| s.replace('\\', "/"))
                .map_err(|_| io::Error::new(io::ErrorKind::Other, "Error converting string"))
        })
        .unwrap_or_else(|_| path.replace('\\', "/"))
}

#[cfg(not(windows))]
fn normpath(path: &str) -> String {
    path.to_owned()
}

#[allow(clippy::too_many_arguments)]
pub fn preprocess_cmd<T>(
    cmd: &mut T,
    parsed_args: &ParsedArguments,
    cwd: &Path,
    env_vars: &[(OsString, OsString)],
    may_dist: bool,
    rewrite_includes_only: bool,
    is_clang: bool,
) where
    T: RunCommand,
{
    // When performing distributed compilation, line number info is important for error
    // reporting and to not cause spurious compilation failure (e.g. no exceptions build
    // fails due to exceptions transitively included in the stdlib).
    // With -fprofile-generate line number information is important, so use -E.
    // Otherwise, use -EP to maximize cache hits (because no absolute file paths are
    // emitted) and improve performance.
    if may_dist || parsed_args.profile_generate {
        cmd.arg("-E");
    } else {
        cmd.arg("-EP");
    }
    cmd.arg("-nologo")
        .args(&parsed_args.preprocessor_args)
        .args(&parsed_args.dependency_args)
        .args(&parsed_args.common_args)
        .env_clear()
        .envs(env_vars.to_vec())
        .current_dir(cwd);
    if is_clang {
        if parsed_args.depfile.is_some() && !parsed_args.msvc_show_includes {
            cmd.arg("-showIncludes");
        }
    } else {
        // cl.exe can produce the dep list itself, in a JSON format that some tools will be expecting.
        if let Some(ref depfile) = parsed_args.depfile {
            cmd.arg("/sourceDependencies");
            cmd.arg(depfile);
        }
        // Windows SDK generates C4668 during preprocessing, but compiles fine.
        // Read for more info: https://github.com/mozilla/sccache/issues/1725
        // And here: https://github.com/mozilla/sccache/issues/2250
        cmd.arg("/WX-");
    }
    if rewrite_includes_only && is_clang {
        cmd.arg("-clang:-frewrite-includes");
    }
    if parsed_args.double_dash_input {
        cmd.arg("--");
    }
    cmd.arg(&parsed_args.input);
}

#[allow(clippy::too_many_arguments)]
pub async fn preprocess<T>(
    creator: &T,
    executable: &Path,
    parsed_args: &ParsedArguments,
    cwd: &Path,
    env_vars: &[(OsString, OsString)],
    may_dist: bool,
    includes_prefix: &str,
    rewrite_includes_only: bool,
    is_clang: bool,
) -> Result<process::Output>
where
    T: CommandCreatorSync,
{
    let mut cmd = creator.clone().new_command_sync(executable);
    preprocess_cmd(
        &mut cmd,
        parsed_args,
        cwd,
        env_vars,
        may_dist,
        rewrite_includes_only,
        is_clang,
    );
    if log_enabled!(Debug) {
        debug!("preprocess: {:?}", cmd);
    }
    let parsed_args = parsed_args.clone();
    let includes_prefix = includes_prefix.to_string();
    let cwd = cwd.to_owned();

    let output = run_input_output(cmd, None).await?;

    if !is_clang {
        return Ok(output);
    }

    let parsed_args = &parsed_args;
    if let (Some(obj), Some(depfile)) = (parsed_args.outputs.get("obj"), &parsed_args.depfile) {
        let objfile = &obj.path;
        let f = File::create(cwd.join(depfile))?;
        let mut f = BufWriter::new(f);

        encode_path(&mut f, objfile)
            .with_context(|| format!("Couldn't encode objfile filename: '{:?}'", objfile))?;
        write!(f, ": ")?;
        encode_path(&mut f, &parsed_args.input).with_context(|| {
            format!("Couldn't encode input filename: '{:?}'", parsed_args.input)
        })?;
        write!(f, " ")?;
        let process::Output {
            status,
            stdout,
            stderr: stderr_bytes,
        } = output;
        let stderr =
            from_local_codepage(&stderr_bytes).context("Failed to convert preprocessor stderr")?;
        let mut deps = HashSet::new();
        let mut stderr_bytes = vec![];
        for line in stderr.lines() {
            if line.starts_with(&includes_prefix) {
                let dep = normpath(line[includes_prefix.len()..].trim());
                trace!("included: {}", dep);
                if deps.insert(dep.clone()) && !dep.contains(' ') {
                    write!(f, "{} ", dep)?;
                }
                if !parsed_args.msvc_show_includes {
                    continue;
                }
            }
            stderr_bytes.extend_from_slice(line.as_bytes());
            stderr_bytes.push(b'\n');
        }
        writeln!(f)?;
        // Write extra rules for each dependency to handle removed files.
        encode_path(&mut f, &parsed_args.input)
            .with_context(|| format!("Couldn't encode filename: '{:?}'", parsed_args.input))?;
        writeln!(f, ":")?;
        let mut sorted = deps.into_iter().collect::<Vec<_>>();
        sorted.sort();
        for dep in sorted {
            if !dep.contains(' ') {
                writeln!(f, "{}:", dep)?;
            }
        }
        Ok(process::Output {
            status,
            stdout,
            stderr: stderr_bytes,
        })
    } else {
        Ok(output)
    }
}

fn generate_compile_commands(
    path_transformer: &mut dist::PathTransformer,
    executable: &Path,
    parsed_args: &ParsedArguments,
    cwd: &Path,
    env_vars: &[(OsString, OsString)],
) -> Result<(
    SingleCompileCommand,
    Option<dist::CompileCommand>,
    Cacheable,
)> {
    #[cfg(not(feature = "dist-client"))]
    let _ = path_transformer;

    trace!("compile");
    let out_file = match parsed_args.outputs.get("obj") {
        Some(obj) => &obj.path,
        None => bail!("Missing object file output"),
    };

    // See if this compilation will produce a PDB.
    let cacheable = parsed_args
        .outputs
        .get("pdb")
        .map_or(Cacheable::Yes, |pdb| {
            // If the PDB exists, we don't know if it's shared with another
            // compilation. If it is, we can't cache.
            if Path::new(&cwd).join(pdb.path.clone()).exists() {
                Cacheable::No
            } else {
                Cacheable::Yes
            }
        });

    let mut fo = OsString::from("-Fo");
    fo.push(out_file);

    let mut arguments: Vec<OsString> = vec![parsed_args.compilation_flag.clone(), fo];
    arguments.extend_from_slice(&parsed_args.preprocessor_args);
    arguments.extend_from_slice(&parsed_args.dependency_args);
    arguments.extend_from_slice(&parsed_args.unhashed_args);
    arguments.extend_from_slice(&parsed_args.common_args);
    if parsed_args.double_dash_input {
        arguments.push("--".into());
    }
    arguments.push(parsed_args.input.clone().into());

    let command = SingleCompileCommand {
        executable: executable.to_owned(),
        arguments,
        env_vars: env_vars.to_owned(),
        cwd: cwd.to_owned(),
    };

    #[cfg(not(feature = "dist-client"))]
    let dist_command = None;
    #[cfg(feature = "dist-client")]
    let dist_command = (|| {
        // http://releases.llvm.org/6.0.0/tools/clang/docs/UsersManual.html#clang-cl
        // TODO: Use /T... for language?
        let mut fo = String::from("-Fo");
        fo.push_str(&path_transformer.as_dist(out_file)?);

        let mut arguments: Vec<String> =
            vec![parsed_args.compilation_flag.clone().into_string().ok()?, fo];
        // It's important to avoid preprocessor_args because of things like /FI which
        // forcibly includes another file. This does mean we're potentially vulnerable
        // to misidentification of flags like -DYNAMICBASE (though in that specific
        // case we're safe as it only applies to link time, which sccache avoids).
        arguments.extend(dist::osstrings_to_strings(&parsed_args.common_args)?);
        if parsed_args.double_dash_input {
            arguments.push("--".into());
        }
        arguments.push(path_transformer.as_dist(&parsed_args.input)?);

        Some(dist::CompileCommand {
            executable: path_transformer.as_dist(executable)?,
            arguments,
            env_vars: dist::osstring_tuples_to_strings(env_vars)?,
            cwd: path_transformer.as_dist(cwd)?,
        })
    })();

    Ok((command, dist_command, cacheable))
}

/// Iterator that expands @response files in-place.
///
/// According to MSDN [1], @file means:
///
/// ```text
/// A text file containing compiler commands.
///
/// A response file can contain any commands that you would specify on the
/// command line. This can be useful if your command-line arguments exceed
/// 127 characters.
///
/// It is not possible to specify the @ option from within a response file.
/// That is, a response file cannot embed another response file.
///
/// From the command line you can specify as many response file options (for
/// example, @respfile.1 @respfile.2) as you want.
/// ```
///
/// Per Microsoft [2], response files are used by MSBuild:
///
/// ```text
/// Response (.rsp) files are text files that contain MSBuild.exe
/// command-line switches. Each switch can be on a separate line or all
/// switches can be on one line. Comment lines are prefaced with a # symbol.
/// The @ switch is used to pass another response file to MSBuild.exe.
///
/// The autoresponse file is a special .rsp file that MSBuild.exe automatically
/// uses when building a project. This file, MSBuild.rsp, must be in the same
/// directory as MSBuild.exe, otherwise it will not be found. You can edit
/// this file to specify default command-line switches to MSBuild.exe.
/// For example, if you use the same logger every time you build a project,
/// you can add the -logger switch to MSBuild.rsp, and MSBuild.exe will
/// use the logger every time a project is built.
/// ```
///
/// Note that, in order to conform to the spec, response files are not
/// recursively expanded.
///
/// [1]: https://docs.microsoft.com/en-us/cpp/build/reference/at-specify-a-compiler-response-file
/// [2]: https://learn.microsoft.com/en-us/visualstudio/msbuild/msbuild-response-files?view=vs-2019
struct ExpandIncludeFile<'a> {
    cwd: &'a Path,
    /// Arguments provided during initialization, which may include response-file directives (@).
    /// Order is reversed from the iterator provided,
    /// so they can be visited in front-to-back order by popping from the end.
    args: Vec<OsString>,
    /// Arguments found in provided response-files.
    /// These are also reversed compared to the order in the response file,
    /// so they can be visited in front-to-back order by popping from the end.
    stack: Vec<OsString>,
}

impl<'a> ExpandIncludeFile<'a> {
    pub fn new(cwd: &'a Path, args: &[OsString]) -> Self {
        ExpandIncludeFile {
            // Reverse the provided iterator so we can pop from end to visit in the original order.
            args: args.iter().rev().map(|a| a.to_owned()).collect(),
            stack: Vec::new(),
            cwd,
        }
    }
}

impl<'a> Iterator for ExpandIncludeFile<'a> {
    type Item = OsString;

    fn next(&mut self) -> Option<OsString> {
        loop {
            // Visit all arguments found in the most recently read response file.
            // Since response files are not recursive, we do not need to worry
            // about these containing additional @ directives.
            if let Some(response_file_arg) = self.stack.pop() {
                return Some(response_file_arg);
            }

            // Visit the next argument provided by the original command iterator.
            let arg = match self.args.pop() {
                Some(arg) => arg,
                None => return None,
            };
            let file_arg = match arg.split_prefix("@") {
                Some(file_arg) => file_arg,
                None => return Some(arg),
            };
            let file_path = self.cwd.join(file_arg);
            // Read the contents of the response file, accounting for non-utf8 encodings.
            let content = match File::open(&file_path).and_then(|mut file| read_text(&mut file)) {
                Ok(content) => content,
                Err(err) => {
                    debug!("failed to read @-file `{}`: {}", file_path.display(), err);
                    // If we failed to read the file content, return the original arg (including the `@` directive).
                    return Some(arg);
                }
            };

            trace!("Expanded response file {:?} to {:?}", file_path, content);

            // Parse the response file contents, taking into account quote-wrapped strings and new-line separators.
            // Special implementation to account for MSVC response file format.
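            // For example (mirroring the tests below), a response file containing
            //     -c "Foo Bar.c" -o foo.o
            // splits into ["-c", "Foo Bar.c", "-o", "foo.o"]: the quotes group the
            // embedded space and are stripped from the resulting argument.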
            let resp_file_args = SplitMsvcResponseFileArgs::from(&content).collect::<Vec<_>>();
            // Pump arguments back to the stack, in reverse order so we can `Vec::pop`
            // and visit in original front-to-back order.
            let rev_args = resp_file_args.iter().rev().map(|s| s.into());
            self.stack.extend(rev_args);
        }
    }
}

/// Reads the text stream as a unicode buffer, prioritizing UTF-8, UTF-16 (big and
/// little endian), and falling back on ISO 8859-1.
fn read_text<R>(reader: &mut R) -> io::Result<String>
where
    R: Read,
{
    let mut buf = Vec::new();
    reader.read_to_end(&mut buf)?;

    let (result, _, has_error) = encoding_rs::WINDOWS_1252.decode(&buf);
    if has_error {
        Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "failed to decode text",
        ))
    } else {
        Ok(result.to_string())
    }
}

/// An iterator over the arguments in a Windows command line.
///
/// This produces results identical to `CommandLineToArgvW` except in the
/// following cases:
///
/// 1. When passed an empty string, CommandLineToArgvW returns the path to the
///    current executable file. Here, the iterator will simply be empty.
/// 2. CommandLineToArgvW interprets the first argument differently than the
///    rest. Here, all arguments are treated in identical fashion.
///
/// Parsing rules:
///
/// - Arguments are delimited by whitespace (either a space or tab).
/// - A string surrounded by double quotes is interpreted as a single argument.
/// - Backslashes are interpreted literally unless followed by a double quote.
/// - 2n backslashes followed by a double quote reduce to n backslashes and we
///   enter the "in quote" state.
/// - 2n+1 backslashes followed by a double quote reduces to n backslashes,
///   we do *not* enter the "in quote" state, and the double quote is
///   interpreted literally.
///
/// References:
/// - https://msdn.microsoft.com/en-us/library/windows/desktop/bb776391(v=vs.85).aspx
/// - https://msdn.microsoft.com/en-us/library/windows/desktop/17w5ykft(v=vs.85).aspx
#[derive(Clone, Debug)]
struct SplitMsvcResponseFileArgs<'a> {
    /// String slice of the file content that is being parsed.
    /// Slice is mutated as this iterator is executed.
    file_content: &'a str,
}

impl<'a, T> From<&'a T> for SplitMsvcResponseFileArgs<'a>
where
    T: AsRef<str> + 'static,
{
    fn from(file_content: &'a T) -> Self {
        Self {
            file_content: file_content.as_ref(),
        }
    }
}

impl<'a> SplitMsvcResponseFileArgs<'a> {
    /// Appends backslashes to `target` by decrementing `count`.
    /// If `step` is >1, then `count` is decremented by `step`, resulting in 1 backslash appended for every `step`.
    fn append_backslashes_to(target: &mut String, count: &mut usize, step: usize) {
        while *count >= step {
            target.push('\\');
            *count -= step;
        }
    }
}

impl<'a> Iterator for SplitMsvcResponseFileArgs<'a> {
    type Item = String;

    fn next(&mut self) -> Option<String> {
        let mut in_quotes = false;
        let mut backslash_count: usize = 0;

        // Strip any leading whitespace before relevant characters
        let is_whitespace = |c| matches!(c, ' ' | '\t' | '\n' | '\r');
        self.file_content = self.file_content.trim_start_matches(is_whitespace);

        if self.file_content.is_empty() {
            return None;
        }

        // The argument string to return, built by analyzing the current slice in the iterator.
        let mut arg = String::new();
        // All characters still in the string slice. Will be mutated by consuming
        // values until the current arg is built.
        let mut chars = self.file_content.chars();
        // Build the argument by evaluating each character in the string slice.
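        // Worked examples of the backslash rules documented on the struct
        // (explanatory, derived from the rules above):
        //     foo\bar     -> foo\bar   (backslash not before a quote is literal)
        //     foo\\"bar"  -> foo\bar   (2n backslashes + quote: n backslashes, toggle quoting)
        //     foo\"bar    -> foo"bar   (2n+1 backslashes + quote: n backslashes, literal quote)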
for c in &mut chars { match c { // In order to handle the escape character based on the char(s) which come after it, // they are counted instead of appended literally, until a non-backslash character is encountered. '\\' => backslash_count += 1, // Either starting or ending a quoted argument, or appending a literal character (if the quote was escaped). '"' => { // Only append half the number of backslashes encountered, because this is an escaped string. // This will reduce `backslash_count` to either 0 or 1. Self::append_backslashes_to(&mut arg, &mut backslash_count, 2); match backslash_count == 0 { // If there are no remaining encountered backslashes, // then we have found either the start or end of a quoted argument. true => in_quotes = !in_quotes, // The quote character is escaped, so it is treated as a literal and appended to the arg string. false => { backslash_count = 0; arg.push('"'); } } } // If whitespace is encountered, only preserve it if we are currently in quotes. // Otherwise it marks the end of the current argument. ' ' | '\t' | '\n' | '\r' => { Self::append_backslashes_to(&mut arg, &mut backslash_count, 1); // If not in a quoted string, then this is the end of the argument. if !in_quotes { break; } // Otherwise, the whitespace must be preserved in the argument. arg.push(c); } // All other characters treated as is _ => { Self::append_backslashes_to(&mut arg, &mut backslash_count, 1); arg.push(c); } } } // Flush any backslashes at the end of the string. Self::append_backslashes_to(&mut arg, &mut backslash_count, 1); // Save the current remaining characters for the next step in the iterator. self.file_content = chars.as_str(); Some(arg) } } #[cfg(test)] mod test { use std::str::FromStr; use super::*; use crate::compiler::*; use crate::mock_command::*; use crate::server; use crate::test::mock_storage::MockStorage; use crate::test::utils::*; fn parse_arguments(arguments: Vec) -> CompilerArguments { super::parse_arguments(&arguments, &std::env::current_dir().unwrap(), false) } fn parse_arguments_clang(arguments: Vec) -> CompilerArguments { super::parse_arguments(&arguments, &std::env::current_dir().unwrap(), true) } #[test] fn test_detect_showincludes_prefix() { drop(env_logger::try_init()); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle().clone(); let f = TestFixture::new(); let srcfile = f.touch("test.h").unwrap(); let mut s = srcfile.to_str().unwrap(); if s.starts_with("\\\\?\\") { s = &s[4..]; } let stdout = format!("blah: {}\r\n", s); let stderr = String::from("some\r\nstderr\r\n"); next_command(&creator, Ok(MockChild::new(exit_status(0), stdout, stderr))); assert_eq!( "blah: ", detect_showincludes_prefix(&creator, "cl.exe".as_ref(), false, Vec::new(), &pool) .wait() .unwrap() ); } #[test] fn test_parse_arguments_simple() { let args = ovec!["-c", "foo.c", "-Fofoo.obj"]; let ParsedArguments { input, language, compilation_flag, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_eq!(Some("-c"), compilation_flag.to_str()); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_cpp_parse_arguments_collects_type_library_headers() { let args = ovec!["-c", "foo.cpp", "-Fofoo.obj"]; let ParsedArguments { input, language, outputs, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cpp"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ), ( "tlh", ArtifactDescriptor { path: PathBuf::from("foo.tlh"), optional: true } ), ( "tli", ArtifactDescriptor { path: PathBuf::from("foo.tli"), optional: true } ) ); } #[test] fn test_c_parse_arguments_does_not_collect_type_library_headers() { let args = ovec!["-c", "foo.c", "-Fofoo.obj"]; let ParsedArguments { input, language, outputs, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); } #[test] fn test_parse_compile_flag() { let args = ovec!["/c", "foo.c", "-Fofoo.obj"]; let ParsedArguments { input, language, compilation_flag, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_eq!(Some("/c"), compilation_flag.to_str()); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_default_name() { let args = ovec!["-c", "foo.c"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_double_dash() { let args = ovec!["-c", "-Fofoo.obj", "--", "foo.c"]; let ParsedArguments { input, double_dash_input, common_args, .. } = match parse_arguments(args.clone()) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); // MSVC doesn't support double dashes. If we got one, we'll pass them // through to MSVC for it to error out. assert!(!double_dash_input); assert_eq!(ovec!["--"], common_args); let ParsedArguments { input, double_dash_input, common_args, .. 
} = match parse_arguments_clang(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert!(double_dash_input); assert!(common_args.is_empty()); let args = ovec!["-c", "-Fofoo.obj", "foo.c", "--"]; let ParsedArguments { input, double_dash_input, common_args, .. } = match parse_arguments_clang(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); // Double dash after input file is ignored. assert!(!double_dash_input); assert!(common_args.is_empty()); let args = ovec!["-c", "-Fofoo.obj", "foo.c", "--", "bar.c"]; assert_eq!( CompilerArguments::CannotCache("multiple input files", Some("[\"bar.c\"]".to_string())), parse_arguments_clang(args) ); let args = ovec!["-c", "-Fofoo.obj", "foo.c", "--", "-fPIC"]; assert_eq!( CompilerArguments::CannotCache("multiple input files", Some("[\"-fPIC\"]".to_string())), parse_arguments_clang(args) ); } #[test] fn parse_argument_slashes() { let args = ovec!["-c", "foo.c", "/Fofoo.obj"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn parse_deps_arguments() { let arg_sets = vec![ ovec!["-c", "foo.c", "/Fofoo.obj", "/depsfoo.obj.json"], ovec![ "-c", "foo.c", "/Fofoo.obj", "/sourceDependenciesfoo.obj.json" ], ovec![ "-c", "foo.c", "/Fofoo.obj", "/sourceDependencies", "foo.obj.json" ], ]; for args in arg_sets { let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, depfile, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_eq!(Some(PathBuf::from_str("foo.obj.json").unwrap()), depfile); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } } #[test] fn test_parse_arguments_clang_passthrough() { let args = ovec![ "-Fohost_dictionary.obj", "-c", "-Xclang", "-MP", "-Xclang", "-dependency-file", "-Xclang", ".deps/host_dictionary.obj.pp", "-Xclang", "-MT", "-Xclang", "host_dictionary.obj", "-clang:-fprofile-generate", "-clang:-fprofile-use=xyz.profdata", "dictionary.c" ]; let ParsedArguments { dependency_args, preprocessor_args, common_args, profile_generate, extra_hash_files, .. 
} = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert!(profile_generate); assert!(preprocessor_args.is_empty()); assert_eq!( dependency_args, ovec!( "-Xclang", "-MP", "-Xclang", "-dependency-file", "-Xclang", ".deps/host_dictionary.obj.pp", "-Xclang", "-MT", "-Xclang", "host_dictionary.obj" ) ); assert_eq!( common_args, ovec!( "-clang:-fprofile-generate", "-clang:-fprofile-use=xyz.profdata" ) ); assert_eq!( extra_hash_files, ovec!(std::env::current_dir().unwrap().join("xyz.profdata")) ); } #[test] fn test_parse_arguments_extra() { let args = ovec!["-c", "foo.c", "-foo", "-Fofoo.obj", "-bar"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(common_args, ovec!["-foo", "-bar"]); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_values() { let args = ovec![ "-c", "foo.c", "-FI", "file", "-imsvc", "/a/b/c", "-Fofoo.obj", "/showIncludes", "/winsysroot../../some/dir" ]; let ParsedArguments { input, language, outputs, preprocessor_args, dependency_args, msvc_show_includes, common_args, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert_eq!(preprocessor_args, ovec!["-FIfile", "-imsvc/a/b/c"]); assert_eq!(dependency_args, ovec!["/showIncludes"]); assert_eq!(common_args, ovec!["/winsysroot../../some/dir"]); assert!(msvc_show_includes); } #[test] fn test_parse_arguments_pdb() { let args = ovec!["-c", "foo.c", "-Zi", "-Fdfoo.pdb", "-Fofoo.obj"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ), ( "pdb", ArtifactDescriptor { path: PathBuf::from("foo.pdb"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(common_args, ovec!["-Zi", "-Fdfoo.pdb"]); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_external_include() { // Parsing -external:I relies on -experimental:external being parsed // and placed into common_args. let args = ovec![ "-c", "foo.c", "-Fofoo.obj", "-experimental:external", "-external:templates-", "-external:I", "path/to/system/includes" ]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert_eq!(1, outputs.len()); assert!(preprocessor_args.is_empty()); assert_eq!( common_args, ovec![ "-experimental:external", "-external:templates-", "-external:I", "path/to/system/includes" ] ); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_external_warning_suppression_forward_slashes() { // Parsing /external:W relies on /experimental:external being parsed // and placed into common_args. for n in 0..5 { let args = ovec![ "-c", "foo.c", "/Fofoo.obj", "/experimental:external", format!("/external:W{}", n) ]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.obj"), optional: false } ) ); assert_eq!(1, outputs.len()); assert!(preprocessor_args.is_empty()); assert_eq!( common_args, ovec!["/experimental:external", format!("/external:W{}", n)] ); assert!(!msvc_show_includes); } } #[test] fn test_parse_arguments_empty_args() { assert_eq!(CompilerArguments::NotCompilation, parse_arguments(vec!())); } #[test] fn test_parse_arguments_not_compile() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments(ovec!["-Fofoo", "foo.c"]) ); } #[test] fn test_parse_arguments_passthrough() { let args = ovec![ "-Oy", "-Qpar", "-Qpar-", "-Gw", "-EHa", "-Fmdictionary-map", "-c", "-Fohost_dictionary.obj", "dictionary.c" ]; let ParsedArguments { input, common_args, dependency_args, preprocessor_args, .. 
} = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("dictionary.c"), input.to_str()); assert!(preprocessor_args.is_empty()); assert!(dependency_args.is_empty()); assert!(!common_args.is_empty()); assert_eq!( common_args, ovec!("-Oy", "-Qpar", "-Qpar-", "-Gw", "-EHa", "-Fmdictionary-map") ); } #[test] fn test_parse_arguments_too_many_inputs_single() { assert_eq!( CompilerArguments::CannotCache("multiple input files", Some("[\"bar.c\"]".to_string())), parse_arguments(ovec!["-c", "foo.c", "-Fofoo.obj", "bar.c"]) ); } #[test] fn test_parse_arguments_too_many_inputs_multiple() { assert_eq!( CompilerArguments::CannotCache( "multiple input files", Some("[\"bar.c\", \"baz.c\"]".to_string()) ), parse_arguments(ovec!["-c", "foo.c", "-Fofoo.obj", "bar.c", "baz.c"]) ); } #[test] fn test_parse_arguments_unsupported() { assert_eq!( CompilerArguments::CannotCache("-FA", None), parse_arguments(ovec!["-c", "foo.c", "-Fofoo.obj", "-FA"]) ); assert_eq!( CompilerArguments::CannotCache("-Fa", None), parse_arguments(ovec!["-Fa", "-c", "foo.c", "-Fofoo.obj"]) ); assert_eq!( CompilerArguments::CannotCache("-FR", None), parse_arguments(ovec!["-c", "foo.c", "-FR", "-Fofoo.obj"]) ); assert_eq!( CompilerArguments::CannotCache("-Fp", None), parse_arguments(ovec!["-c", "-Fpfoo.h", "foo.c"]) ); assert_eq!( CompilerArguments::CannotCache("-Yc", None), parse_arguments(ovec!["-c", "-Ycfoo.h", "foo.c"]) ); } #[test] fn test_responsefile_missing() { assert_eq!( CompilerArguments::CannotCache("@", None), parse_arguments(ovec!["-c", "foo.c", "@foo", "-Fofoo.obj"]) ); } #[test] fn test_responsefile_absolute_path() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); let cmd_file_path = td.path().join("foo"); { let mut file = File::create(&cmd_file_path).unwrap(); let content = b"-c foo.c -o foo.o"; file.write_all(content).unwrap(); } let arg = format!("@{}", cmd_file_path.display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(ovec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Failed to parse @-file, err: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_responsefile_relative_path() { // Generate the tempdir in the currentdir so we can use a relative path in this test. // MSVC allows relative paths to response files, so we must support that. let td = tempfile::Builder::new() .prefix("sccache") .tempdir_in("./") .unwrap(); let relative_to_tmp = td .path() .strip_prefix(std::env::current_dir().unwrap()) .unwrap(); let cmd_file_path = relative_to_tmp.join("foo"); { let mut file = File::create(&cmd_file_path).unwrap(); let content = b"-c foo.c -o foo.o"; file.write_all(content).unwrap(); } let arg = format!("@{}", cmd_file_path.display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments(ovec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Failed to parse @-file, err: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_responsefile_with_quotes() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); let cmd_file_path = td.path().join("foo"); { let mut file = File::create(&cmd_file_path).unwrap(); let content = b"-c \"Foo Bar.c\" -o foo.o"; file.write_all(content).unwrap(); } let arg = format!("@{}", cmd_file_path.display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(ovec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Failed to parse @-file, err: {:?}", o), }; assert_eq!(Some("Foo Bar.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_responsefile_multiline() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); let cmd_file_path = td.path().join("foo"); { let mut file = File::create(&cmd_file_path).unwrap(); let content = b"\n-c foo.c\n-o foo.o"; file.write_all(content).unwrap(); } let arg = format!("@{}", cmd_file_path.display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(ovec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Failed to parse @-file, err: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_responsefile_multiline_cr() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); let cmd_file_path = td.path().join("foo"); { let mut file = File::create(&cmd_file_path).unwrap(); let content = b"\r-c foo.c\r-o foo.o"; file.write_all(content).unwrap(); } let arg = format!("@{}", cmd_file_path.display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments(ovec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Failed to parse @-file, err: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_responsefile_encoding_utf16le() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); let cmd_file_path = td.path().join("foo"); { let mut file = File::create(&cmd_file_path).unwrap(); // pre-encoded with utf16le let content: [u8; 0x26] = [ 0xFF, 0xFE, // little endian BOM // `-c foo€.c -o foo.o` 0x2D, 0x00, 0x63, 0x00, 0x20, 0x00, 0x66, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0xAC, 0x20, 0x2E, 0x00, 0x63, 0x00, 0x20, 0x00, 0x2D, 0x00, 0x6F, 0x00, 0x20, 0x00, 0x66, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x2E, 0x00, 0x6F, 0x00, ]; file.write_all(&content).unwrap(); } let arg = format!("@{}", cmd_file_path.display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments(ovec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Failed to parse @-file, err: {:?}", o), }; assert_eq!(Some("foo€.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_responsefile_encoding_win1252() { let td = tempfile::Builder::new() .prefix("sccache") .tempdir() .unwrap(); let cmd_file_path = td.path().join("foo"); { let mut file = File::create(&cmd_file_path).unwrap(); // pre-encoded with Windows 1252 let content: [u8; 0x12] = [ // `-c foo€.c -o foo.o` // the euro symbol is 0x80 in Windows 1252 (and undefined in ISO-8859-1) 0x2D, 0x63, 0x20, 0x66, 0x6F, 0x6F, 0x80, 0x2E, 0x63, 0x20, 0x2D, 0x6F, 0x20, 0x66, 0x6F, 0x6F, 0x2E, 0x6F, ]; file.write_all(&content).unwrap(); } let arg = format!("@{}", cmd_file_path.display()); let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments(ovec![arg]) { CompilerArguments::Ok(args) => args, o => panic!("Failed to parse @-file, err: {:?}", o), }; assert_eq!(Some("foo€.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_missing_pdb() { assert_eq!( CompilerArguments::CannotCache("shared pdb", None), parse_arguments(ovec!["-c", "foo.c", "-Zi", "-Fofoo.obj"]) ); } #[test] fn test_parse_arguments_missing_edit_and_continue_pdb() { assert_eq!( CompilerArguments::CannotCache("shared pdb", None), parse_arguments(ovec!["-c", "foo.c", "-ZI", "-Fofoo.obj"]) ); } #[test] fn test_preprocess_double_dash_input() { let args = ovec!["-c", "-Fofoo.o.bj", "--", "foo.c"]; let parsed_args = match parse_arguments_clang(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let mut cmd = MockCommand { child: None, args: vec![], }; preprocess_cmd(&mut cmd, &parsed_args, Path::new(""), &[], true, true, true); let expected_args = ovec!["-E", "-nologo", "-clang:-frewrite-includes", "--", "foo.c"]; assert_eq!(cmd.args, expected_args); } #[test] fn test_compile_simple() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.c".into(), double_dash_input: false, language: Language::C, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.obj".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec![], arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, dist_command, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], ) .unwrap(); #[cfg(feature = "dist-client")] assert!(dist_command.is_some()); #[cfg(not(feature = "dist-client"))] assert!(dist_command.is_none()); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable); // Ensure that we ran all processes. 
assert_eq!(0, creator.lock().unwrap().children.len()); } #[test] fn test_compile_double_dash_input() { let args = ovec!["-c", "-Fofoo.obj", "--", "foo.c"]; let parsed_args = match parse_arguments_clang(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; let f = TestFixture::new(); let compiler = &f.bins[0]; let mut path_transformer = dist::PathTransformer::new(); let (command, _, _) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], ) .unwrap(); let expected_args = ovec!["-c", "-Fofoo.obj", "--", "foo.c"]; assert_eq!(command.arguments, expected_args); } #[test] fn test_compile_not_cacheable_pdb() { let creator = new_creator(); let f = TestFixture::new(); let pdb = f.touch("foo.pdb").unwrap(); let parsed_args = ParsedArguments { input: "foo.c".into(), double_dash_input: false, language: Language::C, compilation_flag: "/c".into(), depfile: None, outputs: vec![ ( "obj", ArtifactDescriptor { path: "foo.obj".into(), optional: false, }, ), ( "pdb", ArtifactDescriptor { path: pdb, optional: false, }, ), ] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec![], arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, dist_command, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], ) .unwrap(); #[cfg(feature = "dist-client")] assert!(dist_command.is_some()); #[cfg(not(feature = "dist-client"))] assert!(dist_command.is_none()); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::No, cacheable); // Ensure that we ran all processes. assert_eq!(0, creator.lock().unwrap().children.len()); } #[test] fn test_parse_fsanitize_blacklist() { let args = ovec![ "-c", "foo.c", "-o", "foo.o", "-fsanitize-blacklist=list.txt" ]; let ParsedArguments { common_args, extra_hash_files, .. 
} = match parse_arguments(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(ovec!["-fsanitize-blacklist=list.txt"], common_args); assert_eq!( ovec![std::env::current_dir().unwrap().join("list.txt")], extra_hash_files ); } #[test] #[cfg(windows)] fn local_oem_codepage_conversions() { use crate::util::wide_char_to_multi_byte; use windows_sys::Win32::Globalization::GetOEMCP; let current_oemcp = unsafe { GetOEMCP() }; // We don't control the local OEM codepage, so test only if it is one of: // United States, Latin-1, and Latin-1 + euro symbol if current_oemcp == 437 || current_oemcp == 850 || current_oemcp == 858 { // Non-ASCII characters const INPUT_STRING: &str = "ÇüéâäàåçêëèïîìÄÅ"; // The characters in INPUT_STRING encoded per the OEM codepage const INPUT_BYTES: [u8; 16] = [ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, ]; // Test the conversion from the OEM codepage to UTF-8 assert_eq!(from_local_codepage(&INPUT_BYTES).unwrap(), INPUT_STRING); // The characters in INPUT_STRING encoded in UTF-16 const INPUT_WORDS: [u16; 16] = [ 199, 252, 233, 226, 228, 224, 229, 231, 234, 235, 232, 239, 238, 236, 196, 197, ]; // Test the conversion from UTF-16 to the OEM codepage assert_eq!(wide_char_to_multi_byte(&INPUT_WORDS).unwrap(), INPUT_BYTES); } } } mozilla-sccache-40c3d6b/src/compiler/nvcc.rs000066400000000000000000002141711475712407500211010ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(unused_imports, dead_code, unused_variables)] use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::gcc::ArgData::*; use crate::compiler::{ self, gcc, get_compiler_info, write_temp_file, CCompileCommand, Cacheable, CompileCommand, CompileCommandImpl, CompilerArguments, Language, }; use crate::mock_command::{ exit_status, CommandChild, CommandCreator, CommandCreatorSync, ExitStatusValue, RunCommand, }; use crate::util::{run_input_output, OsStrExt}; use crate::{counted_array, dist, protocol, server}; use async_trait::async_trait; use fs::File; use fs_err as fs; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use log::Level::Trace; use regex::Regex; use std::collections::HashMap; use std::ffi::{OsStr, OsString}; use std::future::{Future, IntoFuture}; use std::io::{self, BufRead, Read, Write}; #[cfg(unix)] use std::os::unix::process::ExitStatusExt; use std::path::{Path, PathBuf}; use std::process; use which::which_in; use crate::errors::*; /// The host compiler family that `nvcc` delegates to; `Nvcc`, below, implements `CCompilerImpl`. 
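/// (Illustrative note, derived from `preprocess` below: the host compiler
/// family decides which flag elides line markers in preprocessor output,
/// `-Xcompiler=-P` for gcc-like hosts, `-Xcompiler=-EP` for msvc, and no
/// flag at all for nvc/nvc++, which cannot disable line info.)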
#[derive(Clone, Debug)] pub enum NvccHostCompiler { Gcc, Msvc, Nvhpc, } #[derive(Clone, Debug)] pub struct Nvcc { pub host_compiler: NvccHostCompiler, pub host_compiler_version: Option, pub version: Option, } #[async_trait] impl CCompilerImpl for Nvcc { fn kind(&self) -> CCompilerKind { CCompilerKind::Nvcc } fn plusplus(&self) -> bool { false } fn version(&self) -> Option { let nvcc_ver = self.version.clone().unwrap_or_default(); let host_ver = self.host_compiler_version.clone().unwrap_or_default(); let both_ver = [nvcc_ver, host_ver] .iter() .filter(|x| !x.is_empty()) .join("-"); if both_ver.is_empty() { None } else { Some(both_ver) } } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, env_vars: &[(OsString, OsString)], ) -> CompilerArguments { let mut arguments = arguments.to_vec(); if let Some(flags) = env_vars .iter() .find(|(k, _)| k == "NVCC_PREPEND_FLAGS") .and_then(|(_, p)| p.to_str()) { arguments = shlex::split(flags) .unwrap_or_default() .iter() .map(|s| s.clone().into_arg_os_string()) .chain(arguments.iter().cloned()) .collect::>(); } if let Some(flags) = env_vars .iter() .find(|(k, _)| k == "NVCC_APPEND_FLAGS") .and_then(|(_, p)| p.to_str()) { arguments.extend( shlex::split(flags) .unwrap_or_default() .iter() .map(|s| s.clone().into_arg_os_string()), ); } let parsed_args = gcc::parse_arguments( &arguments, cwd, (&gcc::ARGS[..], &ARGS[..]), false, self.kind(), ); match parsed_args { CompilerArguments::Ok(mut parsed_args) => { match parsed_args.compilation_flag.to_str() { Some("") => { /* no compile flag is valid */ } Some(flag) => { // Add the compilation flag to `parsed_args.common_args` so // it's considered when computing the hash. // // Consider the following cases: // $ sccache nvcc x.cu -o x.bin // $ sccache nvcc x.cu -o x.cu.o -c // $ sccache nvcc x.cu -o x.ptx -ptx // $ sccache nvcc x.cu -o x.cubin -cubin // // The preprocessor output for all four are identical, so // without including the compilation flag in the hasher's // inputs, the same hash would be generated for all four. parsed_args.common_args.push(flag.into()); } _ => unreachable!(), } CompilerArguments::Ok(parsed_args) } CompilerArguments::CannotCache(_, _) | CompilerArguments::NotCompilation => parsed_args, } } #[allow(clippy::too_many_arguments)] async fn preprocess( &self, creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, rewrite_includes_only: bool, _preprocessor_cache_mode: bool, ) -> Result where T: CommandCreatorSync, { let env_vars = env_vars .iter() .filter(|(k, _)| k != "NVCC_PREPEND_FLAGS" && k != "NVCC_APPEND_FLAGS") .cloned() .collect::>(); let language = match parsed_args.language { Language::C => Ok("c"), Language::Cxx => Ok("c++"), Language::ObjectiveC => Ok("objective-c"), Language::ObjectiveCxx => Ok("objective-c++"), Language::Cuda => Ok("cu"), _ => Err(anyhow!("PCH not supported by nvcc")), }?; let initialize_cmd_and_args = || { let mut command = creator.clone().new_command_sync(executable); command .current_dir(cwd) .env_clear() .envs(env_vars.clone()) .args(&parsed_args.preprocessor_args) .args(&parsed_args.common_args) .arg("-x") .arg(language) .arg(&parsed_args.input); command }; let dependencies_command = || { // NVCC doesn't support generating both the dependency information // and the preprocessor output at the same time. 
So if we have // need for both, we need separate compiler invocations let mut dependency_cmd = initialize_cmd_and_args(); dependency_cmd.args( &parsed_args .dependency_args .iter() .map(|arg| match arg.to_str().unwrap_or_default() { "-MD" | "--generate-dependencies-with-compile" => "-M", "-MMD" | "--generate-nonsystem-dependencies-with-compile" => "-MM", arg => arg, }) // protect against duplicate -M and -MM flags after transform .unique() .collect::>(), ); if log_enabled!(Trace) { let output_file_name = &parsed_args .outputs .get("obj") .context("Missing object file output") .unwrap() .path .file_name() .unwrap(); trace!( "[{}]: dependencies command: {:?}", output_file_name.to_string_lossy(), dependency_cmd ); } dependency_cmd }; let preprocessor_command = || { let mut preprocess_cmd = initialize_cmd_and_args(); // NVCC only supports `-E` when it comes after preprocessor and common flags. preprocess_cmd.arg("-E"); preprocess_cmd.arg(match self.host_compiler { // nvc/nvc++ don't support eliding line numbers NvccHostCompiler::Nvhpc => "", // msvc requires the `-EP` flag to elide line numbers NvccHostCompiler::Msvc => "-Xcompiler=-EP", // other host compilers are presumed to match `gcc` behavior NvccHostCompiler::Gcc => "-Xcompiler=-P", }); if log_enabled!(Trace) { let output_file_name = &parsed_args .outputs .get("obj") .context("Missing object file output") .unwrap() .path .file_name() .unwrap(); trace!( "[{}]: preprocessor command: {:?}", output_file_name.to_string_lossy(), preprocess_cmd ); } preprocess_cmd }; // Chain dependency generation and the preprocessor command to emulate a `proper` front end if !parsed_args.dependency_args.is_empty() { run_input_output(dependencies_command(), None).await?; } run_input_output(preprocessor_command(), None).await } fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> where T: CommandCreatorSync, { generate_compile_commands(parsed_args, executable, cwd, env_vars, &self.host_compiler).map( |(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }, ) } } pub fn generate_compile_commands( parsed_args: &ParsedArguments, executable: &Path, cwd: &Path, env_vars: &[(OsString, OsString)], host_compiler: &NvccHostCompiler, ) -> Result<(NvccCompileCommand, Option, Cacheable)> { let mut unhashed_args = parsed_args.unhashed_args.clone(); let keep_dir = { let mut keep = false; let mut keep_dir = None; // Remove all occurrences of `-keep` and `-keep-dir`, but save the keep dir for copying to later loop { if let Some(idx) = unhashed_args .iter() .position(|x| x == "-keep-dir" || x == "--keep-dir") { let dir = PathBuf::from(unhashed_args[idx + 1].as_os_str()); let dir = if dir.is_absolute() { dir } else { cwd.join(dir) }; unhashed_args.splice(idx..(idx + 2), []); keep_dir = Some(dir); continue; } else if let Some(idx) = unhashed_args.iter().position(|x| { x == "-keep" || x == "--keep" || x == "-save-temps" || x == "--save-temps" }) { keep = true; unhashed_args.splice(idx..(idx + 1), []); if keep_dir.is_none() { keep_dir = Some(cwd.to_path_buf()) } continue; } break; } // Match nvcc behavior where intermediate files are kept if: // * Only `-keep` is specified (files copied to cwd) // * Both `-keep -keep-dir=` are specified (files copied to ) // nvcc does _not_ keep intermediate files if `-keep-dir=` is specified without `-keep` 
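// Illustrative sketch of the expression below: it is equivalent to
// `if keep { keep_dir } else { None }`. With `-keep` alone this yields
// `Some(cwd)`; with `-keep -keep-dir=<dir>` it yields `Some(<dir>)`; with
// `-keep-dir=<dir>` but no `-keep` it yields `None`.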
keep.then_some(()).and(keep_dir) }; let num_parallel = { let mut num_parallel = 1; // Remove all occurrences of `-t=` or `--threads` because it's incompatible with --dryrun // Prefer the last occurrence of `-t=` or `--threads` to match nvcc behavior loop { if let Some(idx) = unhashed_args.iter().position(|x| x.starts_with("-t")) { let arg = unhashed_args.get(idx); if let Some(arg) = arg.and_then(|arg| arg.to_str()) { let range = if arg.contains('=') { 3..arg.len() } else { 2..arg.len() }; if let Ok(arg) = arg[range].parse::() { num_parallel = arg; } } unhashed_args.splice(idx..(idx + 1), []); continue; } if let Some(idx) = unhashed_args.iter().position(|x| x == "--threads") { let arg = unhashed_args.get(idx + 1); if let Some(arg) = arg.and_then(|arg| arg.to_str()) { if let Ok(arg) = arg.parse::() { num_parallel = arg; } } unhashed_args.splice(idx..(idx + 2), []); continue; } break; } num_parallel }; let env_vars = env_vars .iter() .filter(|(k, _)| k != "NVCC_PREPEND_FLAGS" && k != "NVCC_APPEND_FLAGS") .cloned() .collect::>(); let temp_dir = tempfile::Builder::new() .prefix("sccache_nvcc") .tempdir() .unwrap() .into_path(); let mut arguments = vec![]; if let Some(lang) = gcc::language_to_gcc_arg(parsed_args.language) { arguments.extend(vec!["-x".into(), lang.into()]) } let output = &parsed_args .outputs .get("obj") .context("Missing object file output") .unwrap() .path; arguments.extend(vec![ "-o".into(), // Canonicalize the output path if the compile flag indicates we won't // produce an object file. Since we run cicc and ptxas in a temp dir, // but we run the host compiler in `cwd` (the dir from which sccache was // executed), cicc/ptxas `-o` argument should point at the real out path // that's potentially relative to `cwd`. match parsed_args.compilation_flag.to_str() { Some("-c") | Some("--compile") // compile to object | Some("-dc") | Some("--device-c") // compile to object with -rdc=true | Some("-dw") | Some("--device-w") // compile to object with -rdc=false => output.clone().into(), _ => { if output.is_absolute() { output.clone().into() } else { cwd.join(output).into() } } }, ]); arguments.extend_from_slice(&parsed_args.preprocessor_args); arguments.extend_from_slice(&unhashed_args); arguments.extend_from_slice(&parsed_args.common_args); arguments.extend_from_slice(&parsed_args.arch_args); if parsed_args.double_dash_input { arguments.push("--".into()); } // Canonicalize here so the absolute path to the input is in the // preprocessor output instead of paths relative to `cwd`. // // Since cicc's input is the post-processed source run through cudafe++'s // transforms, its cache key is sensitive to the preprocessor output. The // preprocessor embeds the name of the input file in comments, so without // canonicalizing here, cicc will get cache misses on otherwise identical // input that should produce a cache hit. arguments.push( (if parsed_args.input.is_absolute() { parsed_args.input.clone() } else { cwd.join(&parsed_args.input).canonicalize().unwrap() }) .into(), ); let command = NvccCompileCommand { temp_dir, keep_dir, num_parallel, executable: executable.to_owned(), arguments, compilation_flag: parsed_args.compilation_flag.clone(), env_vars, cwd: cwd.to_owned(), host_compiler: host_compiler.clone(), // Only here so we can include it in logs output_file_name: output.file_name().unwrap().to_owned(), }; Ok(( command, None, // Never assume the outer `nvcc` call is cacheable. 
We must decompose the nvcc call into // its constituent subcommands with `--dryrun` and only cache the final build product. // // Always decomposing `nvcc --dryrun` is the only way to ensure caching nvcc invocations // is fully sound, because the `nvcc -E` preprocessor output is not sufficient to detect // all source code changes. // // Specifically, `nvcc -E` always defines __CUDA_ARCH__, which means changes to host-only // code guarded by an `#ifndef __CUDA_ARCH__` will _not_ be captured in `nvcc -E` output. Cacheable::No, )) } #[derive(Clone, Debug)] pub struct NvccCompileCommand { pub temp_dir: PathBuf, pub keep_dir: Option, pub num_parallel: usize, pub executable: PathBuf, pub arguments: Vec, pub compilation_flag: OsString, pub env_vars: Vec<(OsString, OsString)>, pub cwd: PathBuf, pub host_compiler: NvccHostCompiler, pub output_file_name: OsString, } #[async_trait] impl CompileCommandImpl for NvccCompileCommand { fn get_executable(&self) -> PathBuf { self.executable.clone() } fn get_arguments(&self) -> Vec { self.arguments.clone() } fn get_env_vars(&self) -> Vec<(OsString, OsString)> { self.env_vars.clone() } fn get_cwd(&self) -> PathBuf { self.cwd.clone() } async fn execute( &self, service: &server::SccacheService, creator: &T, ) -> Result where T: CommandCreatorSync, { let NvccCompileCommand { temp_dir, keep_dir, num_parallel, executable, arguments, compilation_flag, env_vars, cwd, host_compiler, output_file_name, } = self; let nvcc_subcommand_groups = group_nvcc_subcommands_by_compilation_stage( creator, executable, arguments, compilation_flag, cwd, temp_dir.as_path(), keep_dir.clone(), env_vars, host_compiler, output_file_name, ) .await?; let maybe_keep_temps_then_clean = || { // If the caller passed `-keep` or `-keep-dir`, copy the // temp files to the requested location. We do this because we // override `-keep` and `-keep-dir` in our `nvcc --dryrun` call. 
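// Roughly equivalent to this shell sequence (an illustrative sketch, Unix
// syntax assumed):
//   mkdir -p "$keep_dir" && mv "$temp_dir"/* "$keep_dir"/ ; rm -rf "$temp_dir"
// Note that the temp dir is removed whether or not the copy succeeded.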
let maybe_keep_temps = keep_dir.as_ref().and_then(|dst| { fs::create_dir_all(dst) .and_then(|_| fs::read_dir(temp_dir)) .and_then(|files| { files .filter_map(|path| path.ok()) .filter_map(|path| { path.file_name() .to_str() .map(|file| (path.path(), file.to_owned())) }) .try_fold((), |res, (path, file)| fs::rename(path, dst.join(file))) }) .ok() }); maybe_keep_temps .map_or_else( || fs::remove_dir_all(temp_dir).ok(), |_| fs::remove_dir_all(temp_dir).ok(), ) .unwrap_or(()); }; let mut output = process::Output { status: process::ExitStatus::default(), stdout: vec![], stderr: vec![], }; let n = nvcc_subcommand_groups.len(); let cuda_front_end_range = if n > 0 { 0..1 } else { 0..0 }; let final_assembly_range = if n > 1 { n - 1..n } else { 0..0 }; let device_compile_range = if n > 2 { 1..n - 1 } else { 0..0 }; let num_parallel = device_compile_range.len().min(*num_parallel).max(1); for command_group_chunks in [ nvcc_subcommand_groups[cuda_front_end_range].chunks(1), // compile multiple device architectures in parallel when `nvcc -t=N` is specified nvcc_subcommand_groups[device_compile_range].chunks(num_parallel), nvcc_subcommand_groups[final_assembly_range].chunks(1), ] { for command_groups in command_group_chunks { let results = futures::future::join_all(command_groups.iter().map(|commands| { run_nvcc_subcommands_group(service, creator, cwd, commands, output_file_name) })) .await; for result in results { output = aggregate_output(output, result.unwrap_or_else(error_to_output)); } if !output.status.success() { output.stdout.shrink_to_fit(); output.stderr.shrink_to_fit(); maybe_keep_temps_then_clean(); return Err(ProcessError(output).into()); } } } output.stdout.shrink_to_fit(); output.stderr.shrink_to_fit(); maybe_keep_temps_then_clean(); Ok(output) } } #[derive(Clone, Debug)] pub struct NvccGeneratedSubcommand { pub exe: PathBuf, pub args: Vec, pub cwd: PathBuf, pub env_vars: Vec<(OsString, OsString)>, pub cacheable: Cacheable, } #[allow(clippy::too_many_arguments)] async fn group_nvcc_subcommands_by_compilation_stage( creator: &T, executable: &Path, arguments: &[OsString], compilation_flag: &OsStr, cwd: &Path, tmp: &Path, keep_dir: Option, env_vars: &[(OsString, OsString)], host_compiler: &NvccHostCompiler, output_file_name: &OsStr, ) -> Result>> where T: CommandCreatorSync, { // Run `nvcc --dryrun` twice to ensure the commands are correct // relative to the directory where they're run. // // All the "nvcc" commands (cudafe++, cicc, ptxas, nvlink, fatbinary) // are run in the temp dir, so their arguments should be relative to // the temp dir, e.g. `cudafe++ [...] "x.cpp4.ii"` // // All the host compiler invocations are run in the original `cwd` where // sccache was invoked. Arguments will be relative to the cwd, except // any arguments that reference nvcc-generated files should be absolute // to the temp dir, e.g. `gcc -E [...] 
x.cu -o /tmp/dir/x.cpp4.ii` // Roughly equivalent to: // ```shell // cat <(nvcc --dryrun --keep \ // | nl -n ln -s ' ' -w 1 \ // | grep -P "^[0-9]+ (cicc|ptxas|cudafe|nvlink|fatbinary)") \ // \ // <(nvcc --dryrun --keep --keep-dir /tmp/dir \ // | nl -n ln -s ' ' -w 1 \ // | grep -P -v "^[0-9]+ (cicc|ptxas|cudafe|nvlink|fatbinary)") \ // \ // | sort -k 1n // ``` let mut env_vars_1 = env_vars.to_vec(); let mut env_vars_2 = env_vars.to_vec(); let is_nvcc_exe = |exe: &str| matches!(exe, "cicc" | "ptxas" | "cudafe++" | "nvlink" | "fatbinary"); let (nvcc_commands, host_commands) = futures::future::try_join( // Get the nvcc compile command lines with paths relative to `tmp` select_nvcc_subcommands( creator, executable, cwd, &mut env_vars_1, keep_dir.is_none(), arguments, is_nvcc_exe, host_compiler, output_file_name, ), // Get the host compile command lines with paths relative to `cwd` and absolute paths to `tmp` select_nvcc_subcommands( creator, executable, cwd, &mut env_vars_2, keep_dir.is_none(), &[arguments, &["--keep-dir".into(), tmp.into()][..]].concat(), |exe| !is_nvcc_exe(exe), host_compiler, output_file_name, ), ) .await?; drop(env_vars_2); let env_vars = env_vars_1; // Now zip the two lists of commands again by sorting on original line index. // Transform to tuples that include the dir in which each command should run. let all_commands = nvcc_commands .iter() // Run cudafe++, nvlink, cicc, ptxas, and fatbinary in `tmp` .map(|(idx, exe, args)| (idx, tmp, exe, args)) .chain( host_commands .iter() // Run host preprocessing and compilation steps in `cwd` .map(|(idx, exe, args)| (idx, cwd, exe, args)), ) .sorted_by(|a, b| Ord::cmp(&a.0, &b.0)); // Create groups of commands that should be run sequential relative to each other, // but can optionally be run in parallel to other groups if the user requested via // `nvcc --threads`. let preprocessor_flag = match host_compiler { NvccHostCompiler::Msvc => "-P", _ => "-E", } .to_owned(); let gen_module_id_file_flag = "--gen_module_id_file".to_owned(); let mut cuda_front_end_group = Vec::::new(); let mut final_assembly_group = Vec::::new(); let mut device_compile_groups = HashMap::>::new(); for (_, dir, exe, args) in all_commands { let mut args = args.clone(); if let (env_vars, cacheable, Some(group)) = match exe.file_stem().and_then(|s| s.to_str()) { // fatbinary and nvlink are not cacheable Some("fatbinary") | Some("nvlink") => ( env_vars.clone(), Cacheable::No, Some(&mut final_assembly_group), ), // cicc and ptxas are cacheable Some("cicc") => { match compilation_flag.to_str() { // Fix for CTK < 12.8: // If `nvcc` is invoked with `-c` (or any of its variants), remove the // `--gen_module_id_file` flag. In this mode, we instruct `cudafe++` // to generate this file, so cicc shouldn't generate it again. 
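// Illustrative before/after of the transform applied below (the file name
// is a made-up example, not taken from a real invocation):
//   before: cicc ... --gen_module_id_file --module_id_file_name "x.module_id" ...
//   after:  cicc ...                      --module_id_file_name "x.module_id" ...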
Some("-c") | Some("--compile") | Some("-dc") | Some("--device-c") | Some("-dw") | Some("--device-w") => { if let Some(idx) = args.iter().position(|x| x == &gen_module_id_file_flag) { args.splice(idx..(idx + 1), []); } } _ => {} } let group = device_compile_groups.get_mut(&args[args.len() - 3]); (env_vars.clone(), Cacheable::Yes, group) } Some("ptxas") => { let group = device_compile_groups.values_mut().find(|cmds| { if let Some(cicc) = cmds.last() { if let Some(cicc_out) = cicc.args.last() { return cicc_out == &args[args.len() - 3]; } } false }); (env_vars.clone(), Cacheable::Yes, group) } // cudafe++ _must be_ cached, because the `.module_id` file is unique to each invocation (new in CTK 12.8) Some("cudafe++") => { // Fix for CTK < 12.0: // Add `--gen_module_id_file` if the cudafe++ args include `--module_id_file_name` if !args.contains(&gen_module_id_file_flag) { if let Some(idx) = args.iter().position(|x| x == "--module_id_file_name") { // Insert `--gen_module_id_file` just before `--module_id_file_name` to match nvcc behavior args.splice(idx..idx, [gen_module_id_file_flag.clone()]); } } ( env_vars.clone(), Cacheable::Yes, Some(&mut cuda_front_end_group), ) } _ => { // All generated host compiler commands include one of these defines. // If one of these isn't present, this command is either a new binary // in the CTK that we don't know about, or a line like `rm x_dlink.reg.c` // that nvcc generates in certain cases. if !args.iter().any(|arg| { arg.starts_with("-D__CUDACC__") || arg.starts_with("-D__NVCC__") || arg.starts_with("-D__CUDA_ARCH__") || arg.starts_with("-D__CUDA_ARCH_LIST__") }) { continue; } if args.contains(&preprocessor_flag) { // Each preprocessor step represents the start of a new command group if let Some(out_file) = if cfg!(target_os = "windows") { args.iter() .find(|x| x.starts_with("-Fi")) .and_then(|x| x.strip_prefix("-Fi")) } else { args.iter() .position(|x| x == "-o") .and_then(|i| args.get(i + 1).map(|o| o.as_str())) } .map(PathBuf::from) .and_then(|out_path| { out_path .file_name() .and_then(|out_name| out_name.to_str()) .map(|out_name| out_name.to_owned()) }) .and_then(|out_name| { // If the output file ends with... // * .cpp1.ii - cicc/ptxas input // * .cpp4.ii - cudafe++ input if out_name.ends_with(".cpp1.ii") { Some(out_name.to_owned()) } else { None } }) { let new_device_compile_group = vec![]; device_compile_groups.insert(out_file.clone(), new_device_compile_group); ( env_vars.clone(), Cacheable::No, device_compile_groups.get_mut(&out_file), ) } else { ( env_vars.clone(), Cacheable::No, Some(&mut cuda_front_end_group), ) } } else { // Cache the host compiler calls, since we've marked the outer `nvcc` call // as non-cacheable. This ensures `sccache nvcc ...` _always_ decomposes the // nvcc call into its constituent subcommands with `--dryrun`, but only caches // the final build product once. // // Always decomposing `nvcc --dryrun` is the only way to ensure caching nvcc invocations // is fully sound, because the `nvcc -E` preprocessor output is not sufficient to detect // all source code changes. // // Specifically, `nvcc -E` always defines __CUDA_ARCH__, which means changes to host-only // code guarded by an `#ifndef __CUDA_ARCH__` will _not_ be captured in `nvcc -E` output. ( env_vars .iter() .chain( [ // HACK: This compilation will look like a C/C++ compilation, // but we want to report it in the stats as a CUDA compilation. 
// The SccacheService API doesn't have a great way to specify this // case, so we set a special envvar here that it can read when the // compilation is finished. ("__SCCACHE_THIS_IS_A_CUDA_COMPILATION__".into(), "".into()), ] .iter(), ) .cloned() .collect::>(), Cacheable::Yes, Some(&mut final_assembly_group), ) } } } { if log_enabled!(log::Level::Trace) { trace!( "[{}]: transformed nvcc command: \"{}\"", output_file_name.to_string_lossy(), [ &[format!("cd {} &&", dir.to_string_lossy()).to_string()], &[exe.to_str().unwrap_or_default().to_string()][..], &args[..] ] .concat() .join(" ") ); } group.push(NvccGeneratedSubcommand { exe: exe.clone(), args: args.clone(), cwd: dir.into(), env_vars, cacheable, }); } } let mut command_groups = vec![]; command_groups.push(cuda_front_end_group); command_groups.extend(device_compile_groups.into_values()); command_groups.push(final_assembly_group); Ok(command_groups) } #[allow(clippy::too_many_arguments)] async fn select_nvcc_subcommands( creator: &T, executable: &Path, cwd: &Path, env_vars: &mut Vec<(OsString, OsString)>, remap_filenames: bool, arguments: &[OsString], select_subcommand: F, host_compiler: &NvccHostCompiler, output_file_name: &OsStr, ) -> Result)>> where F: Fn(&str) -> bool, T: CommandCreatorSync, { if log_enabled!(log::Level::Trace) { trace!( "[{}]: nvcc dryrun command: {:?}", output_file_name.to_string_lossy(), [ &[executable.to_str().unwrap_or_default().to_string()][..], &dist::osstrings_to_strings(arguments).unwrap_or_default()[..], &["--dryrun".into(), "--keep".into()][..] ] .concat() .join(" ") ); } let mut nvcc_dryrun_cmd = creator.clone().new_command_sync(executable); nvcc_dryrun_cmd .args(&[arguments, &["--dryrun".into(), "--keep".into()][..]].concat()) .env_clear() .current_dir(cwd) .envs(env_vars.to_vec()); let nvcc_dryrun_output = run_input_output(nvcc_dryrun_cmd, None).await?; let mut ext_counts = HashMap::::new(); let mut old_to_new = HashMap::::new(); let is_valid_line_re = Regex::new(r"^#\$ (.*)$").unwrap(); let is_envvar_line_re = Regex::new(r"^([_A-Z]+)=(.*)$").unwrap(); let mut dryrun_env_vars = Vec::<(OsString, OsString)>::new(); let mut dryrun_env_vars_re_map = HashMap::::new(); let mut lines = Vec::<(usize, PathBuf, Vec)>::new(); #[cfg(unix)] let reader = std::io::BufReader::new(&nvcc_dryrun_output.stderr[..]); #[cfg(windows)] let reader = std::io::BufReader::new(&nvcc_dryrun_output.stdout[..]); for pair in reader.lines().enumerate() { let (idx, line) = pair; // Select lines that match the `#$ ` prefix from nvcc --dryrun let line = match select_valid_dryrun_lines(&is_valid_line_re, &line?) { Ok(line) => line, // Ignore lines that don't start with `#$ `. 
For some reason, nvcc // on Windows prints the name of the input file without the prefix Err(err) => continue, }; let maybe_exe_and_args = fold_env_vars_or_split_into_exe_and_args( &is_envvar_line_re, &mut dryrun_env_vars, &mut dryrun_env_vars_re_map, cwd, &line, host_compiler, )?; let (exe, mut args) = match maybe_exe_and_args { Some(exe_and_args) => exe_and_args, _ => continue, }; // Remap nvcc's generated file names to deterministic names if remap_filenames { args = remap_generated_filenames(&args, &mut old_to_new, &mut ext_counts); } match exe.file_stem().and_then(|s| s.to_str()) { None => continue, Some(exe_name) => { if select_subcommand(exe_name) { lines.push((idx, exe, args)); } } } } for pair in dryrun_env_vars { env_vars.splice( if let Some(idx) = env_vars.iter().position(|(k, _)| *k == pair.0) { idx..idx + 1 } else { env_vars.len()..env_vars.len() }, [pair], ); } Ok(lines) } fn select_valid_dryrun_lines(re: &Regex, line: &str) -> Result { match re.captures(line) { Some(caps) => { let (_, [rest]) = caps.extract(); Ok(rest.to_string()) } _ => Err(anyhow!("nvcc error: {:?}", line)), } } fn fold_env_vars_or_split_into_exe_and_args( re: &Regex, env_vars: &mut Vec<(OsString, OsString)>, env_var_re_map: &mut HashMap, cwd: &Path, line: &str, host_compiler: &NvccHostCompiler, ) -> Result)>> { fn envvar_in_shell_format(var: &str) -> String { if cfg!(target_os = "windows") { format!("%{}%", var) // %CICC_PATH% } else { format!("${}", var) // $CICC_PATH } } fn envvar_in_shell_format_re(var: &str) -> Regex { Regex::new( &(if cfg!(target_os = "windows") { regex::escape(&envvar_in_shell_format(var)) } else { regex::escape(&envvar_in_shell_format(var)) + r"[^\w]" }), ) .unwrap() } // Intercept the environment variable lines and add them to the env_vars list if let Some(var) = re.captures(line) { let (_, [var, val]) = var.extract(); env_var_re_map .entry(var.to_owned()) .or_insert_with_key(|var| envvar_in_shell_format_re(var)); env_vars.push((var.into(), val.into())); return Ok(None); } // The rest of the lines are subcommands, so parse into a vec of [cmd, args..] let mut line = if cfg!(target_os = "windows") { let line = line .replace("\"\"", "\"") .replace(r"\\?\", "") .replace('\\', "/") .replace(r"//?/", ""); match host_compiler { NvccHostCompiler::Msvc => line.replace(" -E ", " -P ").replace(" > ", " -Fi"), _ => line, } } else { line.to_owned() }; // Expand envvars in nvcc subcommands, i.e. "$CICC_PATH/cicc ..." 
or "%CICC_PATH%/cicc" if let Some(env_vars) = dist::osstring_tuples_to_strings(env_vars) { for (var, val) in env_vars { if let Some(re) = env_var_re_map.get(&var) { if re.is_match(&line) { line = line.replace(&envvar_in_shell_format(&var), &val); } } } } let args = match shlex::split(&line) { Some(args) => args, None => return Err(anyhow!("Could not parse shell line")), }; let (exe, args) = match args.split_first() { Some(exe_and_args) => exe_and_args, None => return Err(anyhow!("Could not split shell line")), }; let env_path = env_vars .iter() .find(|(k, _)| k == "PATH") .map(|(_, p)| p.to_owned()) .unwrap(); let exe = which_in(exe, env_path.into(), cwd)?; Ok(Some((exe.clone(), args.to_vec()))) } fn remap_generated_filenames( args: &[String], old_to_new: &mut HashMap, ext_counts: &mut HashMap, ) -> Vec { args.iter() .map(|arg| { // Special case for MSVC's preprocess output file name flag let arg_is_msvc_preprocessor_output = arg.starts_with("-Fi"); let arg = if arg_is_msvc_preprocessor_output { arg.trim_start_matches("-Fi").to_owned() } else { arg.to_owned() }; // If the argument doesn't start with `-` and is a file that // ends in one of the below extensions, rename the file to an // auto-incrementing stable name let maybe_extension = (!arg.starts_with('-')) .then(|| { [ ".cpp1.ii", ".cpp4.ii", ".cudafe1.c", ".cudafe1.cpp", ".cudafe1.stub.c", ] .iter() .find(|ext| arg.ends_with(*ext)) .copied() }) .unwrap_or(None); // If the argument is a file that ends in one of the above extensions: // * If it's our first time seeing this file, create a unique name for it // * If we've seen this file before, lookup its unique name in the hash map // // This ensures stable names are in cudafe++ output and #include directives, // eliminating one source of false-positive cache misses. let arg = match maybe_extension { Some(extension) => { old_to_new .entry(arg) .or_insert_with_key(|arg| { // Initialize or update the number of files with a given extension: // compute_70.cudafe1.stub.c -> x_0.cudafe1.stub.c // compute_60.cudafe1.stub.c -> x_1.cudafe1.stub.c // etc. let count = ext_counts .entry(extension.into()) .and_modify(|c| *c += 1) .or_insert(0) .to_string(); // Return `/tmp/dir/x_{count}.{ext}` as the new name, i.e. `/tmp/dir/x_0.cudafe1.stub.c` PathBuf::from(arg) .parent() .unwrap_or(Path::new("")) // Don't use the count as the first character of the file name, because the file name // may be used as an identifier (via the __FILE__ macro) and identifiers with leading // digits are not valid in C/C++, i.e. `x_0.cudafe1.cpp` instead of `0.cudafe1.cpp`. .join("x_".to_owned() + &count + extension) .to_string_lossy() .to_string() }) .to_owned() } None => { // If the argument isn't a file name with one of our extensions, // it may _reference_ files we've renamed. Go through and replace // all old names with their new stable names. // // Sort by string length descending so we don't accidentally replace // `zzz.cudafe1.cpp` with the new name for `zzz.cudafe1.c`. 
// // For example, if we have these renames: // // compute_70.cudafe1.cpp -> x_0.cudafe1.cpp // compute_70.cudafe1.c -> x_2.cudafe1.c // // `compute_70.cudafe1.cpp` should be replaced with `x_0.cudafe1.cpp`, not `x_2.cudafe1.c` // let mut arg = arg.clone(); for (old, new) in old_to_new .iter() .sorted_by(|a, b| b.0.len().cmp(&a.0.len())) { arg = arg.replace(old, new); } arg } }; if arg_is_msvc_preprocessor_output { format!("-Fi{}", arg) } else { arg } }) .collect::>() } async fn run_nvcc_subcommands_group( service: &server::SccacheService, creator: &T, cwd: &Path, commands: &[NvccGeneratedSubcommand], output_file_name: &OsStr, ) -> Result where T: CommandCreatorSync, { let mut output = process::Output { status: process::ExitStatus::default(), stdout: vec![], stderr: vec![], }; for cmd in commands { let NvccGeneratedSubcommand { exe, args, cwd, env_vars, cacheable, } = cmd; if log_enabled!(log::Level::Trace) { trace!( "[{}]: run_commands_sequential cwd={:?}, cmd=\"{}\"", output_file_name.to_string_lossy(), cwd, [ vec![exe.clone().into_os_string().into_string().unwrap()], args.to_vec() ] .concat() .join(" ") ); } let out = match cacheable { Cacheable::No => { let mut cmd = creator.clone().new_command_sync(exe); cmd.args(args) .current_dir(cwd) .env_clear() .envs(env_vars.to_vec()); run_input_output(cmd, None) .await .unwrap_or_else(error_to_output) } Cacheable::Yes => { let srvc = service.clone(); let args = dist::strings_to_osstrings(args); match srvc .compiler_info(exe.clone(), cwd.to_owned(), &args, env_vars) .await { Err(err) => error_to_output(err), Ok(compiler) => match compiler.parse_arguments(&args, cwd, env_vars) { CompilerArguments::NotCompilation => Err(anyhow!("Not compilation")), CompilerArguments::CannotCache(why, extra_info) => Err(extra_info .map_or_else( || anyhow!("Cannot cache({}): {:?} {:?}", why, exe, args), |desc| { anyhow!("Cannot cache({}, {}): {:?} {:?}", why, desc, exe, args) }, )), CompilerArguments::Ok(hasher) => { srvc.start_compile_task( compiler, hasher, args, cwd.to_owned(), env_vars .iter() .chain([("SCCACHE_DIRECT".into(), "false".into())].iter()) .cloned() .collect::>(), ) .await } } .map_or_else(error_to_output, |res| compile_result_to_output(exe, res)), } } }; output = aggregate_output(output, out); if !output.status.success() { break; } } Ok(output) } fn aggregate_output(lhs: process::Output, rhs: process::Output) -> process::Output { process::Output { status: exit_status( std::cmp::max(status_to_code(lhs.status), status_to_code(rhs.status)) as ExitStatusValue, ), stdout: [lhs.stdout, rhs.stdout].concat(), stderr: [lhs.stderr, rhs.stderr].concat(), } } fn error_to_output(err: Error) -> process::Output { match err.downcast::() { Ok(ProcessError(out)) => out, Err(err) => process::Output { status: exit_status(1 as ExitStatusValue), stdout: vec![], stderr: err.to_string().into_bytes(), }, } } fn compile_result_to_output(exe: &Path, res: protocol::CompileFinished) -> process::Output { if let Some(signal) = res.signal { return process::Output { status: exit_status(signal as ExitStatusValue), stdout: res.stdout, stderr: [ format!( "{} terminated (signal: {})", exe.file_stem().unwrap().to_string_lossy(), signal ) .as_bytes(), &res.stderr, ] .concat(), }; } process::Output { status: exit_status(res.retcode.unwrap_or(0) as ExitStatusValue), stdout: res.stdout, stderr: res.stderr, } } #[cfg(unix)] fn status_to_code(res: process::ExitStatus) -> ExitStatusValue { if res.success() { 0 as ExitStatusValue } else { res.signal().or(res.code()).unwrap_or(1) as 
ExitStatusValue } } #[cfg(windows)] fn status_to_code(res: process::ExitStatus) -> ExitStatusValue { if res.success() { 0 as ExitStatusValue } else { res.code().unwrap_or(1) as ExitStatusValue } } counted_array!(pub static ARGS: [ArgInfo; _] = [ //todo: refactor show_includes into dependency_args take_arg!("--Werror", OsString, CanBeSeparated('='), PreprocessorArgument), take_arg!("--archive-options options", OsString, CanBeSeparated('='), PassThrough), flag!("--compile", DoCompilation), take_arg!("--compiler-bindir", OsString, CanBeSeparated('='), PassThrough), take_arg!("--compiler-options", OsString, CanBeSeparated('='), PreprocessorArgument), flag!("--cubin", DoCompilation), take_arg!("--default-stream", OsString, CanBeSeparated('='), PassThrough), flag!("--device-c", DoCompilation), flag!("--device-w", DoCompilation), flag!("--expt-extended-lambda", PreprocessorArgumentFlag), flag!("--expt-relaxed-constexpr", PreprocessorArgumentFlag), flag!("--extended-lambda", PreprocessorArgumentFlag), flag!("--fatbin", DoCompilation), take_arg!("--generate-code", OsString, CanBeSeparated('='), PassThrough), flag!("--generate-dependencies-with-compile", NeedDepTarget), flag!("--generate-nonsystem-dependencies-with-compile", NeedDepTarget), take_arg!("--gpu-architecture", OsString, CanBeSeparated('='), PassThrough), take_arg!("--gpu-code", OsString, CanBeSeparated('='), PassThrough), take_arg!("--include-path", PathBuf, CanBeSeparated('='), PreprocessorArgumentPath), flag!("--keep", UnhashedFlag), take_arg!("--keep-dir", OsString, CanBeSeparated('='), Unhashed), take_arg!("--linker-options", OsString, CanBeSeparated('='), PassThrough), take_arg!("--maxrregcount", OsString, CanBeSeparated('='), PassThrough), flag!("--no-host-device-initializer-list", PreprocessorArgumentFlag), take_arg!("--nvlink-options", OsString, CanBeSeparated('='), PassThrough), take_arg!("--options-file", PathBuf, CanBeSeparated('='), ExtraHashFile), flag!("--optix-ir", DoCompilation), flag!("--ptx", DoCompilation), take_arg!("--ptxas-options", OsString, CanBeSeparated('='), PassThrough), take_arg!("--relocatable-device-code", OsString, CanBeSeparated('='), PreprocessorArgument), flag!("--save-temps", UnhashedFlag), take_arg!("--system-include", PathBuf, CanBeSeparated('='), PreprocessorArgumentPath), take_arg!("--threads", OsString, CanBeSeparated('='), Unhashed), take_arg!("--x", OsString, CanBeSeparated('='), Language), take_arg!("-Werror", OsString, CanBeSeparated('='), PreprocessorArgument), take_arg!("-Xarchive", OsString, CanBeSeparated('='), PassThrough), take_arg!("-Xcompiler", OsString, CanBeSeparated('='), PreprocessorArgument), take_arg!("-Xlinker", OsString, CanBeSeparated('='), PassThrough), take_arg!("-Xnvlink", OsString, CanBeSeparated('='), PassThrough), take_arg!("-Xptxas", OsString, CanBeSeparated('='), PassThrough), take_arg!("-arch", OsString, CanBeSeparated('='), PassThrough), take_arg!("-ccbin", OsString, CanBeSeparated('='), PassThrough), take_arg!("-code", OsString, CanBeSeparated('='), PassThrough), flag!("-cubin", DoCompilation), flag!("-dc", DoCompilation), take_arg!("-default-stream", OsString, CanBeSeparated('='), PassThrough), flag!("-dw", DoCompilation), flag!("-expt-extended-lambda", PreprocessorArgumentFlag), flag!("-expt-relaxed-constexpr", PreprocessorArgumentFlag), flag!("-extended-lambda", PreprocessorArgumentFlag), flag!("-fatbin", DoCompilation), take_arg!("-gencode", OsString, CanBeSeparated('='), PassThrough), take_arg!("-isystem", PathBuf, CanBeSeparated('='), 
PreprocessorArgumentPath), flag!("-keep", UnhashedFlag), take_arg!("-keep-dir", OsString, CanBeSeparated('='), Unhashed), take_arg!("-maxrregcount", OsString, CanBeSeparated('='), PassThrough), flag!("-nohdinitlist", PreprocessorArgumentFlag), flag!("-optix-ir", DoCompilation), flag!("-ptx", DoCompilation), take_arg!("-rdc", OsString, CanBeSeparated('='), PreprocessorArgument), flag!("-save-temps", UnhashedFlag), take_arg!("-t", OsString, CanBeSeparated, Unhashed), take_arg!("-t=", OsString, Concatenated, Unhashed), take_arg!("-x", OsString, CanBeSeparated('='), Language), ]); #[cfg(test)] mod test { use super::*; use crate::compiler::gcc; use crate::compiler::*; use crate::mock_command::*; use crate::test::utils::*; use std::collections::HashMap; use std::path::PathBuf; fn parse_arguments_gcc(arguments: Vec) -> CompilerArguments { let arguments = arguments.iter().map(OsString::from).collect::>(); Nvcc { host_compiler: NvccHostCompiler::Gcc, host_compiler_version: None, version: None, } .parse_arguments(&arguments, ".".as_ref(), &[]) } fn parse_arguments_msvc(arguments: Vec) -> CompilerArguments { let arguments = arguments.iter().map(OsString::from).collect::>(); Nvcc { host_compiler: NvccHostCompiler::Msvc, host_compiler_version: None, version: None, } .parse_arguments(&arguments, ".".as_ref(), &[]) } fn parse_arguments_nvc(arguments: Vec) -> CompilerArguments { let arguments = arguments.iter().map(OsString::from).collect::>(); Nvcc { host_compiler: NvccHostCompiler::Nvhpc, host_compiler_version: None, version: None, } .parse_arguments(&arguments, ".".as_ref(), &[]) } macro_rules! parses { ( $( $s:expr ),* ) => { match parse_arguments_gcc(vec![ $( $s.to_string(), )* ]) { CompilerArguments::Ok(a) => a, o => panic!("Got unexpected parse result: {:?}", o), } } } macro_rules! parses_msvc { ( $( $s:expr ),* ) => { match parse_arguments_msvc(vec![ $( $s.to_string(), )* ]) { CompilerArguments::Ok(a) => a, o => panic!("Got unexpected parse result: {:?}", o), } } } macro_rules! 
parses_nvc { ( $( $s:expr ),* ) => { match parse_arguments_nvc(vec![ $( $s.to_string(), )* ]) { CompilerArguments::Ok(a) => a, o => panic!("Got unexpected parse result: {:?}", o), } } } #[test] fn test_parse_arguments_simple_c() { let a = parses!("-c", "foo.c", "-o", "foo.o"); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::C, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-c"], a.common_args); } #[test] fn test_parse_arguments_simple_cu_gcc() { let a = parses!("-c", "foo.cu", "-o", "foo.o"); assert_eq!(Some("foo.cu"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-c"], a.common_args); } #[test] fn test_parse_arguments_simple_cu_nvc() { let a = parses_nvc!("-c", "foo.cu", "-o", "foo.o"); assert_eq!(Some("foo.cu"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-c"], a.common_args); } #[test] fn test_parse_arguments_simple_cu_msvc() { let a = parses_msvc!("-c", "foo.cu", "-o", "foo.o"); assert_eq!(Some("foo.cu"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-c"], a.common_args); } #[test] fn test_parse_arguments_ccbin_no_path() { let a = parses!("-ccbin=gcc", "-c", "foo.cu", "-o", "foo.o"); assert_eq!(Some("foo.cu"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-ccbin", "gcc", "-c"], a.common_args); } #[test] fn test_parse_arguments_ccbin_dir() { let a = parses!("-ccbin=/usr/bin/", "-c", "foo.cu", "-o", "foo.o"); assert_eq!(Some("foo.cu"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-ccbin", "/usr/bin/", "-c"], a.common_args); } #[test] fn test_parse_threads_argument_simple_cu() { let a = parses!( "-t1", "-t=2", "-t", "3", "--threads=1", "--threads=2", "-c", "foo.cu", "-o", "foo.o" ); assert_eq!(Some("foo.cu"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!( ovec!["-t1", "-t=2", "-t3", "--threads", "1", "--threads", "2"], a.unhashed_args ); } #[test] fn test_parse_arguments_simple_c_as_cu() { let a = parses!("-x", "cu", "-c", "foo.c", "-o", "foo.o"); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-c"], a.common_args); } #[test] fn test_parse_arguments_dc_compile_flag() { let a = parses!("-x", "cu", "-dc", "foo.c", "-o", "foo.o"); assert_eq!(Some("foo.c"), a.input.to_str()); 
assert_eq!(Language::Cuda, a.language); assert_eq!(Some("-dc"), a.compilation_flag.to_str()); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-dc"], a.common_args); } #[test] fn test_parse_arguments_fatbin_compile_flag() { let a = parses!("-x", "cu", "-fatbin", "foo.c", "-o", "foo.o"); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_eq!(Some("-fatbin"), a.compilation_flag.to_str()); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-fatbin"], a.common_args); } #[test] fn test_parse_arguments_cubin_compile_flag() { let a = parses!("-x", "cu", "-cubin", "foo.c", "-o", "foo.o"); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_eq!(Some("-cubin"), a.compilation_flag.to_str()); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!(ovec!["-cubin"], a.common_args); } #[test] fn test_parse_arguments_values() { let a = parses!( "-c", "foo.cpp", "-fabc", "-I", "include-file", "-o", "foo.o", "--include-path", "include-file", "-isystem=/system/include/file", "-Werror", "cross-execution-space-call", "-Werror=all-warnings" ); assert_eq!(Some("foo.cpp"), a.input.to_str()); assert_eq!(Language::Cxx, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec![ "-Iinclude-file", "--include-path", "include-file", "-isystem", "/system/include/file", "-Werror", "cross-execution-space-call", "-Werror", "all-warnings" ], a.preprocessor_args ); assert!(a.dependency_args.is_empty()); assert_eq!(ovec!["-fabc", "-c"], a.common_args); } #[test] fn test_parse_md_mt_flags_cu() { let a = parses!( "-x", "cu", "-c", "foo.c", "-fabc", "-MD", "-MT", "foo.o", "-MF", "foo.o.d", "-o", "foo.o" ); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_eq!(Some("-c"), a.compilation_flag.to_str()); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec!["-MD", "-MF", "foo.o.d", "-MT", "foo.o"], a.dependency_args ); assert_eq!(ovec!["-fabc", "-c"], a.common_args); } #[test] fn test_parse_generate_code_flags() { let a = parses!( "-x", "cu", "--generate-code=arch=compute_61,code=sm_61", "-c", "foo.c", "-o", "foo.o" ); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert_eq!( ovec!["--generate-code", "arch=compute_61,code=sm_61", "-c"], a.common_args ); } #[test] fn test_parse_pass_to_host_flags() { let a = parses!( "-x=cu", "--generate-code=arch=compute_60,code=[sm_60,sm_61]", "-Xnvlink=--suppress-stack-size-warning", "-Xcompiler", "-fPIC,-fno-common", "-Xcompiler=-fvisibility=hidden", "-Xcompiler=-Wall,-Wno-unknown-pragmas,-Wno-unused-local-typedefs", "-Xcudafe", "--display_error_number", "-c", "foo.c", "-o", "foo.o" ); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec![ "-Xcompiler", 
"-fPIC,-fno-common", "-Xcompiler", "-fvisibility=hidden", "-Xcompiler", "-Wall,-Wno-unknown-pragmas,-Wno-unused-local-typedefs" ], a.preprocessor_args ); assert_eq!( ovec![ "--generate-code", "arch=compute_60,code=[sm_60,sm_61]", "-Xnvlink", "--suppress-stack-size-warning", "-Xcudafe", "--display_error_number", "-c" ], a.common_args ); } #[test] fn test_parse_no_capturing_of_xcompiler() { let a = parses!( "-x=cu", "-forward-unknown-to-host-compiler", "--expt-relaxed-constexpr", "-Xcompiler", "-pthread", "-std=c++14", "-c", "foo.c", "-o", "foo.o" ); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cuda, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec!["--expt-relaxed-constexpr", "-Xcompiler", "-pthread"], a.preprocessor_args ); assert_eq!( ovec!["-forward-unknown-to-host-compiler", "-std=c++14", "-c"], a.common_args ); } #[test] fn test_parse_dlink_is_not_compilation() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_gcc(stringvec![ "-forward-unknown-to-host-compiler", "--generate-code=arch=compute_50,code=[compute_50,sm_50,sm_52]", "-dlink", "main.cu.o", "-o", "device_link.o" ]) ); assert_eq!( CompilerArguments::NotCompilation, parse_arguments_nvc(stringvec![ "-forward-unknown-to-host-compiler", "--generate-code=arch=compute_50,code=[compute_50,sm_50,sm_52]", "-dlink", "main.cu.o", "-o", "device_link.o" ]) ); } #[test] fn test_parse_cant_cache_flags() { assert_eq!( CompilerArguments::CannotCache("-E", None), parse_arguments_gcc(stringvec!["-x", "cu", "-c", "foo.c", "-o", "foo.o", "-E"]) ); assert_eq!( CompilerArguments::CannotCache("-E", None), parse_arguments_msvc(stringvec!["-x", "cu", "-c", "foo.c", "-o", "foo.o", "-E"]) ); assert_eq!( CompilerArguments::CannotCache("-E", None), parse_arguments_nvc(stringvec!["-x", "cu", "-c", "foo.c", "-o", "foo.o", "-E"]) ); assert_eq!( CompilerArguments::CannotCache("-M", None), parse_arguments_gcc(stringvec!["-x", "cu", "-c", "foo.c", "-o", "foo.o", "-M"]) ); assert_eq!( CompilerArguments::CannotCache("-M", None), parse_arguments_msvc(stringvec!["-x", "cu", "-c", "foo.c", "-o", "foo.o", "-M"]) ); assert_eq!( CompilerArguments::CannotCache("-M", None), parse_arguments_nvc(stringvec!["-x", "cu", "-c", "foo.c", "-o", "foo.o", "-M"]) ); } } mozilla-sccache-40c3d6b/src/compiler/nvhpc.rs000066400000000000000000000321051475712407500212610ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#![allow(unused_imports, dead_code, unused_variables)] use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::gcc::ArgData::*; use crate::compiler::{ gcc, write_temp_file, CCompileCommand, Cacheable, CompileCommand, CompilerArguments, Language, }; use crate::mock_command::{CommandCreator, CommandCreatorSync, RunCommand}; use crate::util::{run_input_output, OsStrExt}; use crate::{counted_array, dist}; use async_trait::async_trait; use fs::File; use fs_err as fs; use log::Level::Trace; use std::ffi::OsString; use std::future::Future; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use std::process; use crate::errors::*; /// A unit struct on which to implement `CCompilerImpl`. #[derive(Clone, Debug)] pub struct Nvhpc { /// true iff this is nvc++. pub nvcplusplus: bool, pub version: Option, } #[async_trait] impl CCompilerImpl for Nvhpc { fn kind(&self) -> CCompilerKind { CCompilerKind::Nvhpc } fn plusplus(&self) -> bool { self.nvcplusplus } fn version(&self) -> Option { self.version.clone() } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments { gcc::parse_arguments( arguments, cwd, (&gcc::ARGS[..], &ARGS[..]), self.nvcplusplus, self.kind(), ) } #[allow(clippy::too_many_arguments)] async fn preprocess( &self, creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, rewrite_includes_only: bool, _preprocessor_cache_mode: bool, ) -> Result where T: CommandCreatorSync, { let language = match parsed_args.language { Language::C => Ok("c"), Language::Cxx => Ok("c++"), Language::ObjectiveC => Ok("objective-c"), Language::ObjectiveCxx => Ok("objective-c++"), Language::Cuda => Err(anyhow!("CUDA compilation not supported by nvhpc")), _ => Err(anyhow!("PCH not supported by nvhpc")), }?; let initialize_cmd_and_args = || { let mut command = creator.clone().new_command_sync(executable); command.args(&parsed_args.preprocessor_args); command.args(&parsed_args.common_args); command.arg("-x").arg(language).arg(&parsed_args.input); command }; let dep_before_preprocessor = || { //nvhpc doesn't support generating both the dependency information //and the preprocessor output at the same time. So if we have //need for both we need separate compiler invocations let mut dep_cmd = initialize_cmd_and_args(); let mut transformed_deps = vec![]; for item in parsed_args.dependency_args.iter() { if item == "-MD" { transformed_deps.push(OsString::from("-M")); } else if item == "-MMD" { transformed_deps.push(OsString::from("-MM")); } else { transformed_deps.push(item.clone()); } } dep_cmd .args(&transformed_deps) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); if log_enabled!(Trace) { trace!("dep-gen command: {:?}", dep_cmd); } dep_cmd }; trace!("preprocess"); let mut cmd = initialize_cmd_and_args(); //NVHPC doesn't support disabling line info when outputting to console cmd.arg("-E") .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); if log_enabled!(Trace) { trace!("preprocess: {:?}", cmd); } //Need to chain the dependency generation and the preprocessor //to emulate a `proper` front end if !parsed_args.dependency_args.is_empty() { let first = run_input_output(dep_before_preprocessor(), None); let second = run_input_output(cmd, None); // TODO: If we need to chain these to emulate a frontend, shouldn't // we explicitly wait on the first one before starting the second one? 
// (rather than via which drives these concurrently) let (_f, s) = futures::future::try_join(first, second).await?; Ok(s) } else { run_input_output(cmd, None).await } } fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> where T: CommandCreatorSync, { gcc::generate_compile_commands( path_transformer, executable, parsed_args, cwd, env_vars, self.kind(), rewrite_includes_only, gcc::language_to_gcc_arg, ) .map(|(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }) } } counted_array!(pub static ARGS: [ArgInfo; _] = [ //todo: refactor show_includes into dependency_args take_arg!("--gcc-toolchain", OsString, CanBeSeparated('='), PassThrough), take_arg!("--include-path", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("--linker-options", OsString, CanBeSeparated, PassThrough), take_arg!("--system-include-path", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-Mconcur", OsString, CanBeSeparated('='), PassThrough), flag!("-Mnostdlib", PreprocessorArgumentFlag), take_arg!("-Werror", OsString, CanBeSeparated, PreprocessorArgument), take_arg!("-Xcompiler", OsString, CanBeSeparated('='), PreprocessorArgument), take_arg!("-Xfatbinary", OsString, CanBeSeparated, PassThrough), take_arg!("-Xlinker", OsString, CanBeSeparated('='), PassThrough), take_arg!("-Xnvlink", OsString, CanBeSeparated, PassThrough), take_arg!("-Xptxas", OsString, CanBeSeparated, PassThrough), take_arg!("-acc", OsString, CanBeSeparated('='), PassThrough), flag!("-acclibs", PassThroughFlag), take_arg!("-c++", OsString, Concatenated, Standard), flag!("-c++libs", PassThroughFlag), flag!("-cuda", PreprocessorArgumentFlag), flag!("-cudaforlibs", PassThroughFlag), take_arg!("-cudalib", OsString, CanBeSeparated('='), PassThrough), flag!("-fortranlibs", PassThroughFlag), flag!("-gopt", PassThroughFlag), take_arg!("-gpu", OsString, CanBeSeparated('='), PassThrough), take_arg!("-mcmodel", OsString, CanBeSeparated('='), PassThrough), take_arg!("-mcpu", OsString, CanBeSeparated('='), PassThrough), flag!("-noswitcherror", PassThroughFlag), take_arg!("-ta", OsString, CanBeSeparated('='), PassThrough), take_arg!("-target", OsString, CanBeSeparated('='), PassThrough), take_arg!("-tp", OsString, CanBeSeparated('='), PassThrough), take_arg!("-x", OsString, CanBeSeparated('='), Language) ]); #[cfg(test)] mod test { use super::*; use crate::compiler::gcc; use crate::compiler::*; use crate::mock_command::*; use crate::test::utils::*; use std::collections::HashMap; use std::path::PathBuf; fn parse_arguments_(arguments: Vec) -> CompilerArguments { let arguments = arguments.iter().map(OsString::from).collect::>(); Nvhpc { nvcplusplus: false, version: None, } .parse_arguments(&arguments, ".".as_ref(), &[]) } macro_rules! 
parses { ( $( $s:expr ),* ) => { match parse_arguments_(vec![ $( $s.to_string(), )* ]) { CompilerArguments::Ok(a) => a, o => panic!("Got unexpected parse result: {:?}", o), } } } #[test] fn test_parse_arguments_simple_c() { let a = parses!("-c", "foo.c", "-o", "foo.o"); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::C, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert!(a.common_args.is_empty()); } #[test] fn test_parse_arguments_simple_cxx() { let a = parses!("-c", "foo.cxx", "-o", "foo.o"); assert_eq!(Some("foo.cxx"), a.input.to_str()); assert_eq!(Language::Cxx, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert!(a.preprocessor_args.is_empty()); assert!(a.common_args.is_empty()); } #[test] fn test_parse_arguments_values() { let a = parses!( "-c", "foo.cpp", "-fabc", "-I", "include-file", "-o", "foo.o", "--include-path", "include-file", "-isystem", "/system/include/file", "-gpu=ccnative", "-Werror", "an_error" ); assert_eq!(Some("foo.cpp"), a.input.to_str()); assert_eq!(Language::Cxx, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec![ "-Iinclude-file", "--include-path", "include-file", "-isystem", "/system/include/file", "-Werror", "an_error" ], a.preprocessor_args ); assert!(a.dependency_args.is_empty()); assert_eq!(ovec!["-fabc", "-gpu", "ccnative"], a.common_args); } #[test] fn test_parse_md_mt_flags_cxx() { let a = parses!( "-x", "c++", "-c", "foo.c", "-fabc", "-MD", "-MT", "foo.o", "-MF", "foo.o.d", "-o", "foo.o" ); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cxx, a.language); assert_eq!(Some("-c"), a.compilation_flag.to_str()); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!( ovec!["-MD", "-MF", "foo.o.d", "-MT", "foo.o"], a.dependency_args ); assert_eq!(ovec!["-fabc"], a.common_args); } #[test] fn test_parse_generate_code_flags() { let a = parses!( "-x", "c++", "-cuda", "-gpu=cc60,cc70", "-c", "foo.c", "-o", "foo.o" ); assert_eq!(Some("foo.c"), a.input.to_str()); assert_eq!(Language::Cxx, a.language); assert_map_contains!( a.outputs, ( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false } ) ); assert_eq!(ovec!["-cuda"], a.preprocessor_args); assert_eq!(ovec!["-gpu", "cc60,cc70"], a.common_args); } #[test] fn test_parse_cant_cache_flags() { assert_eq!( CompilerArguments::CannotCache("-E", None), parse_arguments_(stringvec!["-c", "foo.c", "-o", "foo.o", "-E"]) ); assert_eq!( CompilerArguments::CannotCache("-M", None), parse_arguments_(stringvec!["-c", "foo.c", "-o", "foo.o", "-M"]) ); } } mozilla-sccache-40c3d6b/src/compiler/preprocessor_cache.rs000066400000000000000000000636641475712407500240320ustar00rootroot00000000000000// Copyright 2023 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. //! The preprocessor cache entry is a description of all information needed //! to cache pre-processor output in C-family languages for a given input file. //! The current implementation is very much inspired from the "manifest" //! that `ccache` uses for its "direct mode", though the on-disk format is //! different. use std::{ collections::{BTreeMap, HashSet}, ffi::{OsStr, OsString}, hash::Hash, io::Write, path::{Path, PathBuf}, time::SystemTime, }; use anyhow::Context; use chrono::Datelike; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use crate::{ cache::PreprocessorCacheModeConfig, util::{encode_path, Digest, HashToDigest, MetadataCtimeExt, Timestamp}, }; use super::Language; /// The current format is 1 header byte for the version + bincode encoding /// of the [`PreprocessorCacheEntry`] struct. const FORMAT_VERSION: u8 = 0; const MAX_PREPROCESSOR_CACHE_ENTRIES: usize = 100; const MAX_PREPROCESSOR_CACHE_FILE_INFO_ENTRIES: usize = 10000; #[derive(Deserialize, Serialize, Debug, Default, PartialEq, Eq)] pub struct PreprocessorCacheEntry { /// A counter of the overall number of [`IncludeEntry`] in this /// preprocessor cache entry, as an optimization when checking /// we're not ballooning in size. number_of_entries: usize, /// The digest of a result is computed by hashing the output of the /// C preprocessor. Entries correspond to the included files during the /// preprocessing step. results: BTreeMap>, } impl PreprocessorCacheEntry { pub fn new() -> Self { Default::default() } /// Tries to deserialize a preprocessor cache entry from `contents` pub fn read(contents: &[u8]) -> Result { if contents.is_empty() { Ok(Self { number_of_entries: 0, results: Default::default(), }) } else if contents[0] != FORMAT_VERSION { Err(Error::UnknownFormat(contents[0])) } else { Ok(bincode::deserialize(&contents[1..])?) } } /// Serialize the preprocessor cache entry to `buf` pub fn serialize_to(&self, mut buf: impl Write) -> Result<(), Error> { // Add the starting byte for version check since `bincode` doesn't // support it. buf.write_all(&[FORMAT_VERSION])?; bincode::serialize_into(buf, self)?; Ok(()) } /// Insert the full compilation key and included files for a given source file. /// /// There can be more than one result at once for a source file if one /// or more of the include files has changed but not the source file. pub fn add_result( &mut self, compilation_time_start: SystemTime, result_key: &str, included_files: impl IntoIterator, ) { if self.results.len() > MAX_PREPROCESSOR_CACHE_ENTRIES { // Normally, there shouldn't be many result entries in the // preprocessor cache entry since new entries are added only if // an include file has changed but not the source file, and you // typically change source files more often than header files. // However, it's certainly possible to imagine cases where the // preprocessor cache entry will grow large (for instance, // a generated header file that changes for every build), and this // must be taken care of since processing an ever growing // preprocessor cache entry eventually will take too much time. // A good way of solving this would be to maintain the // result entries in LRU order and discarding the old ones. // An easy way is to throw away all entries when there are too many. // Let's do that for now. 
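            // (A hypothetical sketch of the LRU variant, were it implemented:
            // key each result by a monotonically increasing use counter and
            // evict the entry with the smallest counter once over capacity,
            // rather than clearing the whole map.)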
debug!( "Too many entries in preprocessor cache entry file ({}/{}), starting over", self.results.len(), MAX_PREPROCESSOR_CACHE_ENTRIES ); self.results.clear(); self.number_of_entries = 0; } let includes: Result, std::io::Error> = included_files .into_iter() .map(|(digest, path)| { let meta = std::fs::symlink_metadata(&path)?; let mtime: Option = meta.modified().ok().map(|t| t.into()); let ctime = meta.ctime_or_creation().ok(); let should_cache_time = match (mtime, ctime) { (Some(mtime), Some(ctime)) => { Timestamp::from(compilation_time_start) > mtime.max(ctime) } _ => false, }; Ok(IncludeEntry { path: path.into_os_string(), digest, file_size: meta.len(), mtime: if should_cache_time { mtime } else { None }, ctime: if should_cache_time { ctime } else { None }, }) }) .collect(); match includes { Ok(includes) => { let new_number_of_entries = includes.len() + self.number_of_entries; if new_number_of_entries > MAX_PREPROCESSOR_CACHE_FILE_INFO_ENTRIES { // Rarely, entries can grow large in pathological cases // where many included files change, but the main file // does not. This also puts an upper bound on the number // of entries. debug!( "Too many include entries in preprocessor cache entry file ({}/{}), starting over", new_number_of_entries, MAX_PREPROCESSOR_CACHE_FILE_INFO_ENTRIES ); self.results.clear(); } match self.results.entry(result_key.to_string()) { std::collections::btree_map::Entry::Occupied(mut entry) => { self.number_of_entries -= entry.get().len(); self.number_of_entries += includes.len(); *entry.get_mut() = includes; } std::collections::btree_map::Entry::Vacant(vacant) => { self.number_of_entries += includes.len(); vacant.insert(includes); } }; debug!("Added result key {result_key} to preprocessor cache entry"); } Err(e) => { debug!("Could not add result key {result_key} to preprocessor cache entry: {e}"); } } } /// Returns the digest of the first result whose expected included files /// are already on disk and have not changed. pub fn lookup_result_digest( &mut self, config: PreprocessorCacheModeConfig, updated: &mut bool, ) -> Option { // Check newest result first since it's more likely to match. for (digest, includes) in self.results.iter_mut().rev() { let result_matches = Self::result_matches(digest, includes, config, updated); if result_matches { return Some(digest.to_string()); } } None } /// A result matches if all of its include files exist on disk and have not changed. 
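    ///
    /// Concretely, each recorded include is checked in order: file size
    /// first, then mtime/ctime when they were recorded and stat-matching is
    /// enabled in the config, and finally a content digest. Occurrences of
    /// `__DATE__`/`__TIMESTAMP__` are folded into a fresh digest so changes
    /// in their expansion are detected, while any `__TIME__` occurrence
    /// makes the result a miss.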
fn result_matches( digest: &str, includes: &mut [IncludeEntry], config: PreprocessorCacheModeConfig, updated: &mut bool, ) -> bool { for include in includes { let path = Path::new(include.path.as_os_str()); let meta = match std::fs::symlink_metadata(path) { Ok(meta) => { if meta.len() != include.file_size { return false; } meta } Err(e) => { debug!( "{} is in a preprocessor cache entry but can't be read ({})", path.display(), e ); return false; } }; if config.file_stat_matches { match (include.mtime, include.ctime) { (Some(mtime), Some(ctime)) if config.use_ctime_for_stat => { let mtime_matches = meta.modified().map(Into::into).ok() == Some(mtime); let ctime_matches = meta.ctime_or_creation().map(Into::into).ok() == Some(ctime); if mtime_matches && ctime_matches { trace!("mtime+ctime hit for {}", path.display()); continue; } else { trace!("mtime+ctime miss for {}", path.display()); } } (Some(mtime), None) => { let mtime_matches = meta.modified().map(Into::into).ok() == Some(mtime); if mtime_matches { trace!("mtime hit for {}", path.display()); continue; } else { trace!("mtime miss for {}", path.display()); } } _ => { /* Nothing was recorded, fall back to contents comparison */ } } } let file = match std::fs::File::open(path) { Ok(file) => file, Err(e) => { debug!( "{} is in a preprocessor cache entry but can't be opened ({})", path.display(), e ); return false; } }; if config.ignore_time_macros { match Digest::reader_sync(file) { Ok(new_digest) => return include.digest == new_digest, Err(e) => { debug!( "{} is in a preprocessor cache entry but can't be read ({})", path.display(), e ); return false; } } } else { let (new_digest, finder): (String, _) = match Digest::reader_sync_time_macros(file) { Ok((new_digest, finder)) => (new_digest, finder), Err(e) => { debug!( "{} is in a preprocessor cache entry but can't be read ({})", path.display(), e ); return false; } }; if !finder.found_time_macros() && include.digest != new_digest { return false; } if finder.found_time() { // We don't know for sure that the program actually uses the __TIME__ macro, // but we have to assume it anyway and hash the time stamp. However, that's // not very useful since the chance that we get a cache hit later the same // second should be quite slim... So, just signal back to the caller that // __TIME__ has been found so that the preprocessor cache mode can be disabled. debug!("Found __TIME__ in {}", path.display()); return false; } // __DATE__ or __TIMESTAMP__ found. We now make sure that the digest changes // if the (potential) expansion of those macros changes by computing a new // digest comprising the file digest and time information that represents the // macro expansions. let mut new_digest = Digest::new(); new_digest.update(digest.as_bytes()); if finder.found_date() { debug!("found __DATE__ in {}", path.display()); new_digest.delimiter(b"date"); let date = chrono::Local::now().date_naive(); new_digest.update(&date.year().to_le_bytes()); new_digest.update(&date.month().to_le_bytes()); new_digest.update(&date.day().to_le_bytes()); // If the compiler has support for it, the expansion of __DATE__ will change // according to the value of SOURCE_DATE_EPOCH. Note: We have to hash both // SOURCE_DATE_EPOCH and the current date since we can't be sure that the // compiler honors SOURCE_DATE_EPOCH. 
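                    // SOURCE_DATE_EPOCH is a Unix timestamp in seconds, e.g.
                    // SOURCE_DATE_EPOCH=1577836800 for 2020-01-01T00:00:00Z
                    // (per the reproducible-builds.org specification).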
if let Ok(source_date_epoch) = std::env::var("SOURCE_DATE_EPOCH") { new_digest.update(source_date_epoch.as_bytes()) } } if finder.found_timestamp() { debug!("found __TIMESTAMP__ in {}", path.display()); let meta = match std::fs::symlink_metadata(path) { Ok(meta) => meta, Err(e) => { debug!( "{} is in a preprocessor cache entry but can't be read ({})", path.display(), e ); return false; } }; let mtime = match meta.modified() { Ok(mtime) => mtime, Err(_) => { debug!( "Couldn't get mtime of {} which contains __TIMESTAMP__", path.display() ); return false; } }; let mtime: chrono::DateTime = chrono::DateTime::from(mtime); new_digest.delimiter(b"timestamp"); new_digest.update(&mtime.naive_local().and_utc().timestamp().to_le_bytes()); include.digest = new_digest.finish(); // Signal that the preprocessor cache entry has been updated and needs to be // written to disk. *updated = true; } } } true } } /// Environment variables that are factored into the preprocessor cache entry cached key. static CACHED_ENV_VARS: Lazy> = Lazy::new(|| { [ // SCCACHE_C_CUSTOM_CACHE_BUSTER has no particular meaning behind it, // serving as a way for the user to factor custom data into the hash. // One can set it to different values for different invocations // to prevent cache reuse between them. "SCCACHE_C_CUSTOM_CACHE_BUSTER", "CPATH", "C_INCLUDE_PATH", "CPLUS_INCLUDE_PATH", "OBJC_INCLUDE_PATH", "OBJCPLUS_INCLUDE_PATH", ] .iter() .map(OsStr::new) .collect() }); /// Compute the hash key of compiler preprocessing `input` with `args`. #[allow(clippy::too_many_arguments)] pub fn preprocessor_cache_entry_hash_key( compiler_digest: &str, language: Language, arguments: &[OsString], extra_hashes: &[String], env_vars: &[(OsString, OsString)], input_file: &Path, plusplus: bool, config: PreprocessorCacheModeConfig, ) -> anyhow::Result> { // If you change any of the inputs to the hash, you should change `FORMAT_VERSION`. let mut m = Digest::new(); m.update(compiler_digest.as_bytes()); // clang and clang++ have different behavior despite being byte-for-byte identical binaries, so // we have to incorporate that into the hash as well. m.update(&[plusplus as u8]); m.update(&[FORMAT_VERSION]); m.update(language.as_str().as_bytes()); for arg in arguments { arg.hash(&mut HashToDigest { digest: &mut m }); } for hash in extra_hashes { m.update(hash.as_bytes()); } for (var, val) in env_vars.iter() { if CACHED_ENV_VARS.contains(var.as_os_str()) { var.hash(&mut HashToDigest { digest: &mut m }); m.update(&b"="[..]); val.hash(&mut HashToDigest { digest: &mut m }); } } // Hash the input file otherwise: // - a/r.h exists. // - a/x.c has #include "r.h". // - b/x.c is identical to a/x.c. // - Compiling a/x.c records a/r.h in the preprocessor cache entry. // - Compiling b/x.c results in a false cache hit since a/x.c and b/x.c // share preprocessor cache entries and a/r.h exists. let mut buf = vec![]; encode_path(&mut buf, input_file)?; m.update(&buf); let reader = std::fs::File::open(input_file) .with_context(|| format!("while hashing the input file '{}'", input_file.display()))?; let digest = if config.ignore_time_macros { Digest::reader_sync(reader)? 
} else { let (digest, finder) = Digest::reader_sync_time_macros(reader)?; if finder.found_time() { // Disable preprocessor cache mode debug!("Found __TIME__ in {}", input_file.display()); return Ok(None); } digest }; m.update(digest.as_bytes()); Ok(Some(m.finish())) } /// Corresponds to a cached include file used in the pre-processor stage #[derive(Deserialize, Serialize, Debug, PartialEq, Eq)] pub struct IncludeEntry { /// Its absolute path path: OsString, /// The hash of its contents digest: String, /// Its file size, in bytes file_size: u64, /// Its modification time, `None` if not recorded. mtime: Option, /// Its status change time, `None` if not recorded. ctime: Option, } #[derive(Debug)] pub enum Error { Io(std::io::Error), Deserialization(bincode::Error), UnknownFormat(u8), } impl From for Error { fn from(e: std::io::Error) -> Self { Self::Io(e) } } impl From for Error { fn from(e: bincode::Error) -> Self { Self::Deserialization(e) } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Io(e) => e.fmt(f), Error::Deserialization(e) => e.fmt(f), Error::UnknownFormat(format) => f.write_fmt(format_args!( "Unknown preprocessor cache entry format {:x}", format )), } } } impl std::error::Error for Error {} #[cfg(test)] mod test { use crate::util::{HASH_BUFFER_SIZE, MAX_TIME_MACRO_HAYSTACK_LEN}; use super::*; #[test] fn test_find_time_macros_empty_file() { let buf: Vec = vec![]; let hash = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().0; assert_eq!(hash, Digest::new().finish()); } #[test] fn test_find_time_macros_small_file_no_match() { let buf = b"This is a small file, which doesn't contain any time macros."; let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(!finder.found_time_macros()); } #[test] fn test_find_time_macros_small_file_match() { let buf = b"__TIME__"; let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(finder.found_time()); assert!(!finder.found_timestamp()); assert!(!finder.found_date()); let buf = b"__DATE__"; let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(!finder.found_time()); assert!(!finder.found_timestamp()); assert!(finder.found_date()); let buf = b"__TIMESTAMP__"; let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(!finder.found_time()); assert!(finder.found_timestamp()); assert!(!finder.found_date()); } #[test] fn test_find_time_macros_small_file_match_multiple() { let buf = b"__TIMESTAMP____DATE____TIME__"; let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(finder.found_time()); assert!(finder.found_timestamp()); assert!(finder.found_date()); } #[test] fn test_find_time_macros_large_file_no_match() { let buf = vec![0; HASH_BUFFER_SIZE * 2]; let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(!finder.found_time_macros()); assert!(!finder.found_time()); assert!(!finder.found_timestamp()); assert!(!finder.found_date()); } #[test] fn test_find_time_macros_large_file_match_no_overlap() { let mut buf = vec![0; HASH_BUFFER_SIZE * 2]; buf.extend(b"__TIMESTAMP____DATE____TIME__"); let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(finder.found_time()); assert!(finder.found_timestamp()); 
assert!(finder.found_date()); } #[test] fn test_find_time_macros_large_file_match_overlap() { let mut buf = vec![0; HASH_BUFFER_SIZE * 2]; // Make the pattern overlap two buffer chunks to make sure we account for this let start = HASH_BUFFER_SIZE - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__TIMESTAMP__".len()].copy_from_slice(b"__TIMESTAMP__"); let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(!finder.found_time()); assert!(finder.found_timestamp()); assert!(!finder.found_date()); let mut buf = vec![0; HASH_BUFFER_SIZE * 2]; // Make the pattern overlap two buffer chunks to make sure we account for this let start = HASH_BUFFER_SIZE - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__TIME__".len()].copy_from_slice(b"__TIME__"); let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(finder.found_time()); assert!(!finder.found_timestamp()); assert!(!finder.found_date()); let mut buf = vec![0; HASH_BUFFER_SIZE * 2]; // Make the pattern overlap two buffer chunks to make sure we account for this let start = HASH_BUFFER_SIZE - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__DATE__".len()].copy_from_slice(b"__DATE__"); let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(!finder.found_time()); assert!(!finder.found_timestamp()); assert!(finder.found_date()); } #[test] fn test_find_time_macros_large_file_match_overlap_multiple_pages() { let mut buf = vec![0; HASH_BUFFER_SIZE * 3]; // Make the patterns overlap buffer chunks twice to make sure we account for this let start = HASH_BUFFER_SIZE - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__TIME__".len()].copy_from_slice(b"__TIME__"); let start = HASH_BUFFER_SIZE * 2 - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__DATE__".len()].copy_from_slice(b"__DATE__"); let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(finder.found_time()); assert!(!finder.found_timestamp()); assert!(finder.found_date()); } #[test] fn test_find_time_macros_large_file_match_overlap_multiple_pages_tiny() { let mut buf = vec![0; HASH_BUFFER_SIZE * 3]; // Make the patterns overlap buffer chunks twice to make sure we account for this let start = HASH_BUFFER_SIZE - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__TIME__".len()].copy_from_slice(b"__TIME__"); let start = HASH_BUFFER_SIZE * 2 - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__DATE__".len()].copy_from_slice(b"__DATE__"); // Test overlap with the last chunk being less than the haystack buf.extend([0; MAX_TIME_MACRO_HAYSTACK_LEN / 2 + 1]); let start = HASH_BUFFER_SIZE * 3 - MAX_TIME_MACRO_HAYSTACK_LEN / 2; buf[start..][..b"__TIMESTAMP__".len()].copy_from_slice(b"__TIMESTAMP__"); let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(finder.found_time_macros()); assert!(finder.found_time()); assert!(finder.found_timestamp()); assert!(finder.found_date()); } #[test] fn test_find_time_macros_ghost_pattern() { // Check the (unlikely) case of a pattern being spread between the // start of a chunk and its end. 
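        // Here "__TI" lands at the very start of the second chunk and "ME__"
        // at its very end; an implementation that wrongly stitched a chunk's
        // edges together could concatenate them into a spurious "__TIME__"
        // match even though the fragments are a full chunk apart in the file.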
let mut buf = vec![0; HASH_BUFFER_SIZE * 3]; buf[HASH_BUFFER_SIZE..HASH_BUFFER_SIZE + b"__TI".len()].copy_from_slice(b"__TI"); buf[HASH_BUFFER_SIZE * 2 - "ME__".len()..HASH_BUFFER_SIZE * 2].copy_from_slice(b"ME__"); let finder = Digest::reader_sync_time_macros(buf.as_slice()).unwrap().1; assert!(!finder.found_time_macros()); assert!(!finder.found_time()); assert!(!finder.found_timestamp()); assert!(!finder.found_date()); } } mozilla-sccache-40c3d6b/src/compiler/ptxas.rs000066400000000000000000000065231475712407500213070ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(unused_imports, dead_code, unused_variables)] use crate::compiler::args::*; use crate::compiler::c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}; use crate::compiler::cicc; use crate::compiler::{ CCompileCommand, Cacheable, ColorMode, CompileCommand, CompilerArguments, Language, SingleCompileCommand, }; use crate::{counted_array, dist}; use crate::mock_command::{CommandCreator, CommandCreatorSync, RunCommand}; use async_trait::async_trait; use std::collections::HashMap; use std::ffi::OsString; use std::fs; use std::path::{Path, PathBuf}; use std::process; use crate::errors::*; /// A unit struct on which to implement `CCompilerImpl`. 
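/// `ptxas` is the PTX assembler stage of the CUDA toolchain. Like `cicc`,
/// it is an internal tool that nvcc invokes, so argument parsing, the
/// preprocessing stub, and compile-command generation are all delegated to
/// the `cicc` module, with the output language fixed to `Language::Cubin`.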
#[derive(Clone, Debug)] pub struct Ptxas { pub version: Option, } #[async_trait] impl CCompilerImpl for Ptxas { fn kind(&self) -> CCompilerKind { CCompilerKind::Ptxas } fn plusplus(&self) -> bool { true } fn version(&self) -> Option { self.version.clone() } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments { cicc::parse_arguments(arguments, cwd, Language::Cubin, &ARGS[..], 3) } #[allow(clippy::too_many_arguments)] async fn preprocess( &self, _creator: &T, _executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, _env_vars: &[(OsString, OsString)], _may_dist: bool, _rewrite_includes_only: bool, _preprocessor_cache_mode: bool, ) -> Result where T: CommandCreatorSync, { cicc::preprocess(cwd, parsed_args).await } fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], _rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> where T: CommandCreatorSync, { cicc::generate_compile_commands(path_transformer, executable, parsed_args, cwd, env_vars) .map(|(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }) } } use cicc::ArgData::*; counted_array!(pub static ARGS: [ArgInfo; _] = [ take_arg!("-arch", OsString, CanBeSeparated('='), PassThrough), take_arg!("-m", OsString, CanBeSeparated('='), PassThrough), take_arg!("-o", PathBuf, Separated, Output), ]); mozilla-sccache-40c3d6b/src/compiler/rust.rs000066400000000000000000004024021475712407500211410ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
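//! Support for caching `rustc` compilations, focused on the invocations
//! that cargo produces. See the caveats on `parse_arguments` below for the
//! subset of rustc command lines that is considered cacheable.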
use crate::cache::{FileObjectSource, Storage}; use crate::compiler::args::*; use crate::compiler::{ c::ArtifactDescriptor, CCompileCommand, Cacheable, ColorMode, Compilation, CompileCommand, Compiler, CompilerArguments, CompilerHasher, CompilerKind, CompilerProxy, HashResult, Language, SingleCompileCommand, }; #[cfg(feature = "dist-client")] use crate::compiler::{DistPackagers, OutputsRewriter}; #[cfg(feature = "dist-client")] use crate::dist::pkg; #[cfg(feature = "dist-client")] use crate::lru_disk_cache::{LruCache, Meter}; use crate::mock_command::{CommandCreatorSync, RunCommand}; use crate::util::{fmt_duration_as_secs, hash_all, hash_all_archives, run_input_output, Digest}; use crate::util::{HashToDigest, OsStrExt}; use crate::{counted_array, dist}; use async_trait::async_trait; use filetime::FileTime; use fs_err as fs; use log::Level::Trace; use once_cell::sync::Lazy; #[cfg(feature = "dist-client")] use semver::Version; #[cfg(feature = "dist-client")] use std::borrow::Borrow; use std::borrow::Cow; #[cfg(feature = "dist-client")] use std::collections::hash_map::RandomState; use std::collections::{HashMap, HashSet}; use std::env::consts::DLL_EXTENSION; #[cfg(feature = "dist-client")] use std::env::consts::{DLL_PREFIX, EXE_EXTENSION}; use std::ffi::OsString; use std::fmt; use std::future::Future; use std::hash::Hash; #[cfg(feature = "dist-client")] use std::io; use std::io::Read; use std::iter; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::process; use std::sync::Arc; #[cfg(feature = "dist-client")] use std::sync::Mutex; use std::time; use crate::errors::*; #[cfg(feature = "dist-client")] const RLIB_PREFIX: &str = "lib"; #[cfg(feature = "dist-client")] const RLIB_EXTENSION: &str = "rlib"; #[cfg(feature = "dist-client")] const RMETA_EXTENSION: &str = "rmeta"; /// Directory in the sysroot containing binary to which rustc is linked. #[cfg(feature = "dist-client")] const BINS_DIR: &str = "bin"; /// Directory in the sysroot containing shared libraries to which rustc is linked. #[cfg(not(windows))] const LIBS_DIR: &str = "lib"; /// Directory in the sysroot containing shared libraries to which rustc is linked. #[cfg(windows)] const LIBS_DIR: &str = "bin"; /// A struct on which to hang a `Compiler` impl. #[derive(Debug, Clone)] pub struct Rust { /// The path to the rustc executable. executable: PathBuf, /// The host triple for this rustc. host: String, /// The verbose version for this rustc. /// /// Hash calculation will take this version into consideration to prevent /// cached object broken after version bump. /// /// Looks like the following: /// /// ```shell /// :) rustc -vV /// rustc 1.66.1 (90743e729 2023-01-10) /// binary: rustc /// commit-hash: 90743e7298aca107ddaa0c202a4d3604e29bfeb6 /// commit-date: 2023-01-10 /// host: x86_64-unknown-linux-gnu /// release: 1.66.1 /// LLVM version: 15.0.2 /// ``` version: String, /// The path to the rustc sysroot. sysroot: PathBuf, /// The digests of all the shared libraries in rustc's $sysroot/lib (or /bin on Windows). compiler_shlibs_digests: Vec, /// A shared, caching reader for rlib dependencies #[cfg(feature = "dist-client")] rlib_dep_reader: Option>, } /// A struct on which to hang a `CompilerHasher` impl. #[derive(Debug, Clone)] pub struct RustHasher { /// The path to the rustc executable, not the rustup proxy. executable: PathBuf, /// The host triple for this rustc. host: String, /// The version for this rustc. version: String, /// The path to the rustc sysroot. 
sysroot: PathBuf, /// The digests of all the shared libraries in rustc's $sysroot/lib (or /bin on Windows). compiler_shlibs_digests: Vec, /// A shared, caching reader for rlib dependencies #[cfg(feature = "dist-client")] rlib_dep_reader: Option>, /// Parsed arguments from the rustc invocation parsed_args: ParsedArguments, } /// a lookup proxy for determining the actual compiler used per file or directory #[derive(Debug, Clone)] pub struct RustupProxy { proxy_executable: PathBuf, } #[derive(Debug, Clone, PartialEq)] pub struct ParsedArguments { /// The full commandline, with all parsed arguments arguments: Vec>, /// The location of compiler outputs. output_dir: PathBuf, /// Paths to extern crates used in the compile. externs: Vec, /// The directories searched for rlibs crate_link_paths: Vec, /// Static libraries linked to in the compile. staticlibs: Vec, /// The crate name passed to --crate-name. crate_name: String, /// The crate types that will be generated crate_types: CrateTypes, /// If dependency info is being emitted, the name of the dep info file. dep_info: Option, /// If profile info is being emitted, the path of the profile. /// /// This could be filled while `-Cprofile-use` been enabled. /// /// We need to add the profile into our outputs to enable distributed compilation. /// We don't need to track `profile-generate` since it's users work to make sure /// the `profdata` been generated from profraw files. /// /// For more information, see https://doc.rust-lang.org/rustc/profile-guided-optimization.html profile: Option, /// If `-Z profile` has been enabled, we will use a GCC-compatible, gcov-based /// coverage implementation. /// /// This is not supported in latest stable rust anymore, but we still keep it here /// for the old nightly rustc. /// /// We need to add the profile into our outputs to enable distributed compilation. /// /// For more information, see https://doc.rust-lang.org/rustc/instrument-coverage.html gcno: Option, /// rustc says that emits .rlib for --emit=metadata /// https://github.com/rust-lang/rust/issues/54852 emit: HashSet, /// The value of any `--color` option passed on the commandline. color_mode: ColorMode, /// Whether `--json` was passed to this invocation. has_json: bool, /// A `--target` parameter that specifies a path to a JSON file. target_json: Option, } /// A struct on which to hang a `Compilation` impl. #[derive(Debug, Clone)] pub struct RustCompilation { /// The path to the rustc executable, not the rustup proxy. executable: PathBuf, /// The host triple for this rustc. host: String, /// The sysroot for this rustc sysroot: PathBuf, /// A shared, caching reader for rlib dependencies #[cfg(feature = "dist-client")] rlib_dep_reader: Option>, /// All arguments passed to rustc arguments: Vec>, /// The compiler inputs. inputs: Vec, /// The compiler outputs. outputs: HashMap, /// The directories searched for rlibs crate_link_paths: Vec, /// The crate name being compiled. crate_name: String, /// The crate types that will be generated crate_types: CrateTypes, /// If dependency info is being emitted, the name of the dep info file. dep_info: Option, /// The current working directory cwd: PathBuf, /// The environment variables env_vars: Vec<(OsString, OsString)>, } // The selection of crate types for this compilation #[derive(Debug, Clone, PartialEq, Eq)] pub struct CrateTypes { rlib: bool, staticlib: bool, } /// Emit types that we will cache. 
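/// Requests for any other emit type (e.g. `asm` or `llvm-ir`) are expected
/// to make the invocation uncacheable, since those outputs are not tracked
/// here.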
static ALLOWED_EMIT: Lazy> = Lazy::new(|| ["link", "metadata", "dep-info"].iter().copied().collect()); /// Version number for cache key. const CACHE_VERSION: &[u8] = b"6"; /// Get absolute paths for all source files and env-deps listed in rustc's dep-info output. async fn get_source_files_and_env_deps( creator: &T, crate_name: &str, executable: &Path, arguments: &[OsString], cwd: &Path, env_vars: &[(OsString, OsString)], pool: &tokio::runtime::Handle, ) -> Result<(Vec, Vec<(OsString, OsString)>)> where T: CommandCreatorSync, { let start = time::Instant::now(); // Get the full list of source files from rustc's dep-info. let temp_dir = tempfile::Builder::new() .prefix("sccache") .tempdir() .context("Failed to create temp dir")?; let dep_file = temp_dir.path().join("deps.d"); let mut cmd = creator.clone().new_command_sync(executable); cmd.args(arguments) .args(&["--emit", "dep-info"]) .arg("-o") .arg(&dep_file) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); trace!("[{}]: get dep-info: {:?}", crate_name, cmd); // Output of command is in file under dep_file, so we ignore stdout&stderr let _dep_info = run_input_output(cmd, None).await?; // Parse the dep-info file, then hash the contents of those files. let cwd = cwd.to_owned(); let name2 = crate_name.to_owned(); let parsed = pool .spawn_blocking(move || { parse_dep_file(&dep_file, &cwd) .with_context(|| format!("Failed to parse dep info for {}", name2)) }) .await?; parsed.map(move |(files, env_deps)| { trace!( "[{}]: got {} source files and {} env-deps from dep-info in {}", crate_name, files.len(), env_deps.len(), fmt_duration_as_secs(&start.elapsed()) ); // Just to make sure we capture temp_dir. drop(temp_dir); (files, env_deps) }) } /// Parse dependency info from `file` and return a Vec of files mentioned. /// Treat paths as relative to `cwd`. fn parse_dep_file(file: T, cwd: U) -> Result<(Vec, Vec<(OsString, OsString)>)> where T: AsRef, U: AsRef, { let mut f = fs::File::open(file.as_ref())?; let mut deps = String::new(); f.read_to_string(&mut deps)?; Ok((parse_dep_info(&deps, cwd), parse_env_dep_info(&deps))) } fn parse_dep_info(dep_info: &str, cwd: T) -> Vec where T: AsRef, { let cwd = cwd.as_ref(); // Just parse the first line, which should have the dep-info file and all // source files. let line = match dep_info.lines().next() { None => return vec![], Some(l) => l, }; let pos = match line.find(": ") { None => return vec![], Some(p) => p, }; let mut deps = Vec::new(); let mut current_dep = String::new(); let mut iter = line[pos + 2..].chars().peekable(); loop { match iter.next() { Some('\\') => { if iter.peek() == Some(&' ') { current_dep.push(' '); iter.next(); } else { current_dep.push('\\'); } } Some(' ') => { deps.push(current_dep); current_dep = String::new(); } Some(c) => current_dep.push(c), None => { if !current_dep.is_empty() { deps.push(current_dep); } break; } } } let mut deps = deps.iter().map(|s| cwd.join(s)).collect::>(); deps.sort(); deps } fn parse_env_dep_info(dep_info: &str) -> Vec<(OsString, OsString)> { let mut env_deps = Vec::new(); for line in dep_info.lines() { if let Some(env_dep) = line.strip_prefix("# env-dep:") { let mut split = env_dep.splitn(2, '='); match (split.next(), split.next()) { (Some(var), Some(val)) => env_deps.push((var.into(), val.into())), _ => env_deps.push((env_dep.into(), "".into())), } } } env_deps } /// Run `rustc --print file-names` to get the outputs of compilation. 
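/// For example (illustrative), a crate compiled with `--crate-name foo
/// --crate-type lib` would print a single line such as `libfoo.rlib`.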
async fn get_compiler_outputs( creator: &T, executable: &Path, arguments: Vec, cwd: &Path, env_vars: &[(OsString, OsString)], ) -> Result> where T: Clone + CommandCreatorSync, { let mut cmd = creator.clone().new_command_sync(executable); cmd.args(&arguments) .args(&["--print", "file-names"]) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); if log_enabled!(Trace) { trace!("get_compiler_outputs: {:?}", cmd); } let outputs = run_input_output(cmd, None).await?; let outstr = String::from_utf8(outputs.stdout).context("Error parsing rustc output")?; if log_enabled!(Trace) { trace!("get_compiler_outputs: {:?}", outstr); } Ok(outstr.lines().map(|l| l.to_owned()).collect()) } impl Rust { /// Create a new Rust compiler instance, calculating the hashes of /// all the shared libraries in its sysroot. pub async fn new( mut creator: T, executable: PathBuf, env_vars: &[(OsString, OsString)], rustc_verbose_version: &str, dist_archive: Option, pool: tokio::runtime::Handle, ) -> Result where T: CommandCreatorSync, { // Taken from Cargo let host = rustc_verbose_version .lines() .find(|l| l.starts_with("host: ")) .map(|l| &l[6..]) .context("rustc verbose version didn't have a line for `host:`")? .to_string(); // it's fine to use the `executable` directly no matter if proxied or not let mut cmd = creator.new_command_sync(&executable); cmd.stdout(process::Stdio::piped()) .stderr(process::Stdio::null()) .arg("--print=sysroot") .env_clear() .envs(env_vars.to_vec()); let sysroot_and_libs = async move { let output = run_input_output(cmd, None).await?; //debug!("output.and_then: {}", output); let outstr = String::from_utf8(output.stdout).context("Error parsing sysroot")?; let sysroot = PathBuf::from(outstr.trim_end()); let libs_path = sysroot.join(LIBS_DIR); let mut libs = fs::read_dir(&libs_path) .with_context(|| format!("Failed to list rustc sysroot: `{:?}`", libs_path))? 
.filter_map(|e| { e.ok().and_then(|e| { e.file_type().ok().and_then(|t| { let p = e.path(); if (t.is_file() || t.is_symlink() && p.is_file()) && p.extension().map(|e| e == DLL_EXTENSION).unwrap_or(false) { Some(p) } else { None } }) }) }) .collect::>(); if let Some(path) = dist_archive { trace!("Hashing {:?} along with rustc libs.", path); libs.push(path); }; libs.sort(); Result::Ok((sysroot, libs)) }; #[cfg(feature = "dist-client")] { use futures::TryFutureExt; let rlib_dep_reader = { let executable = executable.clone(); let env_vars = env_vars.to_owned(); pool.spawn_blocking(move || RlibDepReader::new_with_check(executable, &env_vars)) .map_err(anyhow::Error::from) }; let ((sysroot, libs), rlib_dep_reader) = futures::future::try_join(sysroot_and_libs, rlib_dep_reader).await?; let rlib_dep_reader = match rlib_dep_reader { Ok(r) => Some(Arc::new(r)), Err(e) => { warn!("Failed to initialise RlibDepDecoder, distributed compiles will be inefficient: {}", e); None } }; hash_all(&libs, &pool).await.map(move |digests| Rust { executable, host, version: rustc_verbose_version.to_string(), sysroot, compiler_shlibs_digests: digests, rlib_dep_reader, }) } #[cfg(not(feature = "dist-client"))] { let (sysroot, libs) = sysroot_and_libs.await?; hash_all(&libs, &pool).await.map(move |digests| Rust { executable, host, version: rustc_verbose_version.to_string(), sysroot, compiler_shlibs_digests: digests, }) } } } impl Compiler for Rust where T: CommandCreatorSync, { fn kind(&self) -> CompilerKind { CompilerKind::Rust } #[cfg(feature = "dist-client")] fn get_toolchain_packager(&self) -> Box { Box::new(RustToolchainPackager { sysroot: self.sysroot.clone(), }) } /// Parse `arguments` as rustc command-line arguments, determine if /// we can cache the result of compilation. This is only intended to /// cover a subset of rustc invocations, primarily focused on those /// that will occur when cargo invokes rustc. /// /// Caveats: /// * We don't support compilation from stdin. /// * We require --emit. /// * We only support `link` and `dep-info` in --emit (and don't support *just* 'dep-info') /// * We require `--out-dir`. /// * We don't support `-o file`. 
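    ///
    /// An illustrative cargo-style invocation that satisfies these
    /// constraints (crate name, paths, and metadata value are made up for
    /// the example):
    ///
    /// ```text
    /// rustc --crate-name foo src/lib.rs --crate-type lib \
    ///     --emit=dep-info,link -C metadata=0123456789abcdef \
    ///     --out-dir target/debug/deps -L dependency=target/debug/deps
    /// ```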
fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments + 'static>> { match parse_arguments(arguments, cwd) { CompilerArguments::Ok(args) => CompilerArguments::Ok(Box::new(RustHasher { executable: self.executable.clone(), // if rustup exists, this must already contain the true resolved compiler path host: self.host.clone(), version: self.version.clone(), sysroot: self.sysroot.clone(), compiler_shlibs_digests: self.compiler_shlibs_digests.clone(), #[cfg(feature = "dist-client")] rlib_dep_reader: self.rlib_dep_reader.clone(), parsed_args: args, })), CompilerArguments::NotCompilation => CompilerArguments::NotCompilation, CompilerArguments::CannotCache(why, extra_info) => { CompilerArguments::CannotCache(why, extra_info) } } } fn box_clone(&self) -> Box> { Box::new((*self).clone()) } } impl CompilerProxy for RustupProxy where T: CommandCreatorSync, { fn resolve_proxied_executable( &self, mut creator: T, cwd: PathBuf, env: &[(OsString, OsString)], ) -> Pin> + Send>> { let mut child = creator.new_command_sync(&self.proxy_executable); child .current_dir(&cwd) .env_clear() .envs(env.to_vec()) .args(&["which", "rustc"]); Box::pin(async move { let output = run_input_output(child, None) .await .context("Failed to execute rustup which rustc")?; let stdout = String::from_utf8(output.stdout) .context("Failed to parse output of rustup which rustc")?; let proxied_compiler = PathBuf::from(stdout.trim()); trace!( "proxy: rustup which rustc produced: {:?}", &proxied_compiler ); // TODO: Delegate FS access to a thread pool if possible let attr = fs::metadata(proxied_compiler.as_path()) .context("Failed to obtain metadata of the resolved, true rustc")?; if attr.is_file() { Ok(FileTime::from_last_modification_time(&attr)) } else { Err(anyhow!( "proxy: rustup resolved compiler is not of type file" )) } .map(move |filetime| (proxied_compiler, filetime)) }) } fn box_clone(&self) -> Box> { Box::new((*self).clone()) } } impl RustupProxy { pub fn new
<P>
(proxy_executable: P) -> Result where P: AsRef, { let proxy_executable = proxy_executable.as_ref().to_owned(); Ok(Self { proxy_executable }) } pub async fn find_proxy_executable( compiler_executable: &Path, proxy_name: &str, mut creator: T, env: &[(OsString, OsString)], ) -> Result>> where T: CommandCreatorSync, { enum ProxyPath { Candidate(PathBuf), ToBeDiscovered, None, } // verification if rustc is a proxy or not // // the process is multistaged // // if it is determined that rustc is a proxy, // then check if there is a rustup binary next to rustc // if not then check if which() knows about a rustup and use that. // // The produced candidate is then tested if it is a rustup. // // // The test for rustc being a proxy or not is done as follows // and follow firefox rustc detection closely: // // https://searchfox.org/mozilla-central/rev/c79c0d65a183d9d38676855f455a5c6a7f7dadd3/build/moz.configure/rust.configure#23-80 // // which boils down to // // `rustc +stable` returns retcode 0 if it is the rustup proxy // `rustc +stable` returns retcode 1 (!=0) if it is installed via i.e. rpm packages // verify rustc is proxy let mut child = creator.new_command_sync(compiler_executable); child.env_clear().envs(env.to_vec()).args(&["+stable"]); let state = run_input_output(child, None).await.map(move |output| { if output.status.success() { trace!("proxy: Found a compiler proxy managed by rustup"); ProxyPath::ToBeDiscovered } else { trace!("proxy: Found a regular compiler"); ProxyPath::None } }); let state = match state { Ok(ProxyPath::Candidate(_)) => unreachable!("Q.E.D."), Ok(ProxyPath::ToBeDiscovered) => { // simple check: is there a rustup in the same parent dir as rustc? // that would be the preferred one Ok(match compiler_executable.parent().map(Path::to_owned) { Some(parent) => { let proxy_candidate = parent.join(proxy_name); if proxy_candidate.exists() { trace!( "proxy: Found a compiler proxy at {}", proxy_candidate.display() ); ProxyPath::Candidate(proxy_candidate) } else { ProxyPath::ToBeDiscovered } } None => ProxyPath::ToBeDiscovered, }) } x => x, }; let state = match state { Ok(ProxyPath::ToBeDiscovered) => { // still no rustup found, use which crate to find one match which::which(proxy_name) { Ok(proxy_candidate) => { warn!( "proxy: rustup found, but not where it was expected (next to rustc {})", compiler_executable.display() ); Ok(ProxyPath::Candidate(proxy_candidate)) } Err(e) => { trace!("proxy: rustup is not present: {}", e); Ok(ProxyPath::ToBeDiscovered) } } } x => x, }; match state { Err(e) => Err(e), Ok(ProxyPath::ToBeDiscovered) => Ok(Err(anyhow!( "Failed to discover a rustup executable, but rustc behaves like a proxy" ))), Ok(ProxyPath::None) => Ok(Ok(None)), Ok(ProxyPath::Candidate(proxy_executable)) => { // verify the candidate is a rustup let mut child = creator.new_command_sync(&proxy_executable); child.env_clear().envs(env.to_vec()).args(&["--version"]); let rustup_candidate_check = run_input_output(child, None).await?; let stdout = String::from_utf8(rustup_candidate_check.stdout) .map_err(|_e| anyhow!("Response of `rustup --version` is not valid UTF-8"))?; Ok(if stdout.trim().starts_with("rustup ") { trace!("PROXY rustup --version produced: {}", &stdout); Self::new(&proxy_executable).map(Some) } else { Err(anyhow!("Unexpected output or `rustup --version`")) }) } } } } macro_rules! 
make_os_string { ($( $v:expr ),*) => {{ let mut s = OsString::new(); $( s.push($v); )* s }}; } #[derive(Clone, Debug, PartialEq)] struct ArgCrateTypes { rlib: bool, staticlib: bool, others: HashSet, } impl FromArg for ArgCrateTypes { fn process(arg: OsString) -> ArgParseResult { let arg = String::process(arg)?; let mut crate_types = ArgCrateTypes { rlib: false, staticlib: false, others: HashSet::new(), }; for ty in arg.split(',') { match ty { // It is assumed that "lib" always refers to "rlib", which // is true right now but may not be in the future "lib" | "rlib" => crate_types.rlib = true, "staticlib" => crate_types.staticlib = true, other => { crate_types.others.insert(other.to_owned()); } } } Ok(crate_types) } } impl IntoArg for ArgCrateTypes { fn into_arg_os_string(self) -> OsString { let ArgCrateTypes { rlib, staticlib, others, } = self; let mut types: Vec<_> = others .iter() .map(String::as_str) .chain(if rlib { Some("rlib") } else { None }) .chain(if staticlib { Some("staticlib") } else { None }) .collect(); types.sort_unstable(); let types_string = types.join(","); types_string.into() } fn into_arg_string(self, _transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgCrateTypes { rlib, staticlib, others, } = self; let mut types: Vec<_> = others .iter() .map(String::as_str) .chain(if rlib { Some("rlib") } else { None }) .chain(if staticlib { Some("staticlib") } else { None }) .collect(); types.sort_unstable(); let types_string = types.join(","); Ok(types_string) } } #[derive(Clone, Debug, PartialEq)] struct ArgLinkLibrary { kind: String, name: String, } impl FromArg for ArgLinkLibrary { fn process(arg: OsString) -> ArgParseResult { let (kind, name) = match split_os_string_arg(arg, "=")? { (kind, Some(name)) => (kind, name), // If no kind is specified, the default is dylib. (name, None) => ("dylib".to_owned(), name), }; Ok(ArgLinkLibrary { kind, name }) } } impl IntoArg for ArgLinkLibrary { fn into_arg_os_string(self) -> OsString { let ArgLinkLibrary { kind, name } = self; make_os_string!(kind, "=", name) } fn into_arg_string(self, _transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgLinkLibrary { kind, name } = self; Ok(format!("{}={}", kind, name)) } } #[derive(Clone, Debug, PartialEq)] struct ArgLinkPath { kind: String, path: PathBuf, } impl FromArg for ArgLinkPath { fn process(arg: OsString) -> ArgParseResult { let (kind, path) = match split_os_string_arg(arg, "=")? 
{ (kind, Some(path)) => (kind, path), // If no kind is specified, the path is used to search for all kinds (path, None) => ("all".to_owned(), path), }; Ok(ArgLinkPath { kind, path: path.into(), }) } } impl IntoArg for ArgLinkPath { fn into_arg_os_string(self) -> OsString { let ArgLinkPath { kind, path } = self; make_os_string!(kind, "=", path) } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgLinkPath { kind, path } = self; Ok(format!("{}={}", kind, path.into_arg_string(transformer)?)) } } #[derive(Clone, Debug, PartialEq)] struct ArgCodegen { opt: String, value: Option, } impl FromArg for ArgCodegen { fn process(arg: OsString) -> ArgParseResult { let (opt, value) = split_os_string_arg(arg, "=")?; Ok(ArgCodegen { opt, value }) } } impl IntoArg for ArgCodegen { fn into_arg_os_string(self) -> OsString { let ArgCodegen { opt, value } = self; if let Some(value) = value { make_os_string!(opt, "=", value) } else { make_os_string!(opt) } } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgCodegen { opt, value } = self; Ok(if let Some(value) = value { format!("{}={}", opt, value.into_arg_string(transformer)?) } else { opt }) } } #[derive(Clone, Debug, PartialEq)] struct ArgUnstable { opt: String, value: Option, } impl FromArg for ArgUnstable { fn process(arg: OsString) -> ArgParseResult { let (opt, value) = split_os_string_arg(arg, "=")?; Ok(ArgUnstable { opt, value }) } } impl IntoArg for ArgUnstable { fn into_arg_os_string(self) -> OsString { let ArgUnstable { opt, value } = self; if let Some(value) = value { make_os_string!(opt, "=", value) } else { make_os_string!(opt) } } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgUnstable { opt, value } = self; Ok(if let Some(value) = value { format!("{}={}", opt, value.into_arg_string(transformer)?) } else { opt }) } } #[derive(Clone, Debug, PartialEq)] struct ArgExtern { name: String, path: PathBuf, } impl FromArg for ArgExtern { fn process(arg: OsString) -> ArgParseResult { if let (name, Some(path)) = split_os_string_arg(arg, "=")? { Ok(ArgExtern { name, path: path.into(), }) } else { Err(ArgParseError::Other("no path for extern")) } } } impl IntoArg for ArgExtern { fn into_arg_os_string(self) -> OsString { let ArgExtern { name, path } = self; make_os_string!(name, "=", path) } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgExtern { name, path } = self; Ok(format!("{}={}", name, path.into_arg_string(transformer)?)) } } #[derive(Clone, Debug, PartialEq)] enum ArgTarget { Name(String), Path(PathBuf), Unsure(OsString), } impl FromArg for ArgTarget { fn process(arg: OsString) -> ArgParseResult { // Is it obviously a json file path? if Path::new(&arg) .extension() .map(|ext| ext == "json") .unwrap_or(false) { return Ok(ArgTarget::Path(arg.into())); } // Time for clever detection - if we append .json (even if it's clearly // a directory, i.e. resulting in /my/dir/.json), does the path exist? 
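        // A hedged sketch of the resulting classification (hypothetical values;
        // the `Unsure` outcome depends on what exists on the local filesystem,
        // checked relative to the current directory):
        //
        //     --target x86_64-unknown-linux-gnu      => ArgTarget::Name(..)
        //     --target /path/to/custom-target.json   => ArgTarget::Path(..)
        //     --target custom   (a file `custom.json` exists)
        //                                            => ArgTarget::Unsure(..)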
let mut path = arg.clone(); path.push(".json"); if Path::new(&path).is_file() { // Unfortunately, we're now not sure what will happen without having // a list of all the built-in targets handy, as they don't get .json // auto-added for target json discovery return Ok(ArgTarget::Unsure(arg)); } // The file doesn't exist so it can't be a path, safe to assume it's a name Ok(ArgTarget::Name( arg.into_string().map_err(ArgParseError::InvalidUnicode)?, )) } } impl IntoArg for ArgTarget { fn into_arg_os_string(self) -> OsString { match self { ArgTarget::Name(s) => s.into(), ArgTarget::Path(p) => p.into(), ArgTarget::Unsure(s) => s, } } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { Ok(match self { ArgTarget::Name(s) => s, ArgTarget::Path(p) => p.into_arg_string(transformer)?, ArgTarget::Unsure(s) => s.into_arg_string(transformer)?, }) } } ArgData! { TooHardFlag, TooHardPath(PathBuf), NotCompilationFlag, NotCompilation(OsString), LinkLibrary(ArgLinkLibrary), LinkPath(ArgLinkPath), Emit(String), Extern(ArgExtern), Color(String), Json(String), CrateName(String), CrateType(ArgCrateTypes), OutDir(PathBuf), CodeGen(ArgCodegen), PassThrough(OsString), Target(ArgTarget), Unstable(ArgUnstable), } use self::ArgData::*; use super::CacheControl; // These are taken from https://github.com/rust-lang/rust/blob/b671c32ddc8c36d50866428d83b7716233356721/src/librustc/session/config.rs#L1186 counted_array!(static ARGS: [ArgInfo; _] = [ flag!("-", TooHardFlag), take_arg!("--allow", OsString, CanBeSeparated('='), PassThrough), take_arg!("--cap-lints", OsString, CanBeSeparated('='), PassThrough), take_arg!("--cfg", OsString, CanBeSeparated('='), PassThrough), take_arg!("--check-cfg", OsString, CanBeSeparated('='), PassThrough), take_arg!("--codegen", ArgCodegen, CanBeSeparated('='), CodeGen), take_arg!("--color", String, CanBeSeparated('='), Color), take_arg!("--crate-name", String, CanBeSeparated('='), CrateName), take_arg!("--crate-type", ArgCrateTypes, CanBeSeparated('='), CrateType), take_arg!("--deny", OsString, CanBeSeparated('='), PassThrough), take_arg!("--emit", String, CanBeSeparated('='), Emit), take_arg!("--error-format", OsString, CanBeSeparated('='), PassThrough), take_arg!("--explain", OsString, CanBeSeparated('='), NotCompilation), take_arg!("--extern", ArgExtern, CanBeSeparated('='), Extern), take_arg!("--forbid", OsString, CanBeSeparated('='), PassThrough), flag!("--help", NotCompilationFlag), take_arg!("--json", String, CanBeSeparated('='), Json), take_arg!("--out-dir", PathBuf, CanBeSeparated('='), OutDir), take_arg!("--pretty", OsString, CanBeSeparated('='), NotCompilation), take_arg!("--print", OsString, CanBeSeparated('='), NotCompilation), take_arg!("--remap-path-prefix", OsString, CanBeSeparated('='), PassThrough), take_arg!("--sysroot", PathBuf, CanBeSeparated('='), TooHardPath), take_arg!("--target", ArgTarget, CanBeSeparated('='), Target), take_arg!("--unpretty", OsString, CanBeSeparated('='), NotCompilation), flag!("--version", NotCompilationFlag), take_arg!("--warn", OsString, CanBeSeparated('='), PassThrough), take_arg!("-A", OsString, CanBeSeparated, PassThrough), take_arg!("-C", ArgCodegen, CanBeSeparated, CodeGen), take_arg!("-D", OsString, CanBeSeparated, PassThrough), take_arg!("-F", OsString, CanBeSeparated, PassThrough), take_arg!("-L", ArgLinkPath, CanBeSeparated, LinkPath), flag!("-V", NotCompilationFlag), take_arg!("-W", OsString, CanBeSeparated, PassThrough), take_arg!("-Z", ArgUnstable, CanBeSeparated, Unstable), take_arg!("-l", 
ArgLinkLibrary, CanBeSeparated, LinkLibrary), take_arg!("-o", PathBuf, CanBeSeparated, TooHardPath), ]); fn parse_arguments(arguments: &[OsString], cwd: &Path) -> CompilerArguments { let mut args = vec![]; let mut emit: Option> = None; let mut input = None; let mut output_dir = None; let mut crate_name = None; let mut crate_types = CrateTypes { rlib: false, staticlib: false, }; let mut extra_filename = None; let mut externs = vec![]; let mut crate_link_paths = vec![]; let mut static_lib_names = vec![]; let mut static_link_paths: Vec = vec![]; let mut color_mode = ColorMode::Auto; let mut has_json = false; let mut profile = None; let mut gcno = false; let mut target_json = None; for arg in ArgsIter::new(arguments.iter().cloned(), &ARGS[..]) { let arg = try_or_cannot_cache!(arg, "argument parse"); match arg.get_data() { Some(TooHardFlag) | Some(TooHardPath(_)) => { cannot_cache!(arg.flag_str().expect("Can't be Argument::Raw/UnknownFlag",)) } Some(NotCompilationFlag) | Some(NotCompilation(_)) => { return CompilerArguments::NotCompilation } Some(LinkLibrary(ArgLinkLibrary { kind, name })) => { if kind == "static" { static_lib_names.push(name.to_owned()) } } Some(LinkPath(ArgLinkPath { kind, path })) => { // "crate" is not typically necessary as cargo will normally // emit explicit --extern arguments if kind == "crate" || kind == "dependency" || kind == "all" { crate_link_paths.push(cwd.join(path)) } if kind == "native" || kind == "all" { static_link_paths.push(cwd.join(path)) } } Some(Emit(value)) => { if emit.is_some() { // We don't support passing --emit more than once. cannot_cache!("more than one --emit"); } emit = Some(value.split(',').map(str::to_owned).collect()) } Some(CrateType(ArgCrateTypes { rlib, staticlib, others, })) => { // We can't cache non-rlib/staticlib crates, because rustc invokes the // system linker to link them, and we don't know about all the linker inputs. if !others.is_empty() { let others: Vec<&str> = others.iter().map(String::as_str).collect(); let others_string = others.join(","); cannot_cache!("crate-type", others_string) } crate_types.rlib |= rlib; crate_types.staticlib |= staticlib; } Some(CrateName(value)) => crate_name = Some(value.clone()), Some(OutDir(value)) => output_dir = Some(value.clone()), Some(Extern(ArgExtern { path, .. })) => externs.push(path.clone()), Some(CodeGen(ArgCodegen { opt, value })) => { match (opt.as_ref(), value) { ("extra-filename", Some(value)) => extra_filename = Some(value.to_owned()), ("extra-filename", None) => cannot_cache!("extra-filename"), ("profile-use", Some(v)) => profile = Some(v.to_string()), // Incremental compilation makes a mess of sccache's entire world // view. It produces additional compiler outputs that we don't cache, // and just letting rustc do its work in incremental mode is likely // to be faster than trying to fetch a result from cache anyway, so // don't bother caching compiles where it's enabled currently. // Longer-term we would like to figure out better integration between // sccache and rustc in the incremental scenario: // https://github.com/mozilla/sccache/issues/236 ("incremental", _) => cannot_cache!("incremental"), (_, _) => (), } } Some(Unstable(ArgUnstable { opt, value })) => match value.as_deref() { Some("y") | Some("yes") | Some("on") | None if opt == "profile" => { gcno = true; } _ => (), }, Some(Color(value)) => { // We'll just assume the last specified value wins. 
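        // For illustration (hypothetical flags): with `--color=always --color=never`
        // on one command line this arm runs twice, and the final assignment below
        // leaves color_mode == ColorMode::Off.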
color_mode = match value.as_ref() { "always" => ColorMode::On, "never" => ColorMode::Off, _ => ColorMode::Auto, }; } Some(Json(_)) => { has_json = true; } Some(PassThrough(_)) => (), Some(Target(target)) => match target { ArgTarget::Path(json_path) => target_json = Some(json_path.to_owned()), ArgTarget::Unsure(_) => cannot_cache!("target unsure"), ArgTarget::Name(_) => (), }, None => { match arg { Argument::Raw(ref val) => { if input.is_some() { // Can't cache compilations with multiple inputs. cannot_cache!("multiple input files"); } input = Some(val.clone()); } Argument::UnknownFlag(_) => {} _ => unreachable!(), } } } // We'll drop --color arguments, we're going to pass --color=always and the client will // strip colors if necessary. match arg.get_data() { Some(Color(_)) => {} _ => args.push(arg.normalize(NormalizedDisposition::Separated)), } } // Unwrap required values. macro_rules! req { ($x:ident) => { let $x = if let Some($x) = $x { $x } else { debug!("Can't cache compilation, missing `{}`", stringify!($x)); cannot_cache!(concat!("missing ", stringify!($x))); }; }; } // We don't actually save the input value, but there needs to be one. req!(input); drop(input); req!(output_dir); req!(emit); req!(crate_name); // We won't cache invocations that are not producing // binary output. if !emit.is_empty() && !emit.contains("link") && !emit.contains("metadata") { return CompilerArguments::NotCompilation; } // If it's not an rlib and not a staticlib then crate-type wasn't passed, // so it will usually be inferred as a binary, though the `#![crate_type` // annotation may dictate otherwise - either way, we don't know what to do. if let CrateTypes { rlib: false, staticlib: false, } = crate_types { cannot_cache!("crate-type", "No crate-type passed".to_owned()) } // We won't cache invocations that are outputting anything but // linker output and dep-info. if emit.iter().any(|e| !ALLOWED_EMIT.contains(e.as_str())) { cannot_cache!("unsupported --emit"); } // Figure out the dep-info filename, if emitting dep-info. let dep_info = if emit.contains("dep-info") { let mut dep_info = crate_name.clone(); if let Some(extra_filename) = extra_filename.clone() { dep_info.push_str(&extra_filename[..]); } dep_info.push_str(".d"); Some(dep_info) } else { None }; // Ignore profile is `link` is not in emit which means we are running `cargo check`. let profile = if emit.contains("link") { profile } else { None }; // Figure out the gcno filename, if producing gcno files with `-Zprofile`. let gcno = if gcno && emit.contains("link") { let mut gcno = crate_name.clone(); if let Some(extra_filename) = extra_filename { gcno.push_str(&extra_filename[..]); } gcno.push_str(".gcno"); Some(gcno) } else { None }; // Locate all static libs specified on the commandline. let staticlibs = static_lib_names .into_iter() .filter_map(|name| { for path in static_link_paths.iter() { for f in &[ format_args!("lib{}.a", name), format_args!("{}.lib", name), format_args!("{}.a", name), ] { let lib_path = path.join(fmt::format(*f)); if lib_path.exists() { return Some(lib_path); } } } // rustc will just error if there's a missing static library, so don't worry about // it too much. None }) .collect(); // We'll figure out the source files and outputs later in // `generate_hash_key` where we can run rustc. // Cargo doesn't deterministically order --externs, and we need the hash inputs in a // deterministic order. 
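    // A small sketch (hypothetical paths): the two cargo-generated orderings
    //     --extern a=deps/liba.rlib --extern b=deps/libb.rlib
    //     --extern b=deps/libb.rlib --extern a=deps/liba.rlib
    // both yield externs == ["deps/liba.rlib", "deps/libb.rlib"] after this
    // sort, so equivalent invocations hash identically.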
externs.sort(); CompilerArguments::Ok(ParsedArguments { arguments: args, output_dir, crate_types, externs, crate_link_paths, staticlibs, crate_name, dep_info: dep_info.map(|s| s.into()), profile: profile.map(|s| s.into()), gcno: gcno.map(|s| s.into()), emit, color_mode, has_json, target_json, }) } #[allow(clippy::suspicious_else_formatting)] // False positive #[async_trait] impl CompilerHasher for RustHasher where T: CommandCreatorSync, { async fn generate_hash_key( self: Box, creator: &T, cwd: PathBuf, env_vars: Vec<(OsString, OsString)>, _may_dist: bool, pool: &tokio::runtime::Handle, _rewrite_includes_only: bool, _storage: Arc, _cache_control: CacheControl, ) -> Result> { let RustHasher { executable, host, version, sysroot, compiler_shlibs_digests, #[cfg(feature = "dist-client")] rlib_dep_reader, parsed_args: ParsedArguments { arguments, output_dir, externs, crate_link_paths, staticlibs, crate_name, crate_types, dep_info, emit, has_json, profile, gcno, target_json, .. }, } = *self; trace!("[{}]: generate_hash_key", crate_name); // TODO: this doesn't produce correct arguments if they should be concatenated - should use iter_os_strings let os_string_arguments: Vec<(OsString, Option)> = arguments .iter() .map(|arg| { ( arg.to_os_string(), arg.get_data().cloned().map(IntoArg::into_arg_os_string), ) }) .collect(); // `filtered_arguments` omits --emit and --out-dir arguments. // It's used for invoking rustc with `--emit=dep-info` to get the list of // source files for this crate. let filtered_arguments = os_string_arguments .iter() .filter_map(|(arg, val)| { if arg == "--emit" || arg == "--out-dir" { None } else { Some((arg, val)) } }) .flat_map(|(arg, val)| Some(arg).into_iter().chain(val)) .cloned() .collect::>(); // Find all the source files and hash them let source_hashes_pool = pool.clone(); let source_files_and_hashes_and_env_deps = async { let (source_files, env_deps) = get_source_files_and_env_deps( creator, &crate_name, &executable, &filtered_arguments, &cwd, &env_vars, pool, ) .await?; let source_hashes = hash_all(&source_files, &source_hashes_pool).await?; Ok((source_files, source_hashes, env_deps)) }; // Hash the contents of the externs listed on the commandline. trace!("[{}]: hashing {} externs", crate_name, externs.len()); let abs_externs = externs.iter().map(|e| cwd.join(e)).collect::>(); let extern_hashes = hash_all(&abs_externs, pool); // Hash the contents of the staticlibs listed on the commandline. trace!("[{}]: hashing {} staticlibs", crate_name, staticlibs.len()); let abs_staticlibs = staticlibs.iter().map(|s| cwd.join(s)).collect::>(); let staticlib_hashes = hash_all_archives(&abs_staticlibs, pool); // Hash the content of the specified target json file, if any. let mut target_json_files = Vec::new(); if let Some(path) = &target_json { trace!( "[{}]: hashing target json file {}", crate_name, path.display() ); let abs_target_json = cwd.join(path); target_json_files.push(abs_target_json); } let target_json_hash = hash_all(&target_json_files, pool); // Perform all hashing operations on the files. let ( (source_files, source_hashes, mut env_deps), extern_hashes, staticlib_hashes, target_json_hash, ) = futures::try_join!( source_files_and_hashes_and_env_deps, extern_hashes, staticlib_hashes, target_json_hash )?; // If you change any of the inputs to the hash, you should change `CACHE_VERSION`. let mut m = Digest::new(); // Hash inputs: // 1. A version m.update(CACHE_VERSION); // 2. 
compiler_shlibs_digests for d in compiler_shlibs_digests { m.update(d.as_bytes()); } let weak_toolchain_key = m.clone().finish(); // 3. The full commandline (self.arguments) // TODO: there will be full paths here, it would be nice to // normalize them so we can get cross-machine cache hits. // A few argument types are not passed in a deterministic order // by cargo: --extern, -L, --cfg. We'll filter those out, sort them, // and append them to the rest of the arguments. let args = { let (mut sortables, rest): (Vec<_>, Vec<_>) = os_string_arguments .iter() // We exclude a few arguments from the hash: // -L, --extern, --out-dir // These contain paths which aren't relevant to the output, and the compiler inputs // in those paths (rlibs and static libs used in the compilation) are used as hash // inputs below. .filter(|&(arg, _)| !(arg == "--extern" || arg == "-L" || arg == "--out-dir")) // We also exclude `--target` if it specifies a path to a .json file. The file content // is used as hash input below. // If `--target` specifies a string, it continues to be hashed as part of the arguments. .filter(|&(arg, _)| target_json.is_none() || arg != "--target") // A few argument types were not passed in a deterministic order // by older versions of cargo: --extern, -L, --cfg. We'll filter the rest of those // out, sort them, and append them to the rest of the arguments. .partition(|&(arg, _)| arg == "--cfg"); sortables.sort(); rest.into_iter() .chain(sortables) .flat_map(|(arg, val)| iter::once(arg).chain(val.as_ref())) .fold(OsString::new(), |mut a, b| { a.push(b); a }) }; args.hash(&mut HashToDigest { digest: &mut m }); // 4. The digest of all source files (this includes src file from cmdline). // 5. The digest of all files listed on the commandline (self.externs). // 6. The digest of all static libraries listed on the commandline (self.staticlibs). // 7. The digest of the content of the target json file specified via `--target` (if any). for h in source_hashes .into_iter() .chain(extern_hashes) .chain(staticlib_hashes) .chain(target_json_hash) { m.update(h.as_bytes()); } // 8. Environment variables: Hash all environment variables listed in the rustc dep-info // output. Additionally also has all environment variables starting with `CARGO_`, // since those are not listed in dep-info but affect cacheability. env_deps.sort(); for (var, val) in env_deps.iter() { var.hash(&mut HashToDigest { digest: &mut m }); m.update(b"="); val.hash(&mut HashToDigest { digest: &mut m }); } let mut env_vars: Vec<_> = env_vars .iter() // Filter out RUSTC_COLOR since we control color usage with command line flags. // rustc reports an error when both are present. .filter(|(ref k, _)| k != "RUSTC_COLOR") .cloned() .collect(); env_vars.sort(); for (var, val) in env_vars.iter() { if !var.starts_with("CARGO_") { continue; } // CARGO_MAKEFLAGS will have jobserver info which is extremely non-cacheable. // CARGO_REGISTRIES_*_TOKEN contains non-cacheable secrets. // Registry override config doesn't need to be hashed, because deps' package IDs // already uniquely identify the relevant registries. if var == "CARGO_MAKEFLAGS" || var.starts_with("CARGO_REGISTRIES_") { continue; } var.hash(&mut HashToDigest { digest: &mut m }); m.update(b"="); val.hash(&mut HashToDigest { digest: &mut m }); } // 9. The cwd of the compile. This will wind up in the rlib. cwd.hash(&mut HashToDigest { digest: &mut m }); // 10. The version of the compiler. 
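        // Reading aid, since the numbered comments above are spread out - the
        // full set of hash inputs is:
        //   1. CACHE_VERSION                      6. staticlib digests
        //   2. compiler shared-library digests    7. target json digest (if any)
        //   3. the filtered, sorted arguments     8. dep-info env vars + CARGO_* vars
        //   4. source file digests                9. the cwd of the compile
        //   5. extern digests                    10. the compiler version (below)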
version.hash(&mut HashToDigest { digest: &mut m }); // Turn arguments into a simple Vec to calculate outputs. let flat_os_string_arguments: Vec = os_string_arguments .into_iter() .flat_map(|(arg, val)| iter::once(arg).chain(val)) .collect(); let mut outputs = get_compiler_outputs( creator, &executable, flat_os_string_arguments, &cwd, &env_vars, ) .await?; // metadata / dep-info don't ever generate binaries, but // rustc still makes them appear in the --print // file-names output (see // https://github.com/rust-lang/rust/pull/68799). // // So if we see a binary in the rustc output and figure // out that we're not _actually_ generating it, then we // can avoid generating everything that isn't an rlib / // rmeta. // // This can go away once the above rustc PR makes it in. let emit_generates_only_metadata = !emit.is_empty() && emit.iter().all(|e| e == "metadata" || e == "dep-info"); if emit_generates_only_metadata { outputs.retain(|o| o.ends_with(".rlib") || o.ends_with(".rmeta")); } if emit.contains("metadata") { // rustc currently does not report rmeta outputs with --print file-names // --emit metadata the rlib is printed, and with --emit metadata,link // only the rlib is printed. let rlibs: HashSet<_> = outputs .iter() .filter(|&p| p.ends_with(".rlib")) .cloned() .collect(); for lib in rlibs { let rmeta = lib.replacen(".rlib", ".rmeta", 1); // Do this defensively for future versions of rustc that may // be fixed. if !outputs.contains(&rmeta) { outputs.push(rmeta); } if !emit.contains("link") { outputs.retain(|p| *p != lib); } } } // Convert output files into a map of basename -> full // path, and remove some unneeded / non-existing ones, // see https://github.com/rust-lang/rust/pull/68799. let mut outputs = outputs .into_iter() .map(|o| { let p = output_dir.join(&o); ( o, ArtifactDescriptor { path: p, optional: false, }, ) }) .collect::>(); let dep_info = if let Some(dep_info) = dep_info { let p = output_dir.join(&dep_info); outputs.insert( dep_info.to_string_lossy().into_owned(), ArtifactDescriptor { path: p.clone(), optional: false, }, ); Some(p) } else { None }; if let Some(profile) = profile { let p = output_dir.join(&profile); outputs.insert( profile.to_string_lossy().into_owned(), ArtifactDescriptor { path: p, optional: true, }, ); } if let Some(gcno) = gcno { let p = output_dir.join(&gcno); outputs.insert( gcno.to_string_lossy().into_owned(), ArtifactDescriptor { path: p, optional: true, }, ); } let mut arguments = arguments; // Request color output unless json was requested. The client will strip colors if needed. 
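        // In effect, a cached non-JSON compile runs as if invoked with an extra
        // separated `--color always` pair, e.g. (hypothetical invocation):
        //     rustc --crate-name foo src/lib.rs ... --color always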
if !has_json { arguments.push(Argument::WithValue( "--color", ArgData::Color("always".into()), ArgDisposition::Separated, )); } let inputs = source_files .into_iter() .chain(abs_externs) .chain(abs_staticlibs) .collect(); Ok(HashResult { key: m.finish(), compilation: Box::new(RustCompilation { executable, host, sysroot, arguments, inputs, outputs, crate_link_paths, crate_name, crate_types, dep_info, cwd, env_vars, #[cfg(feature = "dist-client")] rlib_dep_reader, }), weak_toolchain_key, }) } fn color_mode(&self) -> ColorMode { self.parsed_args.color_mode } fn output_pretty(&self) -> Cow<'_, str> { Cow::Borrowed(&self.parsed_args.crate_name) } fn box_clone(&self) -> Box> { Box::new((*self).clone()) } fn language(&self) -> Language { Language::Rust } } impl Compilation for RustCompilation { fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, _rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> { let RustCompilation { ref executable, ref arguments, ref crate_name, ref cwd, ref env_vars, ref host, ref sysroot, .. } = *self; // Ignore unused variables #[cfg(not(feature = "dist-client"))] { let _ = path_transformer; let _ = host; let _ = sysroot; } trace!("[{}]: compile", crate_name); let command = SingleCompileCommand { executable: executable.to_owned(), arguments: arguments .iter() .flat_map(|arg| arg.iter_os_strings()) .collect(), env_vars: env_vars.to_owned(), cwd: cwd.to_owned(), }; #[cfg(not(feature = "dist-client"))] let dist_command = None; #[cfg(feature = "dist-client")] let dist_command = (|| { macro_rules! try_string_arg { ($e:expr) => { match $e { Ok(s) => s, Err(e) => { debug!("Conversion failed for distributed compile argument: {}", e); return None; } } }; } let mut dist_arguments = vec![]; let mut saw_target = false; // flat_map would be nice but the lifetimes don't work out for argument in arguments.iter() { let path_transformer_fn = &mut |p: &Path| path_transformer.as_dist(p); if let Argument::Raw(input_path) = argument { // Need to explicitly handle the input argument as it's not parsed as a path let input_path = Path::new(input_path).to_owned(); dist_arguments.push(try_string_arg!( input_path.into_arg_string(path_transformer_fn) )) } else { if let Some(Target(_)) = argument.get_data() { saw_target = true } for string_arg in argument.iter_strings(path_transformer_fn) { dist_arguments.push(try_string_arg!(string_arg)) } } } // We can't rely on the packaged toolchain necessarily having the same default target triple // as us (typically host triple), so make sure to always explicitly specify a target. if !saw_target { dist_arguments.push(format!("--target={}", host)) } // Convert the paths of some important environment variables let mut env_vars = dist::osstring_tuples_to_strings(env_vars)?; let mut changed_out_dir: Option = None; for (k, v) in env_vars.iter_mut() { match k.as_str() { // We round-tripped from path to string and back to path, but it should be lossless "OUT_DIR" => { let dist_out_dir = path_transformer.as_dist(Path::new(v))?; if dist_out_dir != *v { changed_out_dir = Some(v.to_owned().into()); } *v = dist_out_dir } "TMPDIR" => { // The server will need to find its own tempdir. *v = "".to_string(); } "CARGO" | "CARGO_MANIFEST_DIR" => { *v = path_transformer.as_dist(Path::new(v))? 
} _ => (), } } // OUT_DIR was changed during transformation, check if this compilation is relying on anything // inside it - if so, disallow distributed compilation (there are sometimes hardcoded paths present) if let Some(out_dir) = changed_out_dir { if self.inputs.iter().any(|input| input.starts_with(&out_dir)) { return None; } } // Add any necessary path transforms - although we haven't packaged up inputs yet, we've // probably seen all drives (e.g. on Windows), so let's just transform those rather than // trying to do every single path. let mut remapped_disks = HashSet::new(); for (local_path, dist_path) in get_path_mappings(path_transformer) { let local_path = local_path.to_str()?; // "The from=to parameter is scanned from right to left, so from may contain '=', but to may not." if local_path.contains('=') { return None; } if remapped_disks.contains(&dist_path) { continue; } dist_arguments.push(format!("--remap-path-prefix={}={}", &dist_path, local_path)); remapped_disks.insert(dist_path); } let sysroot_executable = sysroot .join(BINS_DIR) .join("rustc") .with_extension(EXE_EXTENSION); Some(dist::CompileCommand { executable: path_transformer.as_dist(&sysroot_executable)?, arguments: dist_arguments, env_vars, cwd: path_transformer.as_dist_abs(cwd)?, }) })(); Ok((CCompileCommand::new(command), dist_command, Cacheable::Yes)) } #[cfg(feature = "dist-client")] fn into_dist_packagers( self: Box, path_transformer: dist::PathTransformer, ) -> Result { let RustCompilation { inputs, crate_link_paths, sysroot, crate_types, dep_info, rlib_dep_reader, env_vars, .. } = *{ self }; trace!( "Dist inputs: inputs={:?} crate_link_paths={:?}", inputs, crate_link_paths ); let inputs_packager = Box::new(RustInputsPackager { env_vars, crate_link_paths, crate_types, inputs, path_transformer, rlib_dep_reader, }); let toolchain_packager = Box::new(RustToolchainPackager { sysroot }); let outputs_rewriter = Box::new(RustOutputsRewriter { dep_info }); Ok((inputs_packager, toolchain_packager, outputs_rewriter)) } fn outputs<'a>(&'a self) -> Box + 'a> { Box::new(self.outputs.iter().map(|(k, v)| FileObjectSource { key: k.to_string(), path: v.path.clone(), optional: v.optional, })) } } // TODO: we do end up with slashes facing the wrong way, but Windows is agnostic so it's // mostly ok. We currently don't get mappings for every single path because it means we need to // figure out all prefixes and send them over the wire. 
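// For example (hypothetical Windows mapping): if the transformer maps the
// local prefix `C:\` to the dist prefix `/sccache/disk/c`, the compile
// command above gains
//     --remap-path-prefix=/sccache/disk/c=C:\
// The from=to value is scanned right-to-left, so the dist (from) side may
// contain '=' while the local (to) side may not - which is why the code
// above bails out of distributed compilation when local_path contains '='.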
#[cfg(feature = "dist-client")] fn get_path_mappings( path_transformer: &dist::PathTransformer, ) -> impl Iterator { path_transformer.disk_mappings() } #[cfg(feature = "dist-client")] struct RustInputsPackager { env_vars: Vec<(OsString, OsString)>, crate_link_paths: Vec, crate_types: CrateTypes, inputs: Vec, path_transformer: dist::PathTransformer, rlib_dep_reader: Option>, } #[cfg(feature = "dist-client")] fn can_trim_this(input_path: &Path) -> bool { trace!("can_trim_this: input_path={:?}", input_path); let mut ar_path = input_path.to_path_buf(); ar_path.set_extension("a"); // Check if the input path exists with both a .rlib and a .a, in which case // we want to refuse to trim, otherwise triggering // https://bugzilla.mozilla.org/show_bug.cgi?id=1760743 input_path .extension() .map(|e| e == RLIB_EXTENSION) .unwrap_or(false) && !ar_path.exists() } #[test] #[cfg(feature = "dist-client")] fn test_can_trim_this() { use crate::test::utils::create_file; let tempdir = tempfile::Builder::new() .prefix("sccache_test") .tempdir() .unwrap(); let tempdir = tempdir.path(); // With only one rlib file we should be fine let rlib_file = create_file(tempdir, "libtest.rlib", |_f| Ok(())).unwrap(); assert!(can_trim_this(&rlib_file)); // Adding an ar from a staticlib (i.e., crate-type = ["staticlib", "rlib"] // we need to refuse to allow trimming let _ar_file = create_file(tempdir, "libtest.a", |_f| Ok(())).unwrap(); assert!(!can_trim_this(&rlib_file)); } #[cfg(feature = "dist-client")] fn maybe_add_cargo_toml(input_path: &Path, verify: bool) -> Option { let lib_rs = PathBuf::new().join("src").join("lib.rs"); if input_path.ends_with(lib_rs) { let cargo_toml_path = input_path .parent() .expect("No parent") .parent() .expect("No parent") .join("Cargo.toml"); // We want to: // - either make sure the file exists (verify=true) // - just return the path (verify=false) if cargo_toml_path.is_file() || !verify { Some(cargo_toml_path) } else { None } } else { None } } #[test] #[cfg(feature = "dist-client")] fn test_maybe_add_cargo_toml() { let (root, result_cargo_toml_path) = if cfg!(windows) { ( r"C:\mozilla-source\mozilla-unified\third_party\rust", r"C:\mozilla-source\mozilla-unified\third_party\rust\wgpu-core\Cargo.toml", ) } else { ( "/home/user/mozilla-source/mozilla-unified/third_party/rust", "/home/user/mozilla-source/mozilla-unified/third_party/rust/wgpu-core/Cargo.toml", ) }; let wgpu_core = PathBuf::from(&root) .join("wgpu-core") .join("src") .join("core.rs"); let wgpu_lib = PathBuf::from(&root) .join("wgpu-core") .join("src") .join("lib.rs"); assert!(maybe_add_cargo_toml(&wgpu_core, false).is_none()); assert!(maybe_add_cargo_toml(&wgpu_core, true).is_none()); assert!( maybe_add_cargo_toml(&wgpu_lib, false) == Some(PathBuf::from(&root).join("wgpu-core").join("Cargo.toml")) ); assert!( maybe_add_cargo_toml(&wgpu_lib, false).unwrap().to_str() == Some(result_cargo_toml_path) ); assert!(maybe_add_cargo_toml(&wgpu_lib, true).is_none()); } #[cfg(feature = "dist-client")] impl pkg::InputsPackager for RustInputsPackager { #[allow(clippy::cognitive_complexity)] // TODO simplify this method. 
fn write_inputs(self: Box, wtr: &mut dyn io::Write) -> Result { debug!("Packaging compile inputs for compile"); let RustInputsPackager { crate_link_paths, crate_types, inputs, mut path_transformer, rlib_dep_reader, env_vars, } = *{ self }; // If this is a cargo build, we can assume all immediate `extern crate` dependencies // have been passed on the command line, allowing us to scan them all and find the // complete list of crates we might need. // If it's not a cargo build, we can't to extract the `extern crate` statements and // so have no way to build a list of necessary crates - send all rlibs. let is_cargo = env_vars.iter().any(|(k, _)| k == "CARGO_PKG_NAME"); let mut rlib_dep_reader_and_names = if is_cargo { rlib_dep_reader.map(|r| (r, HashSet::new())) } else { None }; let mut tar_inputs = vec![]; for input_path in inputs.into_iter() { let input_path = pkg::simplify_path(&input_path)?; if let Some(ext) = input_path.extension() { if !super::CAN_DIST_DYLIBS && ext == DLL_EXTENSION { bail!( "Cannot distribute dylib input {} on this platform", input_path.display() ) } else if ext == RLIB_EXTENSION || ext == RMETA_EXTENSION { if let Some((ref rlib_dep_reader, ref mut dep_crate_names)) = rlib_dep_reader_and_names { dep_crate_names.extend( rlib_dep_reader .discover_rlib_deps(&env_vars, &input_path) .with_context(|| { format!("Failed to read deps of {}", input_path.display()) })?, ) } } } if let Some(cargo_toml_path) = maybe_add_cargo_toml(&input_path, true) { let dist_cargo_toml_path = path_transformer .as_dist(&cargo_toml_path) .with_context(|| { format!( "unable to transform input path {}", cargo_toml_path.display() ) })?; tar_inputs.push((cargo_toml_path, dist_cargo_toml_path)); } let dist_input_path = path_transformer.as_dist(&input_path).with_context(|| { format!("unable to transform input path {}", input_path.display()) })?; tar_inputs.push((input_path, dist_input_path)) } if log_enabled!(Trace) { if let Some((_, ref dep_crate_names)) = rlib_dep_reader_and_names { trace!("Identified dependency crate names: {:?}", dep_crate_names) } } // Given the link paths, find the things we need to send over the wire to the remote machine. If // we've been able to use a dependency searcher then we can filter down just candidates for that // crate, otherwise we need to send everything. 
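        // Sketch of the filtering below (hypothetical directory entries):
        //     liblog-2f7366be74992849.rlib  -> libname "liblog" -> crate "log"
        //     libserde-9f32ab.rmeta         -> crate "serde"
        //     libfoo-9f32ab.so              -> crate "foo"; a hard error on
        //                                      platforms where CAN_DIST_DYLIBS is false
        //     README.txt (no '-')           -> skipped entirely
        // When dep_crate_names is available (cargo builds), crates not in that
        // set are skipped too.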
let mut tar_crate_libs = vec![]; for crate_link_path in crate_link_paths.into_iter() { let crate_link_path = pkg::simplify_path(&crate_link_path)?; let dir_entries = match fs::read_dir(crate_link_path) { Ok(iter) => iter, Err(e) if e.kind() == io::ErrorKind::NotFound => continue, Err(e) => return Err(e).context("Failed to read dir entries in crate link path"), }; for entry in dir_entries { let entry = match entry { Ok(entry) => entry, Err(e) => return Err(e).context("Error during iteration over crate link path"), }; let path = entry.path(); { // Take a look at the path and see if it's something we care about let libname: &str = match path.file_name().and_then(|s| s.to_str()) { Some(name) => { let mut rev_name_split = name.rsplitn(2, '-'); let _extra_filename_and_ext = rev_name_split.next(); let libname = if let Some(libname) = rev_name_split.next() { libname } else { continue; }; assert!(rev_name_split.next().is_none()); libname } None => continue, }; let (crate_name, ext): (&str, _) = match path.extension() { Some(ext) if libname.starts_with(DLL_PREFIX) && ext == DLL_EXTENSION => { (&libname[DLL_PREFIX.len()..], ext) } Some(ext) if libname.starts_with(RLIB_PREFIX) && ext == RLIB_EXTENSION => { (&libname[RLIB_PREFIX.len()..], ext) } Some(ext) if libname.starts_with(RLIB_PREFIX) && ext == RMETA_EXTENSION => { (&libname[RLIB_PREFIX.len()..], ext) } _ => continue, }; if let Some((_, ref dep_crate_names)) = rlib_dep_reader_and_names { // We have a list of crate names we care about, see if this lib is a candidate if !dep_crate_names.contains(crate_name) { continue; } } if !path.is_file() { continue; } else if !super::CAN_DIST_DYLIBS && ext == DLL_EXTENSION { bail!( "Cannot distribute dylib input {} on this platform", path.display() ) } } // This is a lib that may be of interest during compilation let dist_path = path_transformer .as_dist(&path) .with_context(|| format!("unable to transform lib path {}", path.display()))?; tar_crate_libs.push((path, dist_path)) } } let mut all_tar_inputs: Vec<_> = tar_inputs.into_iter().chain(tar_crate_libs).collect(); all_tar_inputs.sort(); // There are almost certainly duplicates from explicit externs also within the lib search paths all_tar_inputs.dedup(); // If we're just creating an rlib then the only thing inspected inside dependency rlibs is the // metadata, in which case we can create a trimmed rlib (which is actually a .a) with the metadata let can_trim_rlibs = matches!( crate_types, CrateTypes { rlib: true, staticlib: false, } ); let mut builder = tar::Builder::new(wtr); for (input_path, dist_input_path) in all_tar_inputs.iter() { let mut file_header = pkg::make_tar_header(input_path, dist_input_path)?; let file = fs::File::open(input_path)?; if can_trim_rlibs && can_trim_this(input_path) { let mut archive = ar::Archive::new(file); while let Some(entry_result) = archive.next_entry() { let mut entry = entry_result?; if entry.header().identifier() != b"rust.metadata.bin" { continue; } let mut metadata_ar = vec![]; { let mut ar_builder = ar::Builder::new(&mut metadata_ar); let header = entry.header().clone(); ar_builder.append(&header, &mut entry)? } file_header.set_size(metadata_ar.len() as u64); file_header.set_cksum(); builder.append(&file_header, metadata_ar.as_slice())?; break; } } else { file_header.set_cksum(); builder.append(&file_header, file)? 
} } // Finish archive let _ = builder.into_inner()?; Ok(path_transformer) } } #[cfg(feature = "dist-client")] #[allow(unused)] struct RustToolchainPackager { sysroot: PathBuf, } #[cfg(feature = "dist-client")] #[cfg(all(target_os = "linux", target_arch = "x86_64"))] impl pkg::ToolchainPackager for RustToolchainPackager { fn write_pkg(self: Box, f: fs::File) -> Result<()> { info!( "Packaging Rust compiler for sysroot {}", self.sysroot.display() ); let RustToolchainPackager { sysroot } = *self; let mut package_builder = pkg::ToolchainPackageBuilder::new(); package_builder.add_common()?; let bins_path = sysroot.join(BINS_DIR); let sysroot_executable = bins_path.join("rustc").with_extension(EXE_EXTENSION); package_builder.add_executable_and_deps(sysroot_executable)?; package_builder.add_dir_contents(&bins_path)?; if BINS_DIR != LIBS_DIR { let libs_path = sysroot.join(LIBS_DIR); package_builder.add_dir_contents(&libs_path)? } package_builder.into_compressed_tar(f) } } #[cfg(feature = "dist-client")] struct RustOutputsRewriter { dep_info: Option, } #[cfg(feature = "dist-client")] impl OutputsRewriter for RustOutputsRewriter { fn handle_outputs( self: Box, path_transformer: &dist::PathTransformer, output_paths: &[PathBuf], extra_inputs: &[PathBuf], ) -> Result<()> { use std::io::Write; // Outputs in dep files (the files at the beginning of lines) are untransformed at this point - // remap-path-prefix is documented to only apply to 'inputs'. trace!("Pondering on rewriting dep file {:?}", self.dep_info); if let Some(dep_info) = self.dep_info { let extra_input_str = extra_inputs .iter() .fold(String::new(), |s, p| s + " " + &p.to_string_lossy()); for dep_info_local_path in output_paths { trace!("Comparing with {}", dep_info_local_path.display()); if dep_info == *dep_info_local_path { info!("Replacing using the transformer {:?}", path_transformer); // Found the dep info file, read it in let f = fs::File::open(&dep_info) .with_context(|| "Failed to open dep info file")?; let mut deps = String::new(); { f }.read_to_string(&mut deps)?; // Replace all the output paths, at the beginning of lines for (local_path, dist_path) in get_path_mappings(path_transformer) { let re_str = format!("(?m)^{}", regex::escape(&dist_path)); let local_path_str = local_path.to_str().with_context(|| { format!( "could not convert {} to string for RE replacement", local_path.display() ) })?; error!( "RE replacing {} with {} in {}", re_str, local_path_str, deps ); let re = regex::Regex::new(&re_str).expect("Invalid regex"); deps = re.replace_all(&deps, local_path_str).into_owned(); } if !extra_inputs.is_empty() { deps = deps.replace(": ", &format!(":{} ", extra_input_str)); } // Write the depinfo file let f = fs::File::create(&dep_info).context("Failed to recreate dep info file")?; { f }.write_all(deps.as_bytes())?; return Ok(()); } } // We expected there to be dep info, but none of the outputs matched bail!("No outputs matched dep info file {}", dep_info.display()); } Ok(()) } } #[test] #[cfg(all(feature = "dist-client", target_os = "windows"))] fn test_rust_outputs_rewriter() { use crate::compiler::compiler::OutputsRewriter; use crate::test::utils::create_file; use std::io::Write; let mut pt = dist::PathTransformer::new(); pt.as_dist(Path::new("c:\\")).unwrap(); let mappings: Vec<_> = pt.disk_mappings().collect(); assert!(mappings.len() == 1); let linux_prefix = &mappings[0].1; let depinfo_data = format!("{prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.rmeta: 
src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs {prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.d: src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs src/bin/sccache-dist/main.rs: src/bin/sccache-dist/build.rs: src/bin/sccache-dist/token_check.rs: ", prefix=linux_prefix); let depinfo_resulting_data = format!("{prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.rmeta: src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs {prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.d: src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs src/bin/sccache-dist/main.rs: src/bin/sccache-dist/build.rs: src/bin/sccache-dist/token_check.rs: ", prefix="c:"); let tempdir = tempfile::Builder::new() .prefix("sccache_test") .tempdir() .unwrap(); let tempdir = tempdir.path(); let depinfo_file = create_file(tempdir, "depinfo.d", |mut f| { f.write_all(depinfo_data.as_bytes()) }) .unwrap(); let ror = Box::new(RustOutputsRewriter { dep_info: Some(depinfo_file.clone()), }); let () = ror .handle_outputs(&pt, &[depinfo_file.clone()], &[]) .unwrap(); let mut s = String::new(); fs::File::open(depinfo_file) .unwrap() .read_to_string(&mut s) .unwrap(); assert_eq!(s, depinfo_resulting_data) } #[cfg(feature = "dist-client")] #[derive(Debug)] struct RlibDepsDetail { deps: Vec, mtime: time::SystemTime, } #[cfg(feature = "dist-client")] struct DepsSize; #[cfg(feature = "dist-client")] impl Meter for DepsSize { type Measure = usize; fn measure(&self, _k: &Q, v: &RlibDepsDetail) -> usize where PathBuf: Borrow, { use std::mem; // TODO: unfortunately there is exactly nothing you can do with the k given the // current trait bounds. Just use some kind of sane value; //let k_size = mem::size_of::() + k.capacity(); let k_size = 3 * 8 + 100; let crate_names_size: usize = v.deps.iter().map(|s| s.capacity()).sum(); let v_size: usize = mem::size_of::() + // Systemtime and vec itself v.deps.capacity() * mem::size_of::() + // Each string in the vec crate_names_size; // Contents of all strings k_size + v_size } } #[cfg(feature = "dist-client")] #[derive(Debug)] struct RlibDepReader { cache: Mutex>, executable: PathBuf, ls_arg: String, } #[cfg(feature = "dist-client")] impl RlibDepReader { fn new_with_check(executable: PathBuf, env_vars: &[(OsString, OsString)]) -> Result { let temp_dir = tempfile::Builder::new() .prefix("sccache-rlibreader") .tempdir() .context("Could not create temporary directory for rlib output")?; let temp_rlib = temp_dir.path().join("x.rlib"); let mut cmd = process::Command::new(&executable); cmd.arg("--crate-type=rlib") .arg("-o") .arg(&temp_rlib) .arg("-") .env_clear() .envs(env_vars.to_vec()); let process::Output { status, stdout, stderr, } = cmd.output()?; if !stdout.is_empty() { bail!( "rustc stdout non-empty when compiling a minimal rlib: {:?}", String::from_utf8_lossy(&stdout) ) } if !stderr.is_empty() { bail!( "rustc stderr non-empty when compiling a minimal rlib: {:?}", String::from_utf8_lossy(&stderr) ) } if !status.success() { bail!( "Failed to compile a minimal rlib with {}", executable.display() ) } // The goal of this cache is to avoid repeated lookups when building a single project. Let's budget 3MB. 
// Allowing for a 100 byte path, 50 dependencies per rlib and 20 characters per crate name, this roughly // approximates to `path_size + path + vec_size + num_deps * (systemtime_size + string_size + crate_name_len)` // ` 3*8 + 100 + 3*8 + 50 * ( 8 + 3*8 + 20 )` // `2748` bytes per crate // Allowing for possible overhead of up to double (for unused space in allocated memory), this means we // can cache information from about 570 rlibs - easily enough for a single project. const CACHE_SIZE: u64 = 3 * 1024 * 1024; let cache = LruCache::with_meter(CACHE_SIZE, DepsSize); let rustc_version = Self::get_rustc_version(&executable, env_vars)?; let rlib_dep_reader = RlibDepReader { cache: Mutex::new(cache), executable, ls_arg: Self::get_correct_ls_arg(rustc_version), }; if let Err(e) = rlib_dep_reader.discover_rlib_deps(env_vars, &temp_rlib) { bail!("Failed to read deps from minimal rlib: {}", e) } Ok(rlib_dep_reader) } fn get_rustc_version( executable: &PathBuf, env_vars: &[(OsString, OsString)], ) -> Result { let mut cmd = process::Command::new(executable); cmd.arg("--version").env_clear().envs(env_vars.to_vec()); let process::Output { status, stdout, stderr, } = cmd.output()?; if !status.success() { bail!("Failed to get rustc version with {}", executable.display()) } if stdout.is_empty() { bail!("rustc stdout empty when parsing version") } if !stderr.is_empty() { bail!( "rustc stderr non-empty when parsing version: {:?}", String::from_utf8_lossy(&stderr) ) } Self::parse_rustc_version(&stdout) } fn parse_rustc_version(stdout: &[u8]) -> Result { let stdout_string = String::from_utf8_lossy(stdout); let rustc_version: Vec<&str> = stdout_string.split_whitespace().collect(); if rustc_version[0] != "rustc" { bail!( "Expected rustc string in rustc version with {:?}", String::from_utf8_lossy(stdout) ) } Ok(Version::parse(rustc_version[1])?) } fn get_correct_ls_arg(version: Version) -> String { if version.major <= 1 && version.minor <= 74 { String::from("ls") } else { String::from("ls=root") } } fn discover_rlib_deps( &self, env_vars: &[(OsString, OsString)], rlib: &Path, ) -> Result> { let rlib_mtime = fs::metadata(rlib) .and_then(|m| m.modified()) .context("Unable to get rlib modified time")?; { let mut cache = self.cache.lock().unwrap(); if let Some(deps_detail) = cache.get(rlib) { if rlib_mtime == deps_detail.mtime { return Ok(deps_detail.deps.clone()); } } } trace!("Discovering dependencies of {}", rlib.display()); let mut cmd = process::Command::new(&self.executable); cmd.args(["-Z", &self.ls_arg]) .arg(rlib) .env_clear() .envs(env_vars.to_vec()) .env("RUSTC_BOOTSTRAP", "1"); // TODO: this is fairly naughty let process::Output { status, stdout, stderr, } = cmd.output()?; if !status.success() { bail!(format!("Failed to list deps of {}", rlib.display())) } if !stderr.is_empty() { bail!( "rustc -Z ls stderr non-empty: {:?}", String::from_utf8_lossy(&stderr) ) } let stdout = String::from_utf8(stdout).context("Error parsing rustc -Z ls output")?; let deps: Vec<_> = parse_rustc_z_ls(&stdout) .map(|deps| deps.into_iter().map(|dep| dep.to_owned()).collect())?; { // This will behave poorly if the rlib is changing under our feet, but in that case rustc // will also do the wrong thing, so the user has bigger issues to deal with. 
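            // Sketch of the resulting cache behaviour (hypothetical timeline):
            //   t0: discover_rlib_deps(libfoo.rlib) -> runs `rustc -Z ls`, inserts
            //       (deps, mtime=t0) below
            //   t1: same rlib, same mtime           -> early return from the LruCache
            //       at the top of this function
            //   t2: rlib rebuilt (new mtime)        -> stale entry ignored, rustc runs
            //       again and the insert below replaces it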
let mut cache = self.cache.lock().unwrap(); cache.insert( rlib.to_owned(), RlibDepsDetail { deps: deps.clone(), mtime: rlib_mtime, }, ); } Ok(deps) } } // Parse output like the following: // // ``` // =External Dependencies= // 1 std-08a5bd1ca58a28ee // 2 core-ed31c38c1a60e6f9 // 3 compiler_builtins-6bd92a903b271497 // 4 alloc-5184f4fa2c87f835 // 5 alloc_system-7a70df28ae5ce6c3 // 6 libc-fb97b8e8c331f065 // 7 unwind-3fec89e45492b583 // 8 alloc_jemalloc-3e9fce05c4bf31e5 // 9 panic_unwind-376f1801255ba526 // 10 bitflags-f482823cbc05f4d7 // 11 cfg_if-cf72e166fff77ced // ``` #[cfg(feature = "dist-client")] fn parse_rustc_z_ls(stdout: &str) -> Result> { let mut lines = stdout.lines(); loop { match lines.next() { Some("=External Dependencies=") => break, Some(_s) => {} None => bail!("No output from rustc -Z ls"), } } let mut dep_names = vec![]; for line in &mut lines { if line.is_empty() { break; } let mut line_splits = line.splitn(2, ' '); let num: usize = line_splits .next() .expect("Zero strings from line split") .parse() .context("Could not parse number from rustc -Z ls")?; let libstring = line_splits .next() .context("No lib string on line from rustc -Z ls")?; if num != dep_names.len() + 1 { bail!( "Unexpected numbering of {} in rustc -Z ls output", libstring ) } assert!(line_splits.next().is_none()); let mut libstring_splits = libstring.rsplitn(2, '-'); // Most things get printed as ${LIBNAME}-${HASH} but for some things // (native code-only libraries?), ${LIBNAME} is all you get. let libname = { let maybe_hash = libstring_splits .next() .context("Nothing in lib string from `rustc -Z ls`")?; if let Some(name) = libstring_splits.next() { name } else { maybe_hash } }; assert!(libstring_splits.next().is_none()); dep_names.push(libname); } for line in lines { if !line.is_empty() { bail!("Trailing non-blank lines in rustc -Z ls output") } } Ok(dep_names) } #[cfg(test)] mod test { use super::*; use crate::compiler::*; use crate::mock_command::*; use crate::test::mock_storage::MockStorage; use crate::test::utils::*; use fs::File; use itertools::Itertools; use std::ffi::OsStr; use std::io::{self, Write}; use std::sync::{Arc, Mutex}; use test_case::test_case; fn _parse_arguments(arguments: &[String]) -> CompilerArguments { let arguments = arguments.iter().map(OsString::from).collect::>(); parse_arguments(&arguments, ".".as_ref()) } macro_rules! parses { ( $( $s:expr ),* ) => { match _parse_arguments(&[ $( $s.to_string(), )* ]) { CompilerArguments::Ok(a) => a, o => panic!("Got unexpected parse result: {:?}", o), } } } macro_rules! 
fails { ( $( $s:expr ),* ) => { match _parse_arguments(&[ $( $s.to_string(), )* ]) { CompilerArguments::Ok(_) => panic!("Should not have parsed ok: `{}`", stringify!($( $s, )*)), o => o, } } } const TEST_RUSTC_VERSION: &str = r#" rustc 1.66.1 (90743e729 2023-01-10) binary: rustc commit-hash: 90743e7298aca107ddaa0c202a4d3604e29bfeb6 commit-date: 2023-01-10 host: x86_64-unknown-linux-gnu release: 1.66.1 LLVM version: 15.0.2 "#; #[test] #[allow(clippy::cognitive_complexity)] fn test_parse_arguments_simple() { let h = parses!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib" ); assert_eq!(h.output_dir.to_str(), Some("out")); assert!(h.dep_info.is_none()); assert!(h.externs.is_empty()); let h = parses!( "--emit=link", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib" ); assert_eq!(h.output_dir.to_str(), Some("out")); assert!(h.dep_info.is_none()); let h = parses!( "--emit", "link", "foo.rs", "--out-dir=out", "--crate-name=foo", "--crate-type=lib" ); assert_eq!(h.output_dir.to_str(), Some("out")); assert_eq!( parses!( "--emit", "link", "-C", "opt-level=1", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib" ), parses!( "--emit=link", "-Copt-level=1", "foo.rs", "--out-dir=out", "--crate-name=foo", "--crate-type=lib" ) ); let h = parses!( "--emit", "link,dep-info", "foo.rs", "--out-dir", "out", "--crate-name", "my_crate", "--crate-type", "lib", "-C", "extra-filename=-abcxyz" ); assert_eq!(h.output_dir.to_str(), Some("out")); assert_eq!(h.dep_info.unwrap().to_str().unwrap(), "my_crate-abcxyz.d"); fails!( "--emit", "link", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib" ); fails!( "--emit", "link", "foo.rs", "--crate-name=foo", "--crate-type=lib" ); fails!( "--emit", "asm", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib" ); fails!( "--emit", "asm,link", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib" ); fails!( "--emit", "asm,link,dep-info", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib" ); fails!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name=foo" ); fails!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-type=lib" ); // From an actual cargo compilation, with some args shortened: let h = parses!( "--crate-name", "foo", "src/lib.rs", "--crate-type", "lib", "--emit=dep-info,link", "-C", "debuginfo=2", "-C", "metadata=d6ae26f5bcfb7733", "-C", "extra-filename=-d6ae26f5bcfb7733", "--out-dir", "/foo/target/debug/deps", "-L", "dependency=/foo/target/debug/deps", "--extern", "libc=/foo/target/debug/deps/liblibc-89a24418d48d484a.rlib", "--extern", "log=/foo/target/debug/deps/liblog-2f7366be74992849.rlib" ); assert_eq!(h.output_dir.to_str(), Some("/foo/target/debug/deps")); assert_eq!(h.crate_name, "foo"); assert_eq!( h.dep_info.unwrap().to_str().unwrap(), "foo-d6ae26f5bcfb7733.d" ); assert_eq!( h.externs, ovec![ "/foo/target/debug/deps/liblibc-89a24418d48d484a.rlib", "/foo/target/debug/deps/liblog-2f7366be74992849.rlib" ] ); } #[test] fn test_parse_arguments_incremental() { parses!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib" ); let r = fails!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "-C", "incremental=/foo" ); assert_eq!(r, CompilerArguments::CannotCache("incremental", None)) } #[test] fn test_parse_arguments_dep_info_no_extra_filename() { let h = parses!( "--crate-name", "foo", "--crate-type", "lib", "src/lib.rs", 
"--emit=dep-info,link", "--out-dir", "/out" ); assert_eq!(h.dep_info, Some("foo.d".into())); } #[test] fn test_parse_arguments_native_libs() { parses!( "--crate-name", "foo", "--crate-type", "lib,staticlib", "--emit", "link", "-l", "bar", "foo.rs", "--out-dir", "out" ); parses!( "--crate-name", "foo", "--crate-type", "lib,staticlib", "--emit", "link", "-l", "static=bar", "foo.rs", "--out-dir", "out" ); parses!( "--crate-name", "foo", "--crate-type", "lib,staticlib", "--emit", "link", "-l", "dylib=bar", "foo.rs", "--out-dir", "out" ); } #[test] fn test_parse_arguments_non_rlib_crate() { parses!( "--crate-type", "rlib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo" ); parses!( "--crate-type", "lib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo" ); parses!( "--crate-type", "staticlib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo" ); parses!( "--crate-type", "rlib,staticlib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo" ); fails!( "--crate-type", "bin", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo" ); fails!( "--crate-type", "rlib,dylib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo" ); } #[test] fn test_parse_arguments_color() { let h = parses!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib" ); assert_eq!(h.color_mode, ColorMode::Auto); let h = parses!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--color=always" ); assert_eq!(h.color_mode, ColorMode::On); let h = parses!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--color=never" ); assert_eq!(h.color_mode, ColorMode::Off); let h = parses!( "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--color=auto" ); assert_eq!(h.color_mode, ColorMode::Auto); } #[test] fn test_get_compiler_outputs() { let creator = new_creator(); next_command( &creator, Ok(MockChild::new(exit_status(0), "foo\nbar\nbaz", "")), ); let outputs = get_compiler_outputs( &creator, "rustc".as_ref(), ovec!("a", "b"), "cwd".as_ref(), &[], ) .wait() .unwrap(); assert_eq!(outputs, &["foo", "bar", "baz"]); } #[test] fn test_get_compiler_outputs_fail() { let creator = new_creator(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "error"))); assert!(get_compiler_outputs( &creator, "rustc".as_ref(), ovec!("a", "b"), "cwd".as_ref(), &[] ) .wait() .is_err()); } #[test] fn test_parse_dep_info() { let deps = "foo: baz.rs abc.rs bar.rs baz.rs: abc.rs: bar.rs: "; assert_eq!( pathvec!["abc.rs", "bar.rs", "baz.rs"], parse_dep_info(deps, "") ); } #[test] fn test_parse_dep_info_with_escaped_spaces() { let deps = r#"foo: baz.rs abc\ def.rs baz.rs: abc def.rs: "#; assert_eq!(pathvec!["abc def.rs", "baz.rs"], parse_dep_info(deps, "")); } #[cfg(not(windows))] #[test] fn test_parse_dep_info_cwd() { let deps = "foo: baz.rs abc.rs bar.rs baz.rs: abc.rs: bar.rs: "; assert_eq!( pathvec!["foo/abc.rs", "foo/bar.rs", "foo/baz.rs"], parse_dep_info(deps, "foo/") ); assert_eq!( pathvec!["/foo/bar/abc.rs", "/foo/bar/bar.rs", "/foo/bar/baz.rs"], parse_dep_info(deps, "/foo/bar/") ); } #[cfg(not(windows))] #[test] fn test_parse_dep_info_abs_paths() { let deps = "/foo/foo: /foo/baz.rs /foo/abc.rs /foo/bar.rs /foo/baz.rs: /foo/abc.rs: /foo/bar.rs: "; assert_eq!( pathvec!["/foo/abc.rs", "/foo/bar.rs", "/foo/baz.rs"], parse_dep_info(deps, "/bar/") ); } #[cfg(windows)] 
#[test] fn test_parse_dep_info_cwd() { let deps = "foo: baz.rs abc.rs bar.rs baz.rs: abc.rs: bar.rs: "; assert_eq!( pathvec!["foo/abc.rs", "foo/bar.rs", "foo/baz.rs"], parse_dep_info(deps, "foo/") ); assert_eq!( pathvec![ "c:/foo/bar/abc.rs", "c:/foo/bar/bar.rs", "c:/foo/bar/baz.rs" ], parse_dep_info(deps, "c:/foo/bar/") ); } #[cfg(windows)] #[test] fn test_parse_dep_info_abs_paths() { let deps = "c:/foo/foo: c:/foo/baz.rs c:/foo/abc.rs c:/foo/bar.rs c:/foo/baz.rs: c:/foo/bar.rs c:/foo/abc.rs: c:/foo/bar.rs: "; assert_eq!( pathvec!["c:/foo/abc.rs", "c:/foo/bar.rs", "c:/foo/baz.rs"], parse_dep_info(deps, "c:/bar/") ); } #[cfg(feature = "dist-client")] #[test] fn test_parse_rustc_z_ls_pre_1_55() { let output = "=External Dependencies= 1 lucet_runtime 2 lucet_runtime_internals-1ff6232b6940e924 3 lucet_runtime_macros-c18e1952b835769e "; let res = parse_rustc_z_ls(output); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(res.len(), 3); assert_eq!(res[0], "lucet_runtime"); assert_eq!(res[1], "lucet_runtime_internals"); assert_eq!(res[2], "lucet_runtime_macros"); } #[cfg(feature = "dist-client")] #[test] fn test_parse_rustc_z_ls_post_1_55() { // This was introduced in rust 1.55 by // https://github.com/rust-lang/rust/commit/cef3ab75b12155e0582dd8b7710b7b901215fdd6 let output = "Crate info: name lucet_runtime hash 6c42566fc9757bba stable_crate_id StableCrateId(11157525371370257329) proc_macro false =External Dependencies= 1 lucet_runtime 2 lucet_runtime_internals-1ff6232b6940e924 3 lucet_runtime_macros-c18e1952b835769e "; let res = parse_rustc_z_ls(output); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(res.len(), 3); assert_eq!(res[0], "lucet_runtime"); assert_eq!(res[1], "lucet_runtime_internals"); assert_eq!(res[2], "lucet_runtime_macros"); } #[cfg(feature = "dist-client")] #[test] fn test_rlib_dep_reader_call() { let cargo_home = std::env::var("CARGO_HOME"); assert!(cargo_home.is_ok()); let mut env_vars = vec![]; if let Some(rustup_home) = std::env::var_os("RUSTUP_HOME") { env_vars.push(("RUSTUP_HOME".into(), rustup_home)); } let mut rustc_path = PathBuf::from(cargo_home.unwrap()); rustc_path.push("bin"); rustc_path.push("rustc"); let rlib_dep_reader = RlibDepReader::new_with_check(rustc_path, &env_vars); let is_ok = rlib_dep_reader.is_ok(); // Unwrap so the error is reported in the test output let _ = rlib_dep_reader.unwrap(); assert!(is_ok); } #[cfg(feature = "dist-client")] #[test] fn test_rlib_dep_reader_parse_rustc_version() { let v0 = RlibDepReader::parse_rustc_version("rustc 1.2.3 aaaa".as_bytes()); assert!(v0.is_ok()); let v0 = v0.unwrap(); assert_eq!(v0.major, 1); assert_eq!(v0.minor, 2); assert_eq!(v0.patch, 3); assert!(RlibDepReader::parse_rustc_version("rutc 1.2.3 aaaa".as_bytes()).is_err()); assert!(RlibDepReader::parse_rustc_version("1.2.3".as_bytes()).is_err()); } #[cfg(feature = "dist-client")] #[test] fn test_rlib_dep_reader_get_correct_ls_arg() { assert_eq!( RlibDepReader::get_correct_ls_arg(Version::new(0, 73, 0)), "ls" ); assert_eq!( RlibDepReader::get_correct_ls_arg(Version::new(1, 73, 0)), "ls" ); assert_eq!( RlibDepReader::get_correct_ls_arg(Version::new(1, 73, 1)), "ls" ); assert_eq!( RlibDepReader::get_correct_ls_arg(Version::new(1, 74, 0)), "ls" ); assert_eq!( RlibDepReader::get_correct_ls_arg(Version::new(1, 74, 1)), "ls" ); assert_eq!( RlibDepReader::get_correct_ls_arg(Version::new(1, 75, 0)), "ls=root" ); assert_eq!( RlibDepReader::get_correct_ls_arg(Version::new(2, 73, 0)), "ls=root" ); assert_eq!( 
RlibDepReader::get_correct_ls_arg(Version::new(2, 74, 0)), "ls=root" ); } fn mock_dep_info(creator: &Arc>, dep_srcs: &[&str]) { // Mock the `rustc --emit=dep-info` process by writing // a dep-info file. let mut sorted_deps = dep_srcs .iter() .map(|s| (*s).to_string()) .collect::>(); sorted_deps.sort(); next_command_calls(creator, move |args| { let mut dep_info_path = None; let mut it = args.iter(); while let Some(a) = it.next() { if a == "-o" { dep_info_path = it.next(); break; } } let dep_info_path = dep_info_path.unwrap(); let mut f = File::create(dep_info_path)?; writeln!(f, "blah: {}", sorted_deps.iter().join(" "))?; for d in sorted_deps.iter() { writeln!(f, "{}:", d)?; } Ok(MockChild::new(exit_status(0), "", "")) }); } fn mock_file_names(creator: &Arc>, filenames: &[&str]) { // Mock the `rustc --print=file-names` process output. next_command( creator, Ok(MockChild::new( exit_status(0), filenames.iter().join("\n"), "", )), ); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_generate_hash_key(preprocessor_cache_mode: bool) { use ar::{Builder, Header}; drop(env_logger::try_init()); let f = TestFixture::new(); const FAKE_DIGEST: &str = "abcd1234"; const BAZ_O_SIZE: u64 = 1024; // We'll just use empty files for each of these. for s in ["foo.rs", "bar.rs", "bar.rlib"].iter() { f.touch(s).unwrap(); } // libbaz.a needs to be a valid archive. create_file(f.tempdir.path(), "libbaz.a", |f| { let mut builder = Builder::new(f); let hdr = Header::new(b"baz.o".to_vec(), BAZ_O_SIZE); builder.append(&hdr, io::repeat(0).take(BAZ_O_SIZE))?; Ok(()) }) .unwrap(); let mut m = Digest::new(); m.update(b"baz.o"); m.update(&vec![0; BAZ_O_SIZE as usize]); let libbaz_a_digest = m.finish(); let mut emit = HashSet::new(); emit.insert("link".to_string()); emit.insert("metadata".to_string()); let hasher = Box::new(RustHasher { executable: "rustc".into(), host: "x86-64-unknown-unknown-unknown".to_owned(), version: TEST_RUSTC_VERSION.to_string(), sysroot: f.tempdir.path().join("sysroot"), compiler_shlibs_digests: vec![FAKE_DIGEST.to_owned()], #[cfg(feature = "dist-client")] rlib_dep_reader: None, parsed_args: ParsedArguments { arguments: vec![ Argument::Raw("a".into()), Argument::WithValue( "--cfg", ArgData::PassThrough("xyz".into()), ArgDisposition::Separated, ), Argument::Raw("b".into()), Argument::WithValue( "--cfg", ArgData::PassThrough("abc".into()), ArgDisposition::Separated, ), ], output_dir: "foo/".into(), externs: vec!["bar.rlib".into()], crate_link_paths: vec![], staticlibs: vec![f.tempdir.path().join("libbaz.a")], crate_name: "foo".into(), crate_types: CrateTypes { rlib: true, staticlib: false, }, dep_info: None, emit, color_mode: ColorMode::Auto, has_json: false, profile: None, gcno: None, target_json: None, }, }); let creator = new_creator(); mock_dep_info(&creator, &["foo.rs", "bar.rs"]); mock_file_names(&creator, &["foo.rlib", "foo.a"]); let runtime = single_threaded_runtime(); let pool = runtime.handle().clone(); let res = hasher .generate_hash_key( &creator, f.tempdir.path().to_owned(), [ (OsString::from("CARGO_PKG_NAME"), OsString::from("foo")), (OsString::from("FOO"), OsString::from("bar")), (OsString::from("CARGO_BLAH"), OsString::from("abc")), ( OsString::from("CARGO_REGISTRIES_A_TOKEN"), OsString::from("ignored"), ), ] .to_vec(), false, &pool, false, Arc::new(MockStorage::new(None, preprocessor_cache_mode)), CacheControl::Default, ) .wait() .unwrap(); let m = Digest::new(); let empty_digest = m.finish(); let mut m = Digest::new(); // 
Version. m.update(CACHE_VERSION); // sysroot shlibs digests. m.update(FAKE_DIGEST.as_bytes()); // Arguments, with cfgs sorted at the end. OsStr::new("ab--cfgabc--cfgxyz").hash(&mut HashToDigest { digest: &mut m }); // bar.rs (source file, from dep-info) m.update(empty_digest.as_bytes()); // foo.rs (source file, from dep-info) m.update(empty_digest.as_bytes()); // bar.rlib (extern crate, from externs) m.update(empty_digest.as_bytes()); // libbaz.a (static library, from staticlibs), containing a single // file, baz.o, consisting of 1024 bytes of zeroes. m.update(libbaz_a_digest.as_bytes()); // Env vars OsStr::new("CARGO_BLAH").hash(&mut HashToDigest { digest: &mut m }); m.update(b"="); OsStr::new("abc").hash(&mut HashToDigest { digest: &mut m }); OsStr::new("CARGO_PKG_NAME").hash(&mut HashToDigest { digest: &mut m }); m.update(b"="); OsStr::new("foo").hash(&mut HashToDigest { digest: &mut m }); f.tempdir.path().hash(&mut HashToDigest { digest: &mut m }); TEST_RUSTC_VERSION.hash(&mut HashToDigest { digest: &mut m }); let digest = m.finish(); assert_eq!(res.key, digest); let mut out = res.compilation.outputs().map(|k| k.key).collect::>(); out.sort(); assert_eq!(out, vec!["foo.a", "foo.rlib", "foo.rmeta"]); } fn hash_key( f: &TestFixture, args: &[&'static str], env_vars: &[(OsString, OsString)], pre_func: F, preprocessor_cache_mode: bool, ) -> String where F: Fn(&Path) -> Result<()>, { let oargs = args.iter().map(OsString::from).collect::>(); let parsed_args = match parse_arguments(&oargs, f.tempdir.path()) { CompilerArguments::Ok(parsed_args) => parsed_args, o => panic!("Got unexpected parse result: {:?}", o), }; // Just use empty files for sources. { let src = &"foo.rs"; let s = format!("Failed to create {}", src); f.touch(src).expect(&s); } // as well as externs for e in parsed_args.externs.iter() { let s = format!("Failed to create {:?}", e); f.touch(e.to_str().unwrap()).expect(&s); } pre_func(f.tempdir.path()).expect("Failed to execute pre_func"); let hasher = Box::new(RustHasher { executable: "rustc".into(), host: "x86-64-unknown-unknown-unknown".to_owned(), version: TEST_RUSTC_VERSION.to_string(), sysroot: f.tempdir.path().join("sysroot"), compiler_shlibs_digests: vec![], #[cfg(feature = "dist-client")] rlib_dep_reader: None, parsed_args, }); let creator = new_creator(); let runtime = single_threaded_runtime(); let pool = runtime.handle().clone(); mock_dep_info(&creator, &["foo.rs"]); mock_file_names(&creator, &["foo.rlib"]); hasher .generate_hash_key( &creator, f.tempdir.path().to_owned(), env_vars.to_owned(), false, &pool, false, Arc::new(MockStorage::new(None, preprocessor_cache_mode)), CacheControl::Default, ) .wait() .unwrap() .key } #[allow(clippy::unnecessary_unwrap)] fn nothing(_path: &Path) -> Result<()> { Ok(()) } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_equal_hashes_externs(preprocessor_cache_mode: bool) { // Put some content in the extern rlibs so we can verify that the content hashes are // used in the right order. 
fn mk_files(tempdir: &Path) -> Result<()> { create_file(tempdir, "a.rlib", |mut f| f.write_all(b"this is a.rlib"))?; create_file(tempdir, "b.rlib", |mut f| f.write_all(b"this is b.rlib"))?; Ok(()) } let f = TestFixture::new(); assert_eq!( hash_key( &f, &[ "--emit", "link", "foo.rs", "--extern", "a=a.rlib", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--extern", "b=b.rlib" ], &[], mk_files, preprocessor_cache_mode, ), hash_key( &f, &[ "--extern", "b=b.rlib", "--emit", "link", "--extern", "a=a.rlib", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib" ], &[], mk_files, preprocessor_cache_mode, ) ); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_equal_hashes_link_paths(preprocessor_cache_mode: bool) { let f = TestFixture::new(); assert_eq!( hash_key( &f, &[ "--emit", "link", "-L", "x=x", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "-L", "y=y" ], &[], nothing, preprocessor_cache_mode, ), hash_key( &f, &[ "-L", "y=y", "--emit", "link", "-L", "x=x", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib" ], &[], nothing, preprocessor_cache_mode, ) ); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_equal_hashes_ignored_args(preprocessor_cache_mode: bool) { let f = TestFixture::new(); assert_eq!( hash_key( &f, &[ "--emit", "link", "-L", "x=x", "foo.rs", "--out-dir", "out", "--extern", "a=1", "--crate-name", "foo", "--crate-type", "lib", "-L", "y=y" ], &[], nothing, preprocessor_cache_mode, ), hash_key( &f, &[ "-L", "y=a", "--emit", "link", "-L", "x=b", "foo.rs", "--extern", "a=2", "--out-dir", "out2", "--crate-name", "foo", "--crate-type", "lib" ], &[], nothing, preprocessor_cache_mode, ) ); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] fn test_equal_hashes_cfg_features(preprocessor_cache_mode: bool) { let f = TestFixture::new(); assert_eq!( hash_key( &f, &[ "--emit", "link", "--cfg", "feature=a", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--cfg", "feature=b" ], &[], nothing, preprocessor_cache_mode, ), hash_key( &f, &[ "--cfg", "feature=b", "--emit", "link", "--cfg", "feature=a", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib" ], &[], nothing, preprocessor_cache_mode, ) ); } #[test] fn test_parse_unstable_profile_flag() { let h = parses!( "--crate-name", "foo", "--crate-type", "lib", "./src/lib.rs", "--emit=dep-info,link", "--out-dir", "/out", "-Zprofile" ); assert_eq!(h.gcno, Some("foo.gcno".into())); let h = parses!( "--crate-name", "foo", "--crate-type", "lib", "./src/lib.rs", "--emit=dep-info,link", "-C", "extra-filename=-a1b6419f8321841f", "--out-dir", "/out", "-Zprofile" ); assert_eq!(h.gcno, Some("foo-a1b6419f8321841f.gcno".into())); } #[test] fn test_parse_remap_path_prefix() { let h = parses!( "--crate-name", "foo", "--crate-type", "lib", "./src/lib.rs", "--emit=dep-info,link", "--out-dir", "/out", "--remap-path-prefix", "/home/test=~", "--remap-path-prefix", "/root=~" ); assert!(h.arguments.contains(&Argument::WithValue( "--remap-path-prefix", ArgData::PassThrough(OsString::from("/home/test=~")), ArgDisposition::Separated ))); assert!(h.arguments.contains(&Argument::WithValue( "--remap-path-prefix", ArgData::PassThrough(OsString::from("/root=~")), ArgDisposition::Separated ))); } #[test] fn test_parse_target() { // Parse a --target argument that is a string 
(not a path to a .json file). let h = parses!( "--crate-name", "foo", "--crate-type", "lib", "./src/lib.rs", "--emit=dep-info,link", "--out-dir", "/out", "--target", "string" ); assert!(h.arguments.contains(&Argument::WithValue( "--target", ArgData::Target(ArgTarget::Name("string".to_owned())), ArgDisposition::Separated ))); assert!(h.target_json.is_none()); // Parse a --target argument that is a path. let h = parses!( "--crate-name", "foo", "--crate-type", "lib", "./src/lib.rs", "--emit=dep-info,link", "--out-dir", "/out", "--target", "/path/to/target.json" ); assert!(h.arguments.contains(&Argument::WithValue( "--target", ArgData::Target(ArgTarget::Path(PathBuf::from("/path/to/target.json"))), ArgDisposition::Separated ))); assert_eq!(h.target_json, Some(PathBuf::from("/path/to/target.json"))); } } mozilla-sccache-40c3d6b/src/compiler/tasking_vx.rs000066400000000000000000000635211475712407500223260ustar00rootroot00000000000000// Copyright 2018 Mozilla Foundation // Copyright 2019 ESRLabs AG // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{ compiler::{ args::{ ArgDisposition, ArgInfo, ArgToStringResult, ArgsIter, Argument, FromArg, IntoArg, NormalizedDisposition, PathTransformerFn, SearchableArgInfo, }, c::{ArtifactDescriptor, CCompilerImpl, CCompilerKind, ParsedArguments}, CCompileCommand, Cacheable, ColorMode, CompileCommand, CompilerArguments, Language, SingleCompileCommand, }, counted_array, dist, errors::*, mock_command::{CommandCreatorSync, RunCommand}, util::run_input_output, }; use async_trait::async_trait; use futures::TryFutureExt; use log::Level::Trace; use std::{ collections::HashMap, ffi::OsString, path::{Path, PathBuf}, process, }; #[derive(Clone, Debug)] pub struct TaskingVX; #[async_trait] impl CCompilerImpl for TaskingVX { fn kind(&self) -> CCompilerKind { CCompilerKind::TaskingVX } fn plusplus(&self) -> bool { false } fn version(&self) -> Option { None } fn parse_arguments( &self, arguments: &[OsString], cwd: &Path, _env_vars: &[(OsString, OsString)], ) -> CompilerArguments { parse_arguments(arguments, cwd, &ARGS[..]) } async fn preprocess( &self, creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], may_dist: bool, rewrite_includes_only: bool, _preprocessor_cache_mode: bool, ) -> Result where T: CommandCreatorSync, { preprocess( creator, executable, parsed_args, cwd, env_vars, may_dist, rewrite_includes_only, ) .await } fn generate_compile_commands( &self, path_transformer: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], _rewrite_includes_only: bool, ) -> Result<( Box>, Option, Cacheable, )> where T: CommandCreatorSync, { generate_compile_commands(path_transformer, executable, parsed_args, cwd, env_vars).map( |(command, dist_command, cacheable)| { (CCompileCommand::new(command), dist_command, cacheable) }, ) } } ArgData! 
{ pub DoCompilation, NotCompilationFlag, NotCompilation(OsString), Output(PathBuf), PassThrough(OsString), PreprocessorArgument(OsString), PreprocessorArgumentPath(PathBuf), DepFile(PathBuf), TooHardFlag, TooHard(OsString), } use self::ArgData::*; counted_array!(pub static ARGS: [ArgInfo; _] = [ take_arg!("--define", OsString, Concatenated('='), PreprocessorArgument), take_arg!("--dep-file", PathBuf, Concatenated('='), DepFile), flag!("--dry-run", TooHardFlag), take_arg!("--help", OsString, Concatenated('='), NotCompilation), take_arg!("--include-directory", PathBuf, Concatenated('='), PreprocessorArgumentPath), take_arg!("--include-file", PathBuf, Concatenated('='), PreprocessorArgumentPath), take_arg!("--library-directory", OsString, Concatenated('='), PassThrough), take_arg!("--mil-split", OsString, Concatenated('='), TooHard), take_arg!("--option-file", OsString, Concatenated('='), TooHard), take_arg!("--output", PathBuf, Concatenated('='), Output), take_arg!("--preprocess", OsString, Concatenated('='), TooHard), take_arg!("--undefine", OsString, Separated, PreprocessorArgument), // ok flag!("--version", NotCompilationFlag), flag!("-?", NotCompilationFlag), take_arg!("-D", OsString, CanBeSeparated, PreprocessorArgument), flag!("-E", TooHardFlag), take_arg!("-H", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-I", PathBuf, CanBeSeparated, PreprocessorArgumentPath), take_arg!("-L", OsString, CanBeSeparated, PassThrough), take_arg!("-U", OsString, CanBeSeparated, PreprocessorArgument), flag!("-V", NotCompilationFlag), flag!("-c", DoCompilation), take_arg!("-f", OsString, Separated, TooHard), flag!("-n", TooHardFlag), take_arg!("-o", PathBuf, Separated, Output), ]); /// Parse `arguments`, determining whether it is supported. /// /// If any of the entries in `arguments` result in a compilation that /// cannot be cached, return `CompilerArguments::CannotCache`. /// If the commandline described by `arguments` is not compilation, /// return `CompilerArguments::NotCompilation`. /// Otherwise, return `CompilerArguments::Ok(ParsedArguments)`, with /// the `ParsedArguments` struct containing information parsed from /// `arguments`. 
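///
/// A minimal illustrative sketch of a cacheable invocation (mirroring the
/// unit tests at the bottom of this file; not a compiled doctest):
///
/// ```ignore
/// let args: Vec<OsString> = ["-c", "foo.c", "-o", "foo.o"]
///     .iter()
///     .map(OsString::from)
///     .collect();
/// match parse_arguments(&args, ".".as_ref(), &ARGS[..]) {
///     CompilerArguments::Ok(parsed) => assert_eq!(parsed.language, Language::C),
///     other => panic!("unexpected parse result: {:?}", other),
/// }
/// ```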
fn parse_arguments<S>(
    arguments: &[OsString],
    _cwd: &Path,
    arg_info: S,
) -> CompilerArguments<ParsedArguments>
where
    S: SearchableArgInfo<ArgData>,
{
    let mut common_args = vec![];
    let mut compilation = false;
    let mut input_arg = None;
    let mut multiple_input = false;
    let mut output_arg = None;
    let mut preprocessor_args = vec![];
    let mut depfile = None;

    for arg in ArgsIter::new(arguments.iter().cloned(), arg_info) {
        let arg = try_or_cannot_cache!(arg, "argument parse");
        match arg.get_data() {
            Some(TooHardFlag) | Some(TooHard(_)) => {
                cannot_cache!(arg.flag_str().expect("Can't be Argument::Raw/UnknownFlag",))
            }
            Some(NotCompilationFlag) | Some(NotCompilation(_)) => {
                return CompilerArguments::NotCompilation
            }
            Some(DoCompilation) => compilation = true,
            Some(Output(p)) => output_arg = Some(p.clone()),
            Some(DepFile(d)) => depfile = Some(d.clone()),
            Some(PreprocessorArgument(_))
            | Some(PreprocessorArgumentPath(_))
            | Some(PassThrough(_)) => {}
            None => match arg {
                Argument::Raw(ref val) => {
                    if input_arg.is_some() {
                        multiple_input = true;
                    }
                    input_arg = Some(val.clone());
                }
                Argument::UnknownFlag(_) => {}
                _ => unreachable!(),
            },
        }
        let args = match arg.get_data() {
            Some(PassThrough(_)) => &mut common_args,
            Some(DepFile(_)) => continue,
            Some(PreprocessorArgument(_)) | Some(PreprocessorArgumentPath(_)) => {
                &mut preprocessor_args
            }
            Some(DoCompilation) | Some(Output(_)) => continue,
            Some(TooHardFlag) | Some(TooHard(_)) | Some(NotCompilationFlag)
            | Some(NotCompilation(_)) => unreachable!(),
            None => match arg {
                Argument::Raw(_) => continue,
                Argument::UnknownFlag(_) => &mut common_args,
                _ => unreachable!(),
            },
        };
        // Normalize attributes such as "-I foo", "-D FOO=bar", as
        // "-Ifoo", "-DFOO=bar", etc. and "-includefoo", "idirafterbar" as
        // "-include foo", "-idirafter bar", etc.
        let norm = match arg.flag_str() {
            Some(s) if s.len() == 2 => NormalizedDisposition::Concatenated,
            _ => NormalizedDisposition::Separated,
        };
        args.extend(arg.normalize(norm).iter_os_strings());
    }

    // We only support compilation.
    if !compilation {
        return CompilerArguments::NotCompilation;
    }
    // Can't cache compilations with multiple inputs.
    if multiple_input {
        cannot_cache!("multiple input files");
    }
    let input = match input_arg {
        Some(i) => i,
        // We can't cache compilation without an input.
None => cannot_cache!("no input file"), }; let language = match Language::from_file_name(Path::new(&input)) { Some(l) => l, None => cannot_cache!("unknown source language"), }; // --dep-file without any argument is valid too and uses the source file name // with extension .d as depfile name depfile = depfile.map(|d| { if d.as_os_str().is_empty() { Path::new(&input).with_extension("d") } else { d } }); let output = output_arg .map(PathBuf::from) .unwrap_or_else(|| Path::new(&input).with_extension("o")); let mut outputs = HashMap::with_capacity(1); outputs.insert( "obj", ArtifactDescriptor { path: output, optional: false, }, ); CompilerArguments::Ok(ParsedArguments { input: input.into(), double_dash_input: false, language, compilation_flag: "-c".into(), depfile, outputs, dependency_args: vec![], preprocessor_args, common_args, arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }) } async fn preprocess( creator: &T, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], _may_dist: bool, _rewrite_includes_only: bool, ) -> Result where T: CommandCreatorSync, { let mut preprocess = creator.clone().new_command_sync(executable); preprocess .arg("-E") .arg(&parsed_args.input) .args(&parsed_args.preprocessor_args) .args(&parsed_args.common_args) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); if log_enabled!(Trace) { trace!("preprocess: {:?}", preprocess); } let preprocess = run_input_output(preprocess, None); // Tasking can produce a dep file while preprocessing, BUT if this is // enabled the preprocessor output is discarded. Run depfile generation // first and preprocessing for hash generation afterwards. // // From: TASKING VX-toolset for TriCore User Guide // With --preprocess=+make the compiler // will generate dependency lines that can be used in a Makefile. The // preprocessor output is discarded. The default target name is the basename // of the input file, with the extension .o. With the option --make-target // you can specify a target name which overrules the default target name. 
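//
// Illustrative shape of the two resulting invocations for an input `foo.c`
// (an assumed, abbreviated sketch; the real flags come from `parsed_args`):
//
//     <executable> -Em -o foo.d foo.c <preprocessor_args> <common_args>
//     <executable> -E foo.c <preprocessor_args> <common_args>
//
// The first call only produces the depfile; the second produces the
// preprocessor output that feeds into the cache key.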
if let Some(ref depfile) = parsed_args.depfile { let mut generate_depfile = creator.clone().new_command_sync(executable); generate_depfile .arg("-Em") .arg("-o") .arg(depfile) .arg(&parsed_args.input) .args(&parsed_args.preprocessor_args) .args(&parsed_args.common_args) .env_clear() .envs(env_vars.to_vec()) .current_dir(cwd); if log_enabled!(Trace) { trace!("dep file generation: {:?}", generate_depfile); } let generate_depfile = run_input_output(generate_depfile, None); generate_depfile.and_then(|_| preprocess).await } else { preprocess.await } } fn generate_compile_commands( _: &mut dist::PathTransformer, executable: &Path, parsed_args: &ParsedArguments, cwd: &Path, env_vars: &[(OsString, OsString)], ) -> Result<( SingleCompileCommand, Option, Cacheable, )> { trace!("compile"); let out_file = match parsed_args.outputs.get("obj") { Some(obj) => obj, None => return Err(anyhow!("Missing object file output")), }; let mut arguments: Vec = vec![ parsed_args.compilation_flag.clone(), parsed_args.input.clone().into(), "-o".into(), out_file.path.as_os_str().into(), ]; arguments.extend_from_slice(&parsed_args.preprocessor_args); arguments.extend_from_slice(&parsed_args.unhashed_args); arguments.extend_from_slice(&parsed_args.common_args); let command = SingleCompileCommand { executable: executable.to_owned(), arguments, env_vars: env_vars.to_owned(), cwd: cwd.to_owned(), }; Ok((command, None, Cacheable::Yes)) } #[cfg(test)] mod test { use super::{ dist, generate_compile_commands, parse_arguments, Language, OsString, ParsedArguments, PathBuf, ARGS, }; use crate::compiler::c::ArtifactDescriptor; use crate::compiler::*; use crate::mock_command::*; use crate::server; use crate::test::mock_storage::MockStorage; use crate::test::utils::*; fn parse_arguments_(arguments: Vec) -> CompilerArguments { let args = arguments.iter().map(OsString::from).collect::>(); parse_arguments(&args, ".".as_ref(), &ARGS[..]) } #[test] fn test_parse_arguments_simple() { let args = stringvec!["-c", "foo.c", "-o", "foo.o"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_default_name() { let args = stringvec!["-c", "foo.c"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert!(common_args.is_empty()); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_extra() { let args = stringvec!["-c", "foo.cc", "-fabc", "-o", "foo.o", "-mxyz"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cc"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fabc", "-mxyz"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_values() { let args = stringvec!["-c", "foo.cxx", "-fabc", "-I", "include", "-o", "foo.o", "-H", "file"]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.cxx"), input.to_str()); assert_eq!(Language::Cxx, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert_eq!(ovec!["-Iinclude", "-Hfile"], preprocessor_args); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_preprocessor_args() { let args = stringvec![ "-c", "foo.c", "-fabc", "--include-directory=bar", "--include-file=foo", "-o", "foo.o" ]; let ParsedArguments { input, language, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert_eq!( ovec!["--include-directory=bar", "--include-file=foo"], preprocessor_args ); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_explicit_dep_target() { let args = stringvec!["-c", "foo.c", "--dep-file=depfile", "-fabc", "-o", "foo.o"]; let ParsedArguments { input, language, depfile, outputs, preprocessor_args, msvc_show_includes, common_args, .. } = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_eq!(Some("depfile"), depfile.unwrap().to_str()); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_implicit_dep_target() { let args = stringvec!["-c", "foo.c", "--dep-file", "-fabc", "-o", "foo.o"]; let ParsedArguments { input, language, depfile, outputs, preprocessor_args, msvc_show_includes, common_args, .. 
} = match parse_arguments_(args) { CompilerArguments::Ok(args) => args, o => panic!("Got unexpected parse result: {:?}", o), }; assert_eq!(Some("foo.c"), input.to_str()); assert_eq!(Language::C, language); assert_eq!(Some("foo.d"), depfile.unwrap().to_str()); assert_map_contains!( outputs, ( "obj", ArtifactDescriptor { path: PathBuf::from("foo.o"), optional: false } ) ); assert!(preprocessor_args.is_empty()); assert_eq!(ovec!["-fabc"], common_args); assert!(!msvc_show_includes); } #[test] fn test_parse_arguments_empty_args() { assert_eq!(CompilerArguments::NotCompilation, parse_arguments_(vec![])); } #[test] fn test_parse_arguments_not_compile() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_(stringvec!["-o", "foo"]) ); } #[test] fn test_parse_arguments_too_many_inputs() { assert_eq!( CompilerArguments::CannotCache("multiple input files", None), parse_arguments_(stringvec!["-c", "foo.c", "-o", "foo.o", "bar.c"]) ); } #[test] fn test_parse_arguments_link() { assert_eq!( CompilerArguments::NotCompilation, parse_arguments_(stringvec!["--link-only", "foo.o", "-o", "foo.so", "bar.o"]) ); } #[test] fn test_parse_dry_run() { assert_eq!( CompilerArguments::CannotCache("--dry-run", None), parse_arguments_(stringvec!["--dry-run", "-c", "foo.c"]) ); assert_eq!( CompilerArguments::CannotCache("-n", None), parse_arguments_(stringvec!["-n", "-c", "foo.c"]) ); } #[test] fn test_compile_simple() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.c".into(), double_dash_input: false, language: Language::C, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec![], arch_args: vec![], unhashed_args: vec![], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, _, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], ) .unwrap(); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable); // Ensure that we ran all processes. 
assert_eq!(0, creator.lock().unwrap().children.len()); } #[test] fn test_cuda_threads_included_in_compile_command() { let creator = new_creator(); let f = TestFixture::new(); let parsed_args = ParsedArguments { input: "foo.cu".into(), double_dash_input: false, language: Language::Cuda, compilation_flag: "-c".into(), depfile: None, outputs: vec![( "obj", ArtifactDescriptor { path: "foo.o".into(), optional: false, }, )] .into_iter() .collect(), dependency_args: vec![], preprocessor_args: vec![], common_args: vec![], arch_args: vec![], unhashed_args: ovec!["--threads", "2"], extra_dist_files: vec![], extra_hash_files: vec![], msvc_show_includes: false, profile_generate: false, color_mode: ColorMode::Auto, suppress_rewrite_includes_only: false, too_hard_for_preprocessor_cache_mode: None, }; let runtime = single_threaded_runtime(); let storage = MockStorage::new(None, false); let storage: std::sync::Arc = std::sync::Arc::new(storage); let service = server::SccacheService::mock_with_storage(storage, runtime.handle().clone()); let compiler = &f.bins[0]; // Compiler invocation. next_command(&creator, Ok(MockChild::new(exit_status(0), "", ""))); let mut path_transformer = dist::PathTransformer::new(); let (command, _, cacheable) = generate_compile_commands( &mut path_transformer, compiler, &parsed_args, f.tempdir.path(), &[], ) .unwrap(); assert_eq!( ovec!["-c", "foo.cu", "-o", "foo.o", "--threads", "2"], command.arguments ); let _ = command.execute(&service, &creator).wait(); assert_eq!(Cacheable::Yes, cacheable); // Ensure that we ran all processes. assert_eq!(0, creator.lock().unwrap().children.len()); } } mozilla-sccache-40c3d6b/src/config.rs000066400000000000000000001457111475712407500176060ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::CacheMode; use directories::ProjectDirs; use fs::File; use fs_err as fs; use once_cell::sync::Lazy; #[cfg(any(feature = "dist-client", feature = "dist-server"))] use serde::ser::Serializer; use serde::{ de::{DeserializeOwned, Deserializer}, Deserialize, Serialize, }; #[cfg(test)] use serial_test::serial; use std::collections::HashMap; use std::env; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use std::result::Result as StdResult; use std::str::FromStr; use std::sync::Mutex; pub use crate::cache::PreprocessorCacheModeConfig; use crate::errors::*; static CACHED_CONFIG_PATH: Lazy = Lazy::new(CachedConfig::file_config_path); static CACHED_CONFIG: Mutex> = Mutex::new(None); const ORGANIZATION: &str = "Mozilla"; const APP_NAME: &str = "sccache"; const DIST_APP_NAME: &str = "sccache-dist-client"; const TEN_GIGS: u64 = 10 * 1024 * 1024 * 1024; const MOZILLA_OAUTH_PKCE_CLIENT_ID: &str = "F1VVD6nRTckSVrviMRaOdLBWIk1AvHYo"; // The sccache audience is an API set up in auth0 for sccache to allow 7 day expiry, // the openid scope allows us to query the auth0 /userinfo endpoint which contains // group information due to Mozilla rules. 
const MOZILLA_OAUTH_PKCE_AUTH_URL: &str = "https://auth.mozilla.auth0.com/authorize?audience=sccache&scope=openid%20profile"; const MOZILLA_OAUTH_PKCE_TOKEN_URL: &str = "https://auth.mozilla.auth0.com/oauth/token"; pub const INSECURE_DIST_CLIENT_TOKEN: &str = "dangerously_insecure_client"; // Unfortunately this means that nothing else can use the sccache cache dir as // this top level directory is used directly to store sccache cached objects... pub fn default_disk_cache_dir() -> PathBuf { ProjectDirs::from("", ORGANIZATION, APP_NAME) .expect("Unable to retrieve disk cache directory") .cache_dir() .to_owned() } // ...whereas subdirectories are used of this one pub fn default_dist_cache_dir() -> PathBuf { ProjectDirs::from("", ORGANIZATION, DIST_APP_NAME) .expect("Unable to retrieve dist cache directory") .cache_dir() .to_owned() } fn default_disk_cache_size() -> u64 { TEN_GIGS } fn default_toolchain_cache_size() -> u64 { TEN_GIGS } pub fn parse_size(val: &str) -> Option { let multiplier = match val.chars().last() { Some('K') => 1024, Some('M') => 1024 * 1024, Some('G') => 1024 * 1024 * 1024, Some('T') => 1024 * 1024 * 1024 * 1024, _ => 1, }; let val = if multiplier > 1 && !val.is_empty() { val.split_at(val.len() - 1).0 } else { val }; u64::from_str(val).ok().map(|size| size * multiplier) } #[cfg(any(feature = "dist-client", feature = "dist-server"))] #[derive(Clone, Debug, PartialEq, Eq)] pub struct HTTPUrl(reqwest::Url); #[cfg(any(feature = "dist-client", feature = "dist-server"))] impl Serialize for HTTPUrl { fn serialize(&self, serializer: S) -> StdResult where S: Serializer, { serializer.serialize_str(self.0.as_str()) } } #[cfg(any(feature = "dist-client", feature = "dist-server"))] impl<'a> Deserialize<'a> for HTTPUrl { fn deserialize(deserializer: D) -> StdResult where D: Deserializer<'a>, { use serde::de::Error; let helper: String = Deserialize::deserialize(deserializer)?; let url = parse_http_url(&helper).map_err(D::Error::custom)?; Ok(HTTPUrl(url)) } } #[cfg(any(feature = "dist-client", feature = "dist-server"))] fn parse_http_url(url: &str) -> Result { use std::net::SocketAddr; let url = if let Ok(sa) = url.parse::() { warn!("Url {} has no scheme, assuming http", url); reqwest::Url::parse(&format!("http://{}", sa)) } else { reqwest::Url::parse(url) }?; if url.scheme() != "http" && url.scheme() != "https" { bail!("url not http or https") } // TODO: relative url handling just hasn't been implemented and tested if url.path() != "/" { bail!("url has a relative path (currently unsupported)") } Ok(url) } #[cfg(any(feature = "dist-client", feature = "dist-server"))] impl HTTPUrl { pub fn from_url(u: reqwest::Url) -> Self { HTTPUrl(u) } pub fn to_url(&self) -> reqwest::Url { self.0.clone() } } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct AzureCacheConfig { pub connection_string: String, pub container: String, pub key_prefix: String, } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] #[serde(default)] pub struct DiskCacheConfig { pub dir: PathBuf, // TODO: use deserialize_with to allow human-readable sizes in toml pub size: u64, pub preprocessor_cache_mode: PreprocessorCacheModeConfig, pub rw_mode: CacheModeConfig, } impl Default for DiskCacheConfig { fn default() -> Self { DiskCacheConfig { dir: default_disk_cache_dir(), size: default_disk_cache_size(), preprocessor_cache_mode: PreprocessorCacheModeConfig::activated(), rw_mode: CacheModeConfig::ReadWrite, } } } #[derive(Copy, Clone, Debug, PartialEq, 
Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub enum CacheModeConfig {
    #[serde(rename = "READ_ONLY")]
    ReadOnly,
    #[serde(rename = "READ_WRITE")]
    ReadWrite,
}

impl From<CacheModeConfig> for CacheMode {
    fn from(value: CacheModeConfig) -> Self {
        match value {
            CacheModeConfig::ReadOnly => CacheMode::ReadOnly,
            CacheModeConfig::ReadWrite => CacheMode::ReadWrite,
        }
    }
}

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct GCSCacheConfig {
    pub bucket: String,
    pub key_prefix: String,
    pub cred_path: Option<String>,
    pub service_account: Option<String>,
    pub rw_mode: CacheModeConfig,
    pub credential_url: Option<String>,
}

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct GHACacheConfig {
    pub enabled: bool,
    /// Version for gha cache is a namespace. By setting different versions,
    /// we can avoid mixed caches.
    pub version: String,
}

/// Memcached's default expiration is 10800s (3 hours), which is too short
/// for sccache's use case.
///
/// We increase the default expiration to 86400s (1 day) to balance between
/// memory consumption and cache hit rate.
///
/// Please change this value freely if we have a better choice.
const DEFAULT_MEMCACHED_CACHE_EXPIRATION: u32 = 86400;

fn default_memcached_cache_expiration() -> u32 {
    DEFAULT_MEMCACHED_CACHE_EXPIRATION
}

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(deny_unknown_fields)]
pub struct MemcachedCacheConfig {
    #[serde(alias = "endpoint")]
    pub url: String,

    /// Username to authenticate with.
    pub username: Option<String>,

    /// Password to authenticate with.
    pub password: Option<String>,

    /// The expiration time in seconds.
    ///
    /// Defaults to 24 hours (86400).
    /// Up to 30 days (2592000).
    #[serde(default = "default_memcached_cache_expiration")]
    pub expiration: u32,

    #[serde(default)]
    pub key_prefix: String,
}

/// redis has no default TTL - all caches live forever.
///
/// We keep the TTL as 0 here, as redis does.
///
/// Please change this value freely if we have a better choice.
const DEFAULT_REDIS_CACHE_TTL: u64 = 0;
pub const DEFAULT_REDIS_DB: u32 = 0;

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(deny_unknown_fields)]
pub struct RedisCacheConfig {
    /// The single-node redis endpoint.
    /// Mutually exclusive with `cluster_endpoints`.
    pub endpoint: Option<String>,

    /// The redis cluster endpoints.
    /// Mutually exclusive with `endpoint`.
    pub cluster_endpoints: Option<String>,

    /// Username to authenticate with.
    pub username: Option<String>,

    /// Password to authenticate with.
    pub password: Option<String>,

    /// The redis URL.
    /// Deprecated in favor of `endpoint`.
    pub url: Option<String>,

    /// The db number to use.
    ///
    /// Defaults to 0.
    #[serde(default)]
    pub db: u32,

    /// The ttl (expiration) time in seconds.
/// /// Default to infinity (0) #[serde(default, alias = "expiration")] pub ttl: u64, #[serde(default)] pub key_prefix: String, } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct WebdavCacheConfig { pub endpoint: String, #[serde(default)] pub key_prefix: String, pub username: Option, pub password: Option, pub token: Option, } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct S3CacheConfig { pub bucket: String, pub region: Option, #[serde(default)] pub key_prefix: String, pub no_credentials: bool, pub endpoint: Option, pub use_ssl: Option, pub server_side_encryption: Option, } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct OSSCacheConfig { pub bucket: String, #[serde(default)] pub key_prefix: String, pub endpoint: Option, pub no_credentials: bool, } #[derive(Debug, PartialEq, Eq)] pub enum CacheType { Azure(AzureCacheConfig), GCS(GCSCacheConfig), GHA(GHACacheConfig), Memcached(MemcachedCacheConfig), Redis(RedisCacheConfig), S3(S3CacheConfig), Webdav(WebdavCacheConfig), OSS(OSSCacheConfig), } #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct CacheConfigs { pub azure: Option, pub disk: Option, pub gcs: Option, pub gha: Option, pub memcached: Option, pub redis: Option, pub s3: Option, pub webdav: Option, pub oss: Option, } impl CacheConfigs { /// Return cache type in an arbitrary but /// consistent ordering fn into_fallback(self) -> (Option, DiskCacheConfig) { let CacheConfigs { azure, disk, gcs, gha, memcached, redis, s3, webdav, oss, } = self; let cache_type = s3 .map(CacheType::S3) .or_else(|| redis.map(CacheType::Redis)) .or_else(|| memcached.map(CacheType::Memcached)) .or_else(|| gcs.map(CacheType::GCS)) .or_else(|| gha.map(CacheType::GHA)) .or_else(|| azure.map(CacheType::Azure)) .or_else(|| webdav.map(CacheType::Webdav)) .or_else(|| oss.map(CacheType::OSS)); let fallback = disk.unwrap_or_default(); (cache_type, fallback) } /// Override self with any existing fields from other fn merge(&mut self, other: Self) { let CacheConfigs { azure, disk, gcs, gha, memcached, redis, s3, webdav, oss, } = other; if azure.is_some() { self.azure = azure } if disk.is_some() { self.disk = disk } if gcs.is_some() { self.gcs = gcs } if gha.is_some() { self.gha = gha } if memcached.is_some() { self.memcached = memcached } if redis.is_some() { self.redis = redis } if s3.is_some() { self.s3 = s3 } if webdav.is_some() { self.webdav = webdav } if oss.is_some() { self.oss = oss } } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] #[serde(tag = "type")] pub enum DistToolchainConfig { #[serde(rename = "no_dist")] NoDist { compiler_executable: PathBuf }, #[serde(rename = "path_override")] PathOverride { compiler_executable: PathBuf, archive: PathBuf, archive_compiler_executable: String, }, } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] #[serde(tag = "type")] pub enum DistAuth { #[serde(rename = "token")] Token { token: String }, #[serde(rename = "oauth2_code_grant_pkce")] Oauth2CodeGrantPKCE { client_id: String, auth_url: String, token_url: String, }, #[serde(rename = "oauth2_implicit")] Oauth2Implicit { client_id: String, auth_url: String }, } // Convert a type = "mozilla" immediately into an actual oauth configuration // https://github.com/serde-rs/serde/issues/595 could help if implemented impl<'a> Deserialize<'a> for DistAuth { fn deserialize(deserializer: D) -> 
StdResult where D: Deserializer<'a>, { #[derive(Deserialize)] #[serde(deny_unknown_fields)] #[serde(tag = "type")] pub enum Helper { #[serde(rename = "token")] Token { token: String }, #[serde(rename = "mozilla")] Mozilla, #[serde(rename = "oauth2_code_grant_pkce")] Oauth2CodeGrantPKCE { client_id: String, auth_url: String, token_url: String, }, #[serde(rename = "oauth2_implicit")] Oauth2Implicit { client_id: String, auth_url: String }, } let helper: Helper = Deserialize::deserialize(deserializer)?; Ok(match helper { Helper::Token { token } => DistAuth::Token { token }, Helper::Mozilla => DistAuth::Oauth2CodeGrantPKCE { client_id: MOZILLA_OAUTH_PKCE_CLIENT_ID.to_owned(), auth_url: MOZILLA_OAUTH_PKCE_AUTH_URL.to_owned(), token_url: MOZILLA_OAUTH_PKCE_TOKEN_URL.to_owned(), }, Helper::Oauth2CodeGrantPKCE { client_id, auth_url, token_url, } => DistAuth::Oauth2CodeGrantPKCE { client_id, auth_url, token_url, }, Helper::Oauth2Implicit { client_id, auth_url, } => DistAuth::Oauth2Implicit { client_id, auth_url, }, }) } } impl Default for DistAuth { fn default() -> Self { DistAuth::Token { token: INSECURE_DIST_CLIENT_TOKEN.to_owned(), } } } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(default)] #[serde(deny_unknown_fields)] pub struct DistConfig { pub auth: DistAuth, #[cfg(any(feature = "dist-client", feature = "dist-server"))] pub scheduler_url: Option, #[cfg(not(any(feature = "dist-client", feature = "dist-server")))] pub scheduler_url: Option, pub cache_dir: PathBuf, pub toolchains: Vec, pub toolchain_cache_size: u64, pub rewrite_includes_only: bool, } impl Default for DistConfig { fn default() -> Self { Self { auth: Default::default(), scheduler_url: Default::default(), cache_dir: default_dist_cache_dir(), toolchains: Default::default(), toolchain_cache_size: default_toolchain_cache_size(), rewrite_includes_only: false, } } } // TODO: fields only pub for tests #[derive(Debug, Default, Serialize, Deserialize, Eq, PartialEq)] #[serde(default)] #[serde(deny_unknown_fields)] pub struct FileConfig { pub cache: CacheConfigs, pub dist: DistConfig, pub server_startup_timeout_ms: Option, } // If the file doesn't exist or we can't read it, log the issue and proceed. If the // config exists but doesn't parse then something is wrong - return an error. pub fn try_read_config_file(path: &Path) -> Result> { debug!("Attempting to read config file at {:?}", path); let mut file = match File::open(path) { Ok(f) => f, Err(e) => { debug!("Couldn't open config file: {}", e); return Ok(None); } }; let mut string = String::new(); match file.read_to_string(&mut string) { Ok(_) => (), Err(e) => { warn!("Failed to read config file: {}", e); return Ok(None); } } let res = if path.extension().map_or(false, |e| e == "json") { serde_json::from_str(&string) .with_context(|| format!("Failed to load json config file from {}", path.display()))? } else { toml::from_str(&string) .with_context(|| format!("Failed to load toml config file from {}", path.display()))? 
}; Ok(Some(res)) } #[derive(Debug)] pub struct EnvConfig { cache: CacheConfigs, } fn key_prefix_from_env_var(env_var_name: &str) -> String { env::var(env_var_name) .ok() .as_ref() .map(|s| s.trim_end_matches('/')) .filter(|s| !s.is_empty()) .unwrap_or_default() .to_owned() } fn number_from_env_var(env_var_name: &str) -> Option> where ::Err: std::fmt::Debug, { let value = env::var(env_var_name).ok()?; value .parse::() .map_err(|err| anyhow!("{env_var_name} value is invalid: {err:?}")) .into() } fn bool_from_env_var(env_var_name: &str) -> Result> { env::var(env_var_name) .ok() .map(|value| match value.to_lowercase().as_str() { "true" | "on" | "1" => Ok(true), "false" | "off" | "0" => Ok(false), _ => bail!( "{} must be 'true', 'on', '1', 'false', 'off' or '0'.", env_var_name ), }) .transpose() } fn config_from_env() -> Result { // ======= AWS ======= let s3 = if let Ok(bucket) = env::var("SCCACHE_BUCKET") { let region = env::var("SCCACHE_REGION").ok(); let no_credentials = bool_from_env_var("SCCACHE_S3_NO_CREDENTIALS")?.unwrap_or(false); let use_ssl = bool_from_env_var("SCCACHE_S3_USE_SSL")?; let server_side_encryption = bool_from_env_var("SCCACHE_S3_SERVER_SIDE_ENCRYPTION")?; let endpoint = env::var("SCCACHE_ENDPOINT").ok(); let key_prefix = key_prefix_from_env_var("SCCACHE_S3_KEY_PREFIX"); Some(S3CacheConfig { bucket, region, no_credentials, key_prefix, endpoint, use_ssl, server_side_encryption, }) } else { None }; if s3.as_ref().map(|s3| s3.no_credentials).unwrap_or_default() && (env::var_os("AWS_ACCESS_KEY_ID").is_some() || env::var_os("AWS_SECRET_ACCESS_KEY").is_some()) { bail!("If setting S3 credentials, SCCACHE_S3_NO_CREDENTIALS must not be set."); } // ======= redis ======= let redis = match ( env::var("SCCACHE_REDIS").ok(), env::var("SCCACHE_REDIS_ENDPOINT").ok(), env::var("SCCACHE_REDIS_CLUSTER_ENDPOINTS").ok(), ) { (None, None, None) => None, (url, endpoint, cluster_endpoints) => { let db = number_from_env_var("SCCACHE_REDIS_DB") .transpose()? .unwrap_or(DEFAULT_REDIS_DB); let username = env::var("SCCACHE_REDIS_USERNAME").ok(); let password = env::var("SCCACHE_REDIS_PASSWORD").ok(); let ttl = number_from_env_var("SCCACHE_REDIS_EXPIRATION") .or_else(|| number_from_env_var("SCCACHE_REDIS_TTL")) .transpose()? .unwrap_or(DEFAULT_REDIS_CACHE_TTL); let key_prefix = key_prefix_from_env_var("SCCACHE_REDIS_KEY_PREFIX"); Some(RedisCacheConfig { url, endpoint, cluster_endpoints, username, password, db, ttl, key_prefix, }) } }; if env::var_os("SCCACHE_REDIS_EXPIRATION").is_some() && env::var_os("SCCACHE_REDIS_TTL").is_some() { bail!("You mustn't set both SCCACHE_REDIS_EXPIRATION and SCCACHE_REDIS_TTL. Use only one."); } // ======= memcached ======= let memcached = if let Ok(url) = env::var("SCCACHE_MEMCACHED").or_else(|_| env::var("SCCACHE_MEMCACHED_ENDPOINT")) { let username = env::var("SCCACHE_MEMCACHED_USERNAME").ok(); let password = env::var("SCCACHE_MEMCACHED_PASSWORD").ok(); let expiration = number_from_env_var("SCCACHE_MEMCACHED_EXPIRATION") .transpose()? .unwrap_or(DEFAULT_MEMCACHED_CACHE_EXPIRATION); let key_prefix = key_prefix_from_env_var("SCCACHE_MEMCACHED_KEY_PREFIX"); Some(MemcachedCacheConfig { url, username, password, expiration, key_prefix, }) } else { None }; if env::var_os("SCCACHE_MEMCACHED").is_some() && env::var_os("SCCACHE_MEMCACHED_ENDPOINT").is_some() { bail!("You mustn't set both SCCACHE_MEMCACHED and SCCACHE_MEMCACHED_ENDPOINT. 
Please, use only SCCACHE_MEMCACHED_ENDPOINT."); } // ======= GCP/GCS ======= if (env::var("SCCACHE_GCS_CREDENTIALS_URL").is_ok() || env::var("SCCACHE_GCS_OAUTH_URL").is_ok() || env::var("SCCACHE_GCS_KEY_PATH").is_ok()) && env::var("SCCACHE_GCS_BUCKET").is_err() { bail!( "If setting GCS credentials, SCCACHE_GCS_BUCKET and an auth mechanism need to be set." ); } let gcs = env::var("SCCACHE_GCS_BUCKET").ok().map(|bucket| { let key_prefix = key_prefix_from_env_var("SCCACHE_GCS_KEY_PREFIX"); if env::var("SCCACHE_GCS_OAUTH_URL").is_ok() { eprintln!("SCCACHE_GCS_OAUTH_URL has been deprecated"); eprintln!("if you intend to use vm metadata for auth, please set correct service account instead"); } let credential_url = env::var("SCCACHE_GCS_CREDENTIALS_URL").ok(); let cred_path = env::var("SCCACHE_GCS_KEY_PATH").ok(); let service_account = env::var("SCCACHE_GCS_SERVICE_ACCOUNT").ok(); let rw_mode = match env::var("SCCACHE_GCS_RW_MODE").as_ref().map(String::as_str) { Ok("READ_ONLY") => CacheModeConfig::ReadOnly, Ok("READ_WRITE") => CacheModeConfig::ReadWrite, // TODO: unsure if these should warn during the configuration loading // or at the time when they're actually used to connect to GCS Ok(_) => { warn!("Invalid SCCACHE_GCS_RW_MODE -- defaulting to READ_ONLY."); CacheModeConfig::ReadOnly } _ => { warn!("No SCCACHE_GCS_RW_MODE specified -- defaulting to READ_ONLY."); CacheModeConfig::ReadOnly } }; GCSCacheConfig { bucket, key_prefix, cred_path, service_account, rw_mode, credential_url, } }); // ======= GHA ======= let gha = if let Ok(version) = env::var("SCCACHE_GHA_VERSION") { // If SCCACHE_GHA_VERSION has been set, we don't need to check // SCCACHE_GHA_ENABLED's value anymore. Some(GHACacheConfig { enabled: true, version, }) } else if bool_from_env_var("SCCACHE_GHA_ENABLED")?.unwrap_or(false) { // If only SCCACHE_GHA_ENABLED has been set to the true value, enable with // default version. 
Some(GHACacheConfig { enabled: true, version: "".to_string(), }) } else { None }; // ======= Azure ======= let azure = if let (Ok(connection_string), Ok(container)) = ( env::var("SCCACHE_AZURE_CONNECTION_STRING"), env::var("SCCACHE_AZURE_BLOB_CONTAINER"), ) { let key_prefix = key_prefix_from_env_var("SCCACHE_AZURE_KEY_PREFIX"); Some(AzureCacheConfig { connection_string, container, key_prefix, }) } else { None }; // ======= WebDAV ======= let webdav = if let Ok(endpoint) = env::var("SCCACHE_WEBDAV_ENDPOINT") { let key_prefix = key_prefix_from_env_var("SCCACHE_WEBDAV_KEY_PREFIX"); let username = env::var("SCCACHE_WEBDAV_USERNAME").ok(); let password = env::var("SCCACHE_WEBDAV_PASSWORD").ok(); let token = env::var("SCCACHE_WEBDAV_TOKEN").ok(); Some(WebdavCacheConfig { endpoint, key_prefix, username, password, token, }) } else { None }; // ======= OSS ======= let oss = if let Ok(bucket) = env::var("SCCACHE_OSS_BUCKET") { let endpoint = env::var("SCCACHE_OSS_ENDPOINT").ok(); let key_prefix = key_prefix_from_env_var("SCCACHE_OSS_KEY_PREFIX"); let no_credentials = bool_from_env_var("SCCACHE_OSS_NO_CREDENTIALS")?.unwrap_or(false); Some(OSSCacheConfig { bucket, endpoint, key_prefix, no_credentials, }) } else { None }; if oss .as_ref() .map(|oss| oss.no_credentials) .unwrap_or_default() && (env::var_os("ALIBABA_CLOUD_ACCESS_KEY_ID").is_some() || env::var_os("ALIBABA_CLOUD_ACCESS_KEY_SECRET").is_some()) { bail!("If setting OSS credentials, SCCACHE_OSS_NO_CREDENTIALS must not be set."); } // ======= Local ======= let disk_dir = env::var_os("SCCACHE_DIR").map(PathBuf::from); let disk_sz = env::var("SCCACHE_CACHE_SIZE") .ok() .and_then(|v| parse_size(&v)); let mut preprocessor_mode_config = PreprocessorCacheModeConfig::activated(); let preprocessor_mode_overridden = if let Some(value) = bool_from_env_var("SCCACHE_DIRECT")? { preprocessor_mode_config.use_preprocessor_cache_mode = value; true } else { false }; let (disk_rw_mode, disk_rw_mode_overridden) = match env::var("SCCACHE_LOCAL_RW_MODE") .as_ref() .map(String::as_str) { Ok("READ_ONLY") => (CacheModeConfig::ReadOnly, true), Ok("READ_WRITE") => (CacheModeConfig::ReadWrite, true), Ok(_) => { warn!("Invalid SCCACHE_LOCAL_RW_MODE -- defaulting to READ_WRITE."); (CacheModeConfig::ReadWrite, false) } _ => (CacheModeConfig::ReadWrite, false), }; let any_overridden = disk_dir.is_some() || disk_sz.is_some() || preprocessor_mode_overridden || disk_rw_mode_overridden; let disk = if any_overridden { Some(DiskCacheConfig { dir: disk_dir.unwrap_or_else(default_disk_cache_dir), size: disk_sz.unwrap_or_else(default_disk_cache_size), preprocessor_cache_mode: preprocessor_mode_config, rw_mode: disk_rw_mode, }) } else { None }; let cache = CacheConfigs { azure, disk, gcs, gha, memcached, redis, s3, webdav, oss, }; Ok(EnvConfig { cache }) } // The directories crate changed the location of `config_dir` on macos in version 3, // so we also check the config in `preference_dir` (new in that version), which // corresponds to the old location, for compatibility with older setups. fn config_file(env_var: &str, leaf: &str) -> PathBuf { if let Some(env_value) = env::var_os(env_var) { return env_value.into(); } let dirs = ProjectDirs::from("", ORGANIZATION, APP_NAME).expect("Unable to get config directory"); // If the new location exists, use that. let path = dirs.config_dir().join(leaf); if path.exists() { return path; } // If the old location exists, use that. let path = dirs.preference_dir().join(leaf); if path.exists() { return path; } // Otherwise, use the new location. 
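// To summarize the resolution order implemented above (a recap, no new
// behavior):
//   1. the `env_var` override, if set;
//   2. `config_dir()/leaf`, if it already exists;
//   3. `preference_dir()/leaf`, if it already exists (the pre-v3
//      `directories` location on macOS);
//   4. otherwise, default to `config_dir()/leaf` below.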
dirs.config_dir().join(leaf)
}

#[derive(Debug, Default, PartialEq, Eq)]
pub struct Config {
    pub cache: Option<CacheType>,
    pub fallback_cache: DiskCacheConfig,
    pub dist: DistConfig,
    pub server_startup_timeout: Option<std::time::Duration>,
}

impl Config {
    pub fn load() -> Result<Self> {
        let env_conf = config_from_env()?;

        let file_conf_path = config_file("SCCACHE_CONF", "config");
        let file_conf = try_read_config_file(&file_conf_path)
            .context("Failed to load config file")?
            .unwrap_or_default();

        Ok(Self::from_env_and_file_configs(env_conf, file_conf))
    }

    fn from_env_and_file_configs(env_conf: EnvConfig, file_conf: FileConfig) -> Self {
        let mut conf_caches: CacheConfigs = Default::default();

        let FileConfig {
            cache,
            dist,
            server_startup_timeout_ms,
        } = file_conf;
        conf_caches.merge(cache);

        let server_startup_timeout =
            server_startup_timeout_ms.map(std::time::Duration::from_millis);

        let EnvConfig { cache } = env_conf;
        conf_caches.merge(cache);

        let (caches, fallback_cache) = conf_caches.into_fallback();
        Self {
            cache: caches,
            fallback_cache,
            dist,
            server_startup_timeout,
        }
    }
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct CachedDistConfig {
    pub auth_tokens: HashMap<String, String>,
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct CachedFileConfig {
    pub dist: CachedDistConfig,
}

#[derive(Debug, Default, PartialEq, Eq)]
pub struct CachedConfig(());

impl CachedConfig {
    pub fn load() -> Result<Self> {
        let mut cached_file_config = CACHED_CONFIG.lock().unwrap();

        if cached_file_config.is_none() {
            let cfg = Self::load_file_config().context("Unable to initialise cached config")?;
            *cached_file_config = Some(cfg)
        }
        Ok(CachedConfig(()))
    }
    pub fn reload() -> Result<Self> {
        {
            let mut cached_file_config = CACHED_CONFIG.lock().unwrap();
            *cached_file_config = None;
        };
        Self::load()
    }
    pub fn with<F: FnOnce(&CachedFileConfig) -> T, T>(&self, f: F) -> T {
        let cached_file_config = CACHED_CONFIG.lock().unwrap();
        let cached_file_config = cached_file_config.as_ref().unwrap();

        f(cached_file_config)
    }
    pub fn with_mut<F: FnOnce(&mut CachedFileConfig)>(&self, f: F) -> Result<()> {
        let mut cached_file_config = CACHED_CONFIG.lock().unwrap();
        let cached_file_config = cached_file_config.as_mut().unwrap();

        let mut new_config = cached_file_config.clone();
        f(&mut new_config);
        Self::save_file_config(&new_config)?;
        *cached_file_config = new_config;
        Ok(())
    }

    fn file_config_path() -> PathBuf {
        config_file("SCCACHE_CACHED_CONF", "cached-config")
    }
    fn load_file_config() -> Result<CachedFileConfig> {
        let file_conf_path = &*CACHED_CONFIG_PATH;

        if !file_conf_path.exists() {
            let file_conf_dir = file_conf_path
                .parent()
                .expect("Cached conf file has no parent directory");
            if !file_conf_dir.is_dir() {
                fs::create_dir_all(file_conf_dir)
                    .context("Failed to create dir to hold cached config")?
            }
            Self::save_file_config(&Default::default()).with_context(|| {
                format!(
                    "Unable to create cached config file at {}",
                    file_conf_path.display()
                )
            })?
        }
        try_read_config_file(file_conf_path)
            .context("Failed to load cached config file")?
            .with_context(|| format!("Failed to load from {}", file_conf_path.display()))
    }
    fn save_file_config(c: &CachedFileConfig) -> Result<()> {
        let file_conf_path = &*CACHED_CONFIG_PATH;
        let mut file = File::create(file_conf_path).context("Could not open config for writing")?;
        file.write_all(toml::to_string(c).unwrap().as_bytes())
            .map_err(Into::into)
    }
}

#[cfg(feature = "dist-server")]
pub mod scheduler {
    use std::net::SocketAddr;
    use std::path::Path;

    use crate::errors::*;
    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    #[serde(tag = "type")]
    #[serde(deny_unknown_fields)]
    pub enum ClientAuth {
        #[serde(rename = "DANGEROUSLY_INSECURE")]
        Insecure,
        #[serde(rename = "token")]
        Token { token: String },
        #[serde(rename = "jwt_validate")]
        JwtValidate {
            audience: String,
            issuer: String,
            jwks_url: String,
        },
        #[serde(rename = "mozilla")]
        Mozilla { required_groups: Vec<String> },
        #[serde(rename = "proxy_token")]
        ProxyToken {
            url: String,
            cache_secs: Option<u64>,
        },
    }

    #[derive(Debug, Serialize, Deserialize)]
    #[serde(tag = "type")]
    #[serde(deny_unknown_fields)]
    pub enum ServerAuth {
        #[serde(rename = "DANGEROUSLY_INSECURE")]
        Insecure,
        #[serde(rename = "jwt_hs256")]
        JwtHS256 { secret_key: String },
        #[serde(rename = "token")]
        Token { token: String },
    }

    #[derive(Debug, Serialize, Deserialize)]
    #[serde(deny_unknown_fields)]
    pub struct Config {
        pub public_addr: SocketAddr,
        pub client_auth: ClientAuth,
        pub server_auth: ServerAuth,
    }

    pub fn from_path(conf_path: &Path) -> Result<Option<Config>> {
        super::try_read_config_file(conf_path).context("Failed to load scheduler config file")
    }
}

#[cfg(feature = "dist-server")]
pub mod server {
    use super::HTTPUrl;
    use serde::{Deserialize, Serialize};
    use std::net::SocketAddr;
    use std::path::{Path, PathBuf};

    use crate::errors::*;

    const TEN_GIGS: u64 = 10 * 1024 * 1024 * 1024;
    fn default_toolchain_cache_size() -> u64 {
        TEN_GIGS
    }

    const DEFAULT_POT_CLONE_FROM: &str = "sccache-template";
    const DEFAULT_POT_FS_ROOT: &str = "/opt/pot";
    const DEFAULT_POT_CMD: &str = "pot";
    const DEFAULT_POT_CLONE_ARGS: &[&str] = &["-i", "lo0|127.0.0.2"];

    fn default_pot_clone_from() -> String {
        DEFAULT_POT_CLONE_FROM.to_string()
    }

    fn default_pot_fs_root() -> PathBuf {
        DEFAULT_POT_FS_ROOT.into()
    }

    fn default_pot_cmd() -> PathBuf {
        DEFAULT_POT_CMD.into()
    }

    fn default_pot_clone_args() -> Vec<String> {
        DEFAULT_POT_CLONE_ARGS
            .iter()
            .map(|s| s.to_string())
            .collect()
    }

    #[derive(Debug, Serialize, Deserialize, PartialEq)]
    #[serde(tag = "type")]
    #[serde(deny_unknown_fields)]
    pub enum BuilderType {
        #[serde(rename = "docker")]
        Docker,
        #[serde(rename = "overlay")]
        Overlay {
            build_dir: PathBuf,
            bwrap_path: PathBuf,
        },
        #[serde(rename = "pot")]
        Pot {
            #[serde(default = "default_pot_fs_root")]
            pot_fs_root: PathBuf,
            #[serde(default = "default_pot_clone_from")]
            clone_from: String,
            #[serde(default = "default_pot_cmd")]
            pot_cmd: PathBuf,
            #[serde(default = "default_pot_clone_args")]
            pot_clone_args: Vec<String>,
        },
    }

    #[derive(Debug, Serialize, Deserialize, PartialEq)]
    #[serde(tag = "type")]
    #[serde(deny_unknown_fields)]
    pub enum SchedulerAuth {
        #[serde(rename = "DANGEROUSLY_INSECURE")]
        Insecure,
        #[serde(rename = "jwt_token")]
        JwtToken { token: String },
        #[serde(rename = "token")]
        Token { token: String },
    }

    #[derive(Debug, Serialize, Deserialize, PartialEq)]
    #[serde(deny_unknown_fields)]
    pub struct Config {
        pub builder: BuilderType,
        pub cache_dir: PathBuf,
        pub public_addr: SocketAddr,
        pub bind_address: Option<SocketAddr>,
        pub scheduler_url: HTTPUrl,
        pub scheduler_auth: SchedulerAuth,
        #[serde(default = "default_toolchain_cache_size")]
        pub toolchain_cache_size: u64,
    }
    pub fn from_path(conf_path: &Path) -> Result<Option<Config>> {
        super::try_read_config_file(conf_path).context("Failed to load server config file")
    }
}

#[test]
fn test_parse_size() {
    assert_eq!(None, parse_size(""));
    assert_eq!(None, parse_size("bogus value"));
    assert_eq!(Some(100), parse_size("100"));
    assert_eq!(Some(2048), parse_size("2K"));
    assert_eq!(Some(10 * 1024 * 1024), parse_size("10M"));
    assert_eq!(Some(TEN_GIGS), parse_size("10G"));
    assert_eq!(Some(1024 * TEN_GIGS), parse_size("10T"));
}

#[test]
fn config_overrides() {
    let env_conf = EnvConfig {
        cache: CacheConfigs {
            azure: Some(AzureCacheConfig {
                connection_string: String::new(),
                container: String::new(),
                key_prefix: String::new(),
            }),
            disk: Some(DiskCacheConfig {
                dir: "/env-cache".into(),
                size: 5,
                preprocessor_cache_mode: Default::default(),
                rw_mode: CacheModeConfig::ReadWrite,
            }),
            redis: Some(RedisCacheConfig {
                endpoint: Some("myotherredisurl".to_owned()),
                ttl: 24 * 3600,
                key_prefix: "/redis/prefix".into(),
                db: 10,
                username: Some("user".to_owned()),
                password: Some("secret".to_owned()),
                ..Default::default()
            }),
            ..Default::default()
        },
    };

    let file_conf = FileConfig {
        cache: CacheConfigs {
            disk: Some(DiskCacheConfig {
                dir: "/file-cache".into(),
                size: 15,
                preprocessor_cache_mode: Default::default(),
                rw_mode: CacheModeConfig::ReadWrite,
            }),
            memcached: Some(MemcachedCacheConfig {
                url: "memurl".to_owned(),
                expiration: 24 * 3600,
                key_prefix: String::new(),
                ..Default::default()
            }),
            redis: Some(RedisCacheConfig {
                url: Some("myredisurl".to_owned()),
                ttl: 25 * 3600,
                key_prefix: String::new(),
                ..Default::default()
            }),
            ..Default::default()
        },
        dist: Default::default(),
        server_startup_timeout_ms: None,
    };

    assert_eq!(
        Config::from_env_and_file_configs(env_conf, file_conf),
        Config {
            cache: Some(CacheType::Redis(RedisCacheConfig {
                endpoint: Some("myotherredisurl".to_owned()),
                ttl: 24 * 3600,
                key_prefix: "/redis/prefix".into(),
                db: 10,
                username: Some("user".to_owned()),
                password: Some("secret".to_owned()),
                ..Default::default()
            }),),
            fallback_cache: DiskCacheConfig {
                dir: "/env-cache".into(),
                size: 5,
                preprocessor_cache_mode: Default::default(),
                rw_mode: CacheModeConfig::ReadWrite,
            },
            dist: Default::default(),
            server_startup_timeout: None,
        }
    );
}

#[test]
#[serial]
#[cfg(feature = "s3")]
fn test_s3_no_credentials_conflict() {
    env::set_var("SCCACHE_S3_NO_CREDENTIALS", "true");
    env::set_var("SCCACHE_BUCKET", "my-bucket");
    env::set_var("AWS_ACCESS_KEY_ID", "aws-access-key-id");
    env::set_var("AWS_SECRET_ACCESS_KEY", "aws-secret-access-key");

    let cfg = config_from_env();

    env::remove_var("SCCACHE_S3_NO_CREDENTIALS");
    env::remove_var("SCCACHE_BUCKET");
    env::remove_var("AWS_ACCESS_KEY_ID");
    env::remove_var("AWS_SECRET_ACCESS_KEY");

    let error = cfg.unwrap_err();
    assert_eq!(
        "If setting S3 credentials, SCCACHE_S3_NO_CREDENTIALS must not be set.",
        error.to_string()
    );
}

#[test]
#[serial]
fn test_s3_no_credentials_invalid() {
    env::set_var("SCCACHE_S3_NO_CREDENTIALS", "yes");
    env::set_var("SCCACHE_BUCKET", "my-bucket");

    let cfg = config_from_env();

    env::remove_var("SCCACHE_S3_NO_CREDENTIALS");
    env::remove_var("SCCACHE_BUCKET");

    let error = cfg.unwrap_err();
    assert_eq!(
        "SCCACHE_S3_NO_CREDENTIALS must be 'true', 'on', '1', 'false', 'off' or '0'.",
        error.to_string()
    );
}

#[test]
#[serial]
fn test_s3_no_credentials_valid_true() {
    env::set_var("SCCACHE_S3_NO_CREDENTIALS", "true");
    env::set_var("SCCACHE_BUCKET", "my-bucket");

    let cfg = config_from_env();

    env::remove_var("SCCACHE_S3_NO_CREDENTIALS");
    env::remove_var("SCCACHE_BUCKET");

    let env_cfg =
cfg.unwrap(); match env_cfg.cache.s3 { Some(S3CacheConfig { ref bucket, no_credentials, .. }) => { assert_eq!(bucket, "my-bucket"); assert!(no_credentials); } None => unreachable!(), }; } #[test] #[serial] fn test_s3_no_credentials_valid_false() { env::set_var("SCCACHE_S3_NO_CREDENTIALS", "false"); env::set_var("SCCACHE_BUCKET", "my-bucket"); let cfg = config_from_env(); env::remove_var("SCCACHE_S3_NO_CREDENTIALS"); env::remove_var("SCCACHE_BUCKET"); let env_cfg = cfg.unwrap(); match env_cfg.cache.s3 { Some(S3CacheConfig { ref bucket, no_credentials, .. }) => { assert_eq!(bucket, "my-bucket"); assert!(!no_credentials); } None => unreachable!(), }; } #[test] #[serial] #[cfg(feature = "gcs")] fn test_gcs_service_account() { env::set_var("SCCACHE_GCS_BUCKET", "my-bucket"); env::set_var("SCCACHE_GCS_SERVICE_ACCOUNT", "my@example.com"); env::set_var("SCCACHE_GCS_RW_MODE", "READ_WRITE"); let cfg = config_from_env(); env::remove_var("SCCACHE_GCS_BUCKET"); env::remove_var("SCCACHE_GCS_SERVICE_ACCOUNT"); env::remove_var("SCCACHE_GCS_RW_MODE"); let env_cfg = cfg.unwrap(); match env_cfg.cache.gcs { Some(GCSCacheConfig { ref bucket, service_account, rw_mode, .. }) => { assert_eq!(bucket, "my-bucket"); assert_eq!(service_account, Some("my@example.com".to_string())); assert_eq!(rw_mode, CacheModeConfig::ReadWrite); } None => unreachable!(), }; } #[test] fn full_toml_parse() { const CONFIG_STR: &str = r#" server_startup_timeout_ms = 10000 [dist] # where to find the scheduler scheduler_url = "http://1.2.3.4:10600" # a set of prepackaged toolchains toolchains = [] # the maximum size of the toolchain cache in bytes toolchain_cache_size = 5368709120 cache_dir = "/home/user/.cache/sccache-dist-client" [dist.auth] type = "token" token = "secrettoken" #[cache.azure] # does not work as it appears [cache.disk] dir = "/tmp/.cache/sccache" size = 7516192768 # 7 GiBytes [cache.gcs] rw_mode = "READ_ONLY" # rw_mode = "READ_WRITE" cred_path = "/psst/secret/cred" bucket = "bucket" key_prefix = "prefix" service_account = "example_service_account" [cache.gha] enabled = true version = "sccache" [cache.memcached] # Deprecated alias for `endpoint` # url = "127.0.0.1:11211" endpoint = "tcp://127.0.0.1:11211" # Username and password for authentication username = "user" password = "passwd" expiration = 90000 key_prefix = "/custom/prefix/if/need" [cache.redis] url = "redis://user:passwd@1.2.3.4:6379/?db=1" endpoint = "redis://127.0.0.1:6379" cluster_endpoints = "tcp://10.0.0.1:6379,redis://10.0.0.2:6379" username = "another_user" password = "new_passwd" db = 12 expiration = 86400 key_prefix = "/my/redis/cache" [cache.s3] bucket = "name" region = "us-east-2" endpoint = "s3-us-east-1.amazonaws.com" use_ssl = true key_prefix = "s3prefix" no_credentials = true server_side_encryption = false [cache.webdav] endpoint = "http://127.0.0.1:8080" key_prefix = "webdavprefix" username = "webdavusername" password = "webdavpassword" token = "webdavtoken" [cache.oss] bucket = "name" endpoint = "oss-us-east-1.aliyuncs.com" key_prefix = "ossprefix" no_credentials = true "#; let file_config: FileConfig = toml::from_str(CONFIG_STR).expect("Is valid toml."); assert_eq!( file_config, FileConfig { cache: CacheConfigs { azure: None, // TODO not sure how to represent a unit struct in TOML Some(AzureCacheConfig), disk: Some(DiskCacheConfig { dir: PathBuf::from("/tmp/.cache/sccache"), size: 7 * 1024 * 1024 * 1024, preprocessor_cache_mode: PreprocessorCacheModeConfig::activated(), rw_mode: CacheModeConfig::ReadWrite, }), gcs: Some(GCSCacheConfig { bucket: 
"bucket".to_owned(), cred_path: Some("/psst/secret/cred".to_string()), service_account: Some("example_service_account".to_string()), rw_mode: CacheModeConfig::ReadOnly, key_prefix: "prefix".into(), credential_url: None, }), gha: Some(GHACacheConfig { enabled: true, version: "sccache".to_string() }), redis: Some(RedisCacheConfig { url: Some("redis://user:passwd@1.2.3.4:6379/?db=1".to_owned()), endpoint: Some("redis://127.0.0.1:6379".to_owned()), cluster_endpoints: Some("tcp://10.0.0.1:6379,redis://10.0.0.2:6379".to_owned()), username: Some("another_user".to_owned()), password: Some("new_passwd".to_owned()), db: 12, ttl: 24 * 3600, key_prefix: "/my/redis/cache".into(), }), memcached: Some(MemcachedCacheConfig { url: "tcp://127.0.0.1:11211".to_owned(), username: Some("user".to_owned()), password: Some("passwd".to_owned()), expiration: 25 * 3600, key_prefix: "/custom/prefix/if/need".into(), }), s3: Some(S3CacheConfig { bucket: "name".to_owned(), region: Some("us-east-2".to_owned()), endpoint: Some("s3-us-east-1.amazonaws.com".to_owned()), use_ssl: Some(true), key_prefix: "s3prefix".into(), no_credentials: true, server_side_encryption: Some(false) }), webdav: Some(WebdavCacheConfig { endpoint: "http://127.0.0.1:8080".to_string(), key_prefix: "webdavprefix".into(), username: Some("webdavusername".to_string()), password: Some("webdavpassword".to_string()), token: Some("webdavtoken".to_string()), }), oss: Some(OSSCacheConfig { bucket: "name".to_owned(), endpoint: Some("oss-us-east-1.aliyuncs.com".to_owned()), key_prefix: "ossprefix".into(), no_credentials: true, }), }, dist: DistConfig { auth: DistAuth::Token { token: "secrettoken".to_owned() }, #[cfg(any(feature = "dist-client", feature = "dist-server"))] scheduler_url: Some( parse_http_url("http://1.2.3.4:10600") .map(|url| { HTTPUrl::from_url(url) }) .expect("Scheduler url must be valid url str") ), #[cfg(not(any(feature = "dist-client", feature = "dist-server")))] scheduler_url: Some("http://1.2.3.4:10600".to_owned()), cache_dir: PathBuf::from("/home/user/.cache/sccache-dist-client"), toolchains: vec![], toolchain_cache_size: 5368709120, rewrite_includes_only: false, }, server_startup_timeout_ms: Some(10000), } ) } #[test] #[cfg(feature = "dist-server")] fn server_toml_parse() { use server::BuilderType; use server::SchedulerAuth; const CONFIG_STR: &str = r#" # This is where client toolchains will be stored. cache_dir = "/tmp/toolchains" # The maximum size of the toolchain cache, in bytes. # If unspecified the default is 10GB. toolchain_cache_size = 10737418240 # A public IP address and port that clients will use to connect to this builder. public_addr = "192.168.1.1:10501" # The socket address the builder will listen on. bind_address = "0.0.0.0:10501" # The URL used to connect to the scheduler (should use https, given an ideal # setup of a HTTPS server in front of the scheduler) scheduler_url = "https://192.168.1.1" [builder] type = "overlay" # The directory under which a sandboxed filesystem will be created for builds. build_dir = "/tmp/build" # The path to the bubblewrap version 0.3.0+ `bwrap` binary. bwrap_path = "/usr/bin/bwrap" [scheduler_auth] type = "jwt_token" # This will be generated by the `generate-jwt-hs256-server-token` command or # provided by an administrator of the sccache cluster. 
token = "my server's token" "#; let server_config: server::Config = toml::from_str(CONFIG_STR).expect("Is valid toml."); assert_eq!( server_config, server::Config { builder: BuilderType::Overlay { build_dir: PathBuf::from("/tmp/build"), bwrap_path: PathBuf::from("/usr/bin/bwrap"), }, cache_dir: PathBuf::from("/tmp/toolchains"), public_addr: "192.168.1.1:10501" .parse() .expect("Public address must be valid socket address"), bind_address: Some( "0.0.0.0:10501" .parse() .expect("Bind address must be valid socket address") ), scheduler_url: parse_http_url("https://192.168.1.1") .map(|url| { HTTPUrl::from_url(url) }) .expect("Scheduler url must be valid url str"), scheduler_auth: SchedulerAuth::JwtToken { token: "my server's token".to_owned() }, toolchain_cache_size: 10737418240, } ) } mozilla-sccache-40c3d6b/src/dist/000077500000000000000000000000001475712407500167255ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/dist/cache.rs000066400000000000000000000513201475712407500203370ustar00rootroot00000000000000use crate::dist::Toolchain; use crate::lru_disk_cache::Result as LruResult; use crate::lru_disk_cache::{LruDiskCache, ReadSeek}; use anyhow::{anyhow, Result}; use fs_err as fs; use std::io; use std::path::{Path, PathBuf}; #[cfg(feature = "dist-client")] pub use self::client::ClientToolchains; use crate::util::Digest; use std::io::Read; #[cfg(feature = "dist-client")] mod client { use crate::config; use crate::dist::pkg::ToolchainPackager; use crate::dist::Toolchain; use crate::lru_disk_cache::Error as LruError; use anyhow::{bail, Context, Error, Result}; use fs_err as fs; use std::collections::{HashMap, HashSet}; use std::io::Write; use std::path::{Path, PathBuf}; use std::sync::Mutex; use super::{path_key, TcCache}; #[derive(Clone, Debug)] pub struct CustomToolchain { archive: PathBuf, compiler_executable: String, } // TODO: possibly shouldn't be public pub struct ClientToolchains { cache_dir: PathBuf, cache: Mutex, // Lookup from dist toolchain -> path to custom toolchain archive custom_toolchain_archives: Mutex>, // Lookup from local path -> toolchain details // The Option could be populated on startup, but it's lazy for efficiency custom_toolchain_paths: Mutex)>>, // Toolchains configured to not be distributed disabled_toolchains: HashSet, // Local machine mapping from 'weak' hashes to strong toolchain hashes // - Weak hashes are what sccache uses to determine if a compiler has changed // on the local machine - they're fast and 'good enough' (assuming we trust // the local machine), but not safe if other users can update the cache. // - Strong hashes (or 'archive ids') are the hash of the complete compiler contents that // will be sent over the wire for use in distributed compilation - it is assumed // that if two of them match, the contents of a compiler archive cannot // have been tampered with weak_map: Mutex>, } impl ClientToolchains { pub fn new( cache_dir: &Path, cache_size: u64, toolchain_configs: &[config::DistToolchainConfig], ) -> Result { let cache_dir = cache_dir.to_owned(); fs::create_dir_all(&cache_dir).context(format!( "failed to create top level toolchain cache dir: {}", cache_dir.display() ))?; let toolchain_creation_dir = cache_dir.join("toolchain_tmp"); if toolchain_creation_dir.exists() { fs::remove_dir_all(&toolchain_creation_dir).context(format!( "failed to clean up temporary toolchain creation directory: {}", toolchain_creation_dir.display() ))? 
            }
            fs::create_dir(&toolchain_creation_dir).context(format!(
                "failed to create temporary toolchain creation directory: {}",
                toolchain_creation_dir.display()
            ))?;

            let weak_map_path = cache_dir.join("weak_map.json");
            if !weak_map_path.exists() {
                fs::File::create(&weak_map_path)
                    .and_then(|mut f| f.write_all(b"{}"))
                    .context(format!(
                        "failed to create new toolchain weak map file: {}",
                        weak_map_path.display()
                    ))?
            }
            let weak_map = fs::File::open(&weak_map_path)
                .map_err(Error::from)
                .and_then(|f| serde_json::from_reader(f).map_err(Error::from))
                .context(format!(
                    "failed to load toolchain weak map: {}",
                    weak_map_path.display()
                ))?;

            let tc_cache_dir = cache_dir.join("tc");
            let cache = TcCache::new(&tc_cache_dir, cache_size)
                .map(Mutex::new)
                .context("failed to initialise a toolchain cache")?;

            // Load in toolchain configuration
            let mut custom_toolchain_paths = HashMap::new();
            let mut disabled_toolchains = HashSet::new();
            for ct in toolchain_configs.iter() {
                match ct {
                    config::DistToolchainConfig::PathOverride {
                        compiler_executable,
                        archive,
                        archive_compiler_executable,
                    } => {
                        debug!(
                            "Registering custom toolchain for {}",
                            compiler_executable.display()
                        );
                        let custom_tc = CustomToolchain {
                            archive: archive.clone(),
                            compiler_executable: archive_compiler_executable.clone(),
                        };
                        if custom_toolchain_paths
                            .insert(compiler_executable.clone(), (custom_tc, None))
                            .is_some()
                        {
                            bail!("Multiple toolchains for {}", compiler_executable.display())
                        }
                        if disabled_toolchains.contains(compiler_executable) {
                            bail!(
                                "Override for toolchain {} conflicts with it being disabled",
                                compiler_executable.display()
                            )
                        }
                    }
                    config::DistToolchainConfig::NoDist {
                        compiler_executable,
                    } => {
                        debug!("Disabling toolchain {}", compiler_executable.display());
                        if !disabled_toolchains.insert(compiler_executable.clone()) {
                            bail!(
                                "Disabled toolchain {} multiple times",
                                compiler_executable.display()
                            )
                        }
                        if custom_toolchain_paths.contains_key(compiler_executable) {
                            bail!(
                                "Override for toolchain {} conflicts with it being disabled",
                                compiler_executable.display()
                            )
                        }
                    }
                }
            }
            let custom_toolchain_paths = Mutex::new(custom_toolchain_paths);

            Ok(Self {
                cache_dir,
                cache,
                custom_toolchain_archives: Mutex::new(HashMap::new()),
                custom_toolchain_paths,
                disabled_toolchains,
                // TODO: shouldn't clear on restart, but also should have some
                // form of pruning
                weak_map: Mutex::new(weak_map),
            })
        }

        // Get the bytes of a toolchain tar
        // TODO: by this point the toolchain should be known to exist
        pub fn get_toolchain(&self, tc: &Toolchain) -> Result<Option<fs::File>> {
            // TODO: be more relaxed about path casing and slashes on Windows
            let file = if let Some(custom_tc_archive) =
                self.custom_toolchain_archives.lock().unwrap().get(tc)
            {
                fs::File::open(custom_tc_archive).with_context(|| {
                    format!(
                        "could not open file for toolchain {}",
                        custom_tc_archive.display()
                    )
                })?
            } else {
                match self.cache.lock().unwrap().get_file(tc) {
                    Ok(file) => file,
                    Err(LruError::FileNotInCache) => return Ok(None),
                    Err(e) => return Err(e).context("error while retrieving toolchain from cache"),
                }
            };
            Ok(Some(file))
        }

        // If the toolchain doesn't already exist, create it and insert into the cache
        pub fn put_toolchain(
            &self,
            compiler_path: &Path,
            weak_key: &str,
            toolchain_packager: Box<dyn ToolchainPackager>,
        ) -> Result<(Toolchain, Option<(String, PathBuf)>)> {
            if self.disabled_toolchains.contains(compiler_path) {
                bail!(
                    "Toolchain distribution for {} is disabled",
                    compiler_path.display()
                )
            }
            if let Some(tc_and_paths) = self.get_custom_toolchain(compiler_path) {
                debug!("Using custom toolchain for {:?}", compiler_path);
                let (tc, compiler_path, archive) = tc_and_paths?;
                return Ok((tc, Some((compiler_path, archive))));
            }
            // Only permit one toolchain creation at a time. Not an issue if there are multiple attempts
            // to create the same toolchain, just a waste of time
            let mut cache = self.cache.lock().unwrap();
            if let Some(archive_id) = self.weak_to_strong(weak_key) {
                debug!("Using cached toolchain {} -> {}", weak_key, archive_id);
                return Ok((Toolchain { archive_id }, None));
            }
            debug!("Weak key {} appears to be new", weak_key);
            let tmpfile = tempfile::NamedTempFile::new_in(self.cache_dir.join("toolchain_tmp"))?;
            toolchain_packager
                .write_pkg(fs_err::File::from_parts(tmpfile.reopen()?, tmpfile.path()))
                .context("Could not package toolchain")?;
            let tc = cache.insert_file(tmpfile.path())?;
            self.record_weak(weak_key.to_owned(), tc.archive_id.clone())?;
            Ok((tc, None))
        }

        pub fn get_custom_toolchain(
            &self,
            compiler_path: &Path,
        ) -> Option<Result<(Toolchain, String, PathBuf)>> {
            match self
                .custom_toolchain_paths
                .lock()
                .unwrap()
                .get_mut(compiler_path)
            {
                Some((custom_tc, Some(tc))) => Some(Ok((
                    tc.clone(),
                    custom_tc.compiler_executable.clone(),
                    custom_tc.archive.clone(),
                ))),
                Some((custom_tc, maybe_tc @ None)) => {
                    let archive_id = match path_key(&custom_tc.archive) {
                        Ok(archive_id) => archive_id,
                        Err(e) => return Some(Err(e)),
                    };
                    let tc = Toolchain { archive_id };
                    *maybe_tc = Some(tc.clone());
                    // If this entry already exists, someone has two custom toolchains with the same strong hash
                    if let Some(old_path) = self
                        .custom_toolchain_archives
                        .lock()
                        .unwrap()
                        .insert(tc.clone(), custom_tc.archive.clone())
                    {
                        // Log a warning if the user has identical toolchains at two different locations - it's
                        // not strictly wrong, but it is a bit odd
                        if old_path != custom_tc.archive {
                            warn!(
                                "Detected interchangeable toolchain archives at {} and {}",
                                old_path.display(),
                                custom_tc.archive.display()
                            )
                        }
                    }
                    Some(Ok((
                        tc,
                        custom_tc.compiler_executable.clone(),
                        custom_tc.archive.clone(),
                    )))
                }
                None => None,
            }
        }

        fn weak_to_strong(&self, weak_key: &str) -> Option<String> {
            self.weak_map
                .lock()
                .unwrap()
                .get(weak_key)
                .map(String::to_owned)
        }

        fn record_weak(&self, weak_key: String, key: String) -> Result<()> {
            let mut weak_map = self.weak_map.lock().unwrap();
            weak_map.insert(weak_key, key);
            let weak_map_path = self.cache_dir.join("weak_map.json");
            fs::File::create(weak_map_path)
                .map_err(Error::from)
                .and_then(|f| serde_json::to_writer(f, &*weak_map).map_err(Error::from))
                .context("failed to enter toolchain in weak map")
        }
    }

    #[cfg(test)]
    mod test {
        use crate::config;
        use crate::test::utils::create_file;
        use std::io::Write;

        use super::ClientToolchains;

        struct PanicToolchainPackager;
        impl PanicToolchainPackager {
            fn new() -> Box<Self> {
                Box::new(PanicToolchainPackager)
            }
        }
        #[cfg(all(target_os = "linux", target_arch = "x86_64"))]
        impl crate::dist::pkg::ToolchainPackager for PanicToolchainPackager {
            fn write_pkg(self: Box<Self>, _f: super::fs::File) -> crate::errors::Result<()> {
                panic!("should not have called packager")
            }
        }

        #[test]
        fn test_client_toolchains_custom() {
            let td = tempfile::Builder::new()
                .prefix("sccache")
                .tempdir()
                .unwrap();

            let ct1 = create_file(td.path(), "ct1", |mut f| f.write_all(b"toolchain_contents")).unwrap();

            let client_toolchains = ClientToolchains::new(
                &td.path().join("cache"),
                1024,
                &[config::DistToolchainConfig::PathOverride {
                    compiler_executable: "/my/compiler".into(),
                    archive: ct1.clone(),
                    archive_compiler_executable: "/my/compiler/in_archive".into(),
                }],
            )
            .unwrap();

            let (_tc, newpath) = client_toolchains
                .put_toolchain(
                    "/my/compiler".as_ref(),
                    "weak_key",
                    PanicToolchainPackager::new(),
                )
                .unwrap();
            assert!(newpath.unwrap() == ("/my/compiler/in_archive".to_string(), ct1));
        }

        #[test]
        fn test_client_toolchains_custom_multiuse_archive() {
            let td = tempfile::Builder::new()
                .prefix("sccache")
                .tempdir()
                .unwrap();

            let ct1 = create_file(td.path(), "ct1", |mut f| f.write_all(b"toolchain_contents")).unwrap();

            let client_toolchains = ClientToolchains::new(
                &td.path().join("cache"),
                1024,
                &[
                    config::DistToolchainConfig::PathOverride {
                        compiler_executable: "/my/compiler".into(),
                        archive: ct1.clone(),
                        archive_compiler_executable: "/my/compiler/in_archive".into(),
                    },
                    // Uses the same archive, but maps a different external compiler to a different archive compiler
                    config::DistToolchainConfig::PathOverride {
                        compiler_executable: "/my/compiler2".into(),
                        archive: ct1.clone(),
                        archive_compiler_executable: "/my/compiler2/in_archive".into(),
                    },
                    // Uses the same archive, but maps a different external compiler to the same archive compiler as the first
                    config::DistToolchainConfig::PathOverride {
                        compiler_executable: "/my/compiler3".into(),
                        archive: ct1.clone(),
                        archive_compiler_executable: "/my/compiler/in_archive".into(),
                    },
                ],
            )
            .unwrap();

            let (_tc, newpath) = client_toolchains
                .put_toolchain(
                    "/my/compiler".as_ref(),
                    "weak_key",
                    PanicToolchainPackager::new(),
                )
                .unwrap();
            assert!(newpath.unwrap() == ("/my/compiler/in_archive".to_string(), ct1.clone()));
            let (_tc, newpath) = client_toolchains
                .put_toolchain(
                    "/my/compiler2".as_ref(),
                    "weak_key2",
                    PanicToolchainPackager::new(),
                )
                .unwrap();
            assert!(newpath.unwrap() == ("/my/compiler2/in_archive".to_string(), ct1.clone()));
            let (_tc, newpath) = client_toolchains
                .put_toolchain(
                    "/my/compiler3".as_ref(),
                    "weak_key2",
                    PanicToolchainPackager::new(),
                )
                .unwrap();
            assert!(newpath.unwrap() == ("/my/compiler/in_archive".to_string(), ct1));
        }

        #[test]
        fn test_client_toolchains_nodist() {
            let td = tempfile::Builder::new()
                .prefix("sccache")
                .tempdir()
                .unwrap();

            let client_toolchains = ClientToolchains::new(
                &td.path().join("cache"),
                1024,
                &[config::DistToolchainConfig::NoDist {
                    compiler_executable: "/my/compiler".into(),
                }],
            )
            .unwrap();
            assert!(client_toolchains
                .put_toolchain(
                    "/my/compiler".as_ref(),
                    "weak_key",
                    PanicToolchainPackager::new()
                )
                .is_err());
        }

        #[test]
        fn test_client_toolchains_custom_nodist_conflict() {
            let td = tempfile::Builder::new()
                .prefix("sccache")
                .tempdir()
                .unwrap();

            let ct1 = create_file(td.path(), "ct1", |mut f| f.write_all(b"toolchain_contents")).unwrap();

            let client_toolchains = ClientToolchains::new(
                &td.path().join("cache"),
                1024,
                &[
                    config::DistToolchainConfig::PathOverride {
                        compiler_executable: "/my/compiler".into(),
                        archive: ct1,
                        archive_compiler_executable: "/my/compiler".into(),
                    },
                    config::DistToolchainConfig::NoDist {
                        compiler_executable: "/my/compiler".into(),
                    },
                ],
            );
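            // The PathOverride and NoDist entries above both name "/my/compiler",
            // so constructing the ClientToolchains must fail: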
            assert!(client_toolchains.is_err())
        }
    }
}

pub struct TcCache {
    inner: LruDiskCache,
}

impl TcCache {
    pub fn new(cache_dir: &Path, cache_size: u64) -> Result<TcCache> {
        trace!("Using TcCache({:?}, {})", cache_dir, cache_size);
        Ok(TcCache {
            inner: LruDiskCache::new(cache_dir, cache_size)?,
        })
    }

    pub fn contains_toolchain(&self, tc: &Toolchain) -> bool {
        self.inner.contains_key(make_lru_key_path(&tc.archive_id))
    }

    pub fn insert_with<F: FnOnce(fs::File) -> io::Result<()>>(
        &mut self,
        tc: &Toolchain,
        with: F,
    ) -> Result<()> {
        self.inner
            .insert_with(make_lru_key_path(&tc.archive_id), with)?;
        let verified_archive_id = file_key(self.get(tc)?)?;
        // TODO: remove created toolchain?
        if verified_archive_id == tc.archive_id {
            Ok(())
        } else {
            Err(anyhow!("written file does not match expected hash key"))
        }
    }

    pub fn get_file(&mut self, tc: &Toolchain) -> LruResult<fs::File> {
        self.inner.get_file(make_lru_key_path(&tc.archive_id))
    }

    pub fn get(&mut self, tc: &Toolchain) -> LruResult<Box<dyn ReadSeek>> {
        self.inner.get(make_lru_key_path(&tc.archive_id))
    }

    pub fn len(&self) -> usize {
        self.inner.len()
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn remove(&mut self, tc: &Toolchain) -> LruResult<()> {
        self.inner.remove(make_lru_key_path(&tc.archive_id))
    }

    #[cfg(feature = "dist-client")]
    fn insert_file(&mut self, path: &Path) -> Result<Toolchain> {
        let archive_id = path_key(path)?;
        self.inner
            .insert_file(make_lru_key_path(&archive_id), path)?;
        Ok(Toolchain { archive_id })
    }
}

#[cfg(feature = "dist-client")]
fn path_key(path: &Path) -> Result<String> {
    file_key(fs::File::open(path)?)
}
fn file_key<R: Read>(rdr: R) -> Result<String> {
    Digest::reader_sync(rdr)
}
/// Make a path to the cache entry with key `key`.
fn make_lru_key_path(key: &str) -> PathBuf {
    Path::new(&key[0..1]).join(&key[1..2]).join(key)
}
mozilla-sccache-40c3d6b/src/dist/client_auth.rs000066400000000000000000000552061475712407500216020ustar00rootroot00000000000000use bytes::Bytes;
use futures::channel::oneshot;
use http::header::{CONTENT_LENGTH, CONTENT_TYPE};
use http::StatusCode;
use http_body_util::Full;
use hyper::Response;
use serde::Serialize;
use std::collections::HashMap;
use std::error::Error as StdError;
use std::io;
use std::net::{SocketAddr, TcpStream, ToSocketAddrs};
use std::sync::mpsc;
use std::time::Duration;
use tokio::runtime::Runtime;
use url::Url;
use uuid::Uuid;

use crate::errors::*;

// These (arbitrary) ports need to be registered as valid redirect urls in the oauth provider you're using
pub const VALID_PORTS: &[u16] = &[12731, 32492, 56909];

// If token is valid for under this amount of time, print a warning
const MIN_TOKEN_VALIDITY: Duration = Duration::from_secs(2 * 24 * 60 * 60);
const MIN_TOKEN_VALIDITY_WARNING: &str = "two days";

fn query_pairs(url: &str) -> Result<HashMap<String, String>> {
    // Url::parse operates on absolute URLs, so ensure there's a prefix
    let url = Url::parse("http://unused_base")
        .expect("Failed to parse fake url prefix")
        .join(url)
        .context("Failed to parse url while extracting query params")?;
    Ok(url
        .query_pairs()
        .map(|(k, v)| (k.into_owned(), v.into_owned()))
        .collect())
}

fn html_response(body: &'static str) -> Response<Full<Bytes>> {
    Response::builder()
        .header(CONTENT_TYPE, mime::TEXT_HTML.to_string())
        .header(CONTENT_LENGTH, body.len())
        .body(body.into())
        .unwrap()
}

fn json_response<T: serde::Serialize>(data: &T) -> Result<Response<Full<Bytes>>> {
    let body = serde_json::to_vec(data).context("Failed to serialize to JSON")?;
    let len = body.len();
    Ok(Response::builder()
        .header(CONTENT_TYPE, mime::APPLICATION_JSON.to_string())
        .header(CONTENT_LENGTH, len)
        .body(body.into())
        .unwrap())
}

const REDIRECT_WITH_AUTH_JSON: &str = r##" "##;

mod code_grant_pkce {
    use super::{
        html_response, json_response, query_pairs, MIN_TOKEN_VALIDITY, MIN_TOKEN_VALIDITY_WARNING,
        REDIRECT_WITH_AUTH_JSON,
    };
    use crate::util::new_reqwest_blocking_client;
    use crate::util::BASE64_URL_SAFE_ENGINE;
    use base64::Engine;
    use bytes::Bytes;
    use futures::channel::oneshot;
    use http_body_util::Full;
    use hyper::{Method, Request, Response, StatusCode};
    use rand::{rngs::OsRng, RngCore};
    use serde::{Deserialize, Serialize};
    use sha2::{Digest, Sha256};
    use std::collections::HashMap;
    use std::sync::mpsc;
    use std::sync::Mutex;
    use std::time::{Duration, Instant};
    use url::Url;

    use crate::errors::*;

    // Code request - https://tools.ietf.org/html/rfc7636#section-4.3
    const CLIENT_ID_PARAM: &str = "client_id";
    const CODE_CHALLENGE_PARAM: &str = "code_challenge";
    const CODE_CHALLENGE_METHOD_PARAM: &str = "code_challenge_method";
    const CODE_CHALLENGE_METHOD_VALUE: &str = "S256";
    const REDIRECT_PARAM: &str = "redirect_uri";
    const RESPONSE_TYPE_PARAM: &str = "response_type";
    const RESPONSE_TYPE_PARAM_VALUE: &str = "code";
    const STATE_PARAM: &str = "state";

    // Code response - https://tools.ietf.org/html/rfc6749#section-4.1.2
    const CODE_RESULT_PARAM: &str = "code";
    const STATE_RESULT_PARAM: &str = "state";

    // Token request - https://tools.ietf.org/html/rfc7636#section-4.5
    #[derive(Serialize)]
    struct TokenRequest<'a> {
        client_id: &'a str,
        code_verifier: &'a str,
        code: &'a str,
        grant_type: &'a str,
        redirect_uri: &'a str,
    }
    const GRANT_TYPE_PARAM_VALUE: &str = "authorization_code";

    // Token response - https://tools.ietf.org/html/rfc6749#section-5.1
    #[derive(Deserialize)]
    struct TokenResponse {
        access_token: String,
        token_type: String,
        expires_in: u64, // Technically not required by the spec
    }
    const TOKEN_TYPE_RESULT_PARAM_VALUE: &str = "bearer"; // case-insensitive

    const NUM_CODE_VERIFIER_BYTES: usize = 256 / 8;

    pub struct State {
        pub auth_url: String,
        pub auth_state_value: String,
        pub code_tx: mpsc::SyncSender<String>,
        pub shutdown_tx: Option<oneshot::Sender<()>>,
    }

    pub static STATE: Mutex<Option<State>> = Mutex::new(None);

    pub fn generate_verifier_and_challenge() -> Result<(String, String)> {
        let mut code_verifier_bytes = vec![0; NUM_CODE_VERIFIER_BYTES];
        OsRng.fill_bytes(&mut code_verifier_bytes);
        let code_verifier = BASE64_URL_SAFE_ENGINE.encode(&code_verifier_bytes);
        let mut hasher = Sha256::new();
        hasher.update(&code_verifier);
        let code_challenge = BASE64_URL_SAFE_ENGINE.encode(hasher.finalize());
        Ok((code_verifier, code_challenge))
    }

    pub fn finish_url(
        client_id: &str,
        url: &mut Url,
        redirect_uri: &str,
        state: &str,
        code_challenge: &str,
    ) {
        url.query_pairs_mut()
            .append_pair(CLIENT_ID_PARAM, client_id)
            .append_pair(CODE_CHALLENGE_PARAM, code_challenge)
            .append_pair(CODE_CHALLENGE_METHOD_PARAM, CODE_CHALLENGE_METHOD_VALUE)
            .append_pair(REDIRECT_PARAM, redirect_uri)
            .append_pair(RESPONSE_TYPE_PARAM, RESPONSE_TYPE_PARAM_VALUE)
            .append_pair(STATE_PARAM, state);
    }

    fn handle_code_response(params: HashMap<String, String>) -> Result<(String, String)> {
        let code = params
            .get(CODE_RESULT_PARAM)
            .context("No code found in response")?;
        let state = params
            .get(STATE_RESULT_PARAM)
            .context("No state found in response")?;
        Ok((code.to_owned(), state.to_owned()))
    }

    fn handle_token_response(res: TokenResponse) -> Result<(String, Instant)> {
        let token = res.access_token;
        if res.token_type.to_lowercase() != TOKEN_TYPE_RESULT_PARAM_VALUE {
            bail!(
                "Token type in response is not {}",
                TOKEN_TYPE_RESULT_PARAM_VALUE
            )
        }
        // Calculate ASAP the actual time at which the token will expire
        let expires_at = Instant::now() + Duration::from_secs(res.expires_in);
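        // (Clarifying comment, not in the original source: RFC 6749 section 5.1
        // defines `expires_in` as a lifetime in seconds, hence `from_secs` here.)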
Ok((token, expires_at)) } const SUCCESS_AFTER_REDIRECT: &str = r##" In-browser step of authentication complete, you can now close this page! "##; pub fn serve(req: Request) -> Result>> { let mut state = STATE.lock().unwrap(); let state = state.as_mut().unwrap(); debug!("Handling {} {}", req.method(), req.uri()); let response = match (req.method(), req.uri().path()) { (&Method::GET, "/") => html_response(REDIRECT_WITH_AUTH_JSON), (&Method::GET, "/auth_detail.json") => json_response(&state.auth_url)?, (&Method::GET, "/redirect") => { let query_pairs = query_pairs(&req.uri().to_string())?; let (code, auth_state) = handle_code_response(query_pairs) .context("Failed to handle response from redirect")?; if auth_state != state.auth_state_value { return Err(anyhow!("Mismatched auth states after redirect")); } // Deliberately in reverse order for a 'happens-before' relationship state.code_tx.send(code).unwrap(); state.shutdown_tx.take().unwrap().send(()).unwrap(); html_response(SUCCESS_AFTER_REDIRECT) } _ => { warn!("Route not found"); Response::builder() .status(StatusCode::NOT_FOUND) .body("".into())? } }; Ok(response) } pub fn code_to_token( token_url: &str, client_id: &str, code_verifier: &str, code: &str, redirect_uri: &str, ) -> Result { let token_request = TokenRequest { client_id, code_verifier, code, grant_type: GRANT_TYPE_PARAM_VALUE, redirect_uri, }; let client = new_reqwest_blocking_client(); let res = client.post(token_url).json(&token_request).send()?; if !res.status().is_success() { bail!( "Sending code to {} failed, HTTP error: {}", token_url, res.status() ) } let (token, expires_at) = handle_token_response( res.json() .context("Failed to parse token response as JSON")?, )?; if expires_at - Instant::now() < MIN_TOKEN_VALIDITY { warn!( "Token retrieved expires in under {}", MIN_TOKEN_VALIDITY_WARNING ); eprintln!( "sccache: Token retrieved expires in under {}", MIN_TOKEN_VALIDITY_WARNING ); } Ok(token) } } mod implicit { use super::{ html_response, json_response, query_pairs, MIN_TOKEN_VALIDITY, MIN_TOKEN_VALIDITY_WARNING, REDIRECT_WITH_AUTH_JSON, }; use bytes::Bytes; use futures::channel::oneshot; use http_body_util::Full; use hyper::{Method, Request, Response, StatusCode}; use std::collections::HashMap; use std::sync::mpsc; use std::sync::Mutex; use std::time::{Duration, Instant}; use url::Url; use crate::errors::*; // Request - https://tools.ietf.org/html/rfc6749#section-4.2.1 const CLIENT_ID_PARAM: &str = "client_id"; const REDIRECT_PARAM: &str = "redirect_uri"; const RESPONSE_TYPE_PARAM: &str = "response_type"; const RESPONSE_TYPE_PARAM_VALUE: &str = "token"; const STATE_PARAM: &str = "state"; // Response - https://tools.ietf.org/html/rfc6749#section-4.2.2 const TOKEN_RESULT_PARAM: &str = "access_token"; const TOKEN_TYPE_RESULT_PARAM: &str = "token_type"; const TOKEN_TYPE_RESULT_PARAM_VALUE: &str = "bearer"; // case-insensitive const EXPIRES_IN_RESULT_PARAM: &str = "expires_in"; // Technically not required by the spec const STATE_RESULT_PARAM: &str = "state"; pub struct State { pub auth_url: String, pub auth_state_value: String, pub token_tx: mpsc::SyncSender, pub shutdown_tx: Option>, } pub static STATE: Mutex> = Mutex::new(None); pub fn finish_url(client_id: &str, url: &mut Url, redirect_uri: &str, state: &str) { url.query_pairs_mut() .append_pair(CLIENT_ID_PARAM, client_id) .append_pair(REDIRECT_PARAM, redirect_uri) .append_pair(RESPONSE_TYPE_PARAM, RESPONSE_TYPE_PARAM_VALUE) .append_pair(STATE_PARAM, state); } fn handle_response(params: HashMap) -> Result<(String, Instant, 
String)> { let token = params .get(TOKEN_RESULT_PARAM) .context("No token found in response")?; let bearer = params .get(TOKEN_TYPE_RESULT_PARAM) .context("No token type found in response")?; if bearer.to_lowercase() != TOKEN_TYPE_RESULT_PARAM_VALUE { bail!( "Token type in response is not {}", TOKEN_TYPE_RESULT_PARAM_VALUE ) } let expires_in = params .get(EXPIRES_IN_RESULT_PARAM) .context("No expiry found in response")?; // Calculate ASAP the actual time at which the token will expire let expires_at = Instant::now() + Duration::from_secs( expires_in .parse() .map_err(|_| anyhow!("Failed to parse expiry as integer"))?, ); let state = params .get(STATE_RESULT_PARAM) .context("No state found in response")?; Ok((token.to_owned(), expires_at, state.to_owned())) } const SAVE_AUTH_AFTER_REDIRECT: &str = r##" "##; pub fn serve(req: Request) -> Result>> { let mut state = STATE.lock().unwrap(); let state = state.as_mut().unwrap(); debug!("Handling {} {}", req.method(), req.uri()); let response = match (req.method(), req.uri().path()) { (&Method::GET, "/") => html_response(REDIRECT_WITH_AUTH_JSON), (&Method::GET, "/auth_detail.json") => json_response(&state.auth_url)?, (&Method::GET, "/redirect") => html_response(SAVE_AUTH_AFTER_REDIRECT), (&Method::POST, "/save_auth") => { let query_pairs = query_pairs(&req.uri().to_string())?; let (token, expires_at, auth_state) = handle_response(query_pairs).context("Failed to save auth after redirect")?; if auth_state != state.auth_state_value { return Err(anyhow!("Mismatched auth states after redirect")); } if expires_at - Instant::now() < MIN_TOKEN_VALIDITY { warn!( "Token retrieved expires in under {}", MIN_TOKEN_VALIDITY_WARNING ); eprintln!( "sccache: Token retrieved expires in under {}", MIN_TOKEN_VALIDITY_WARNING ); } // Deliberately in reverse order for a 'happens-before' relationship state.token_tx.send(token).unwrap(); state.shutdown_tx.take().unwrap().send(()).unwrap(); json_response(&"")? } _ => { warn!("Route not found"); Response::builder() .status(StatusCode::NOT_FOUND) .body("".into()) .unwrap() } }; Ok(response) } } use tokio::net::TcpListener; struct HyperBuilderWrap { listener: TcpListener, } impl HyperBuilderWrap { pub async fn try_bind(addr: SocketAddr) -> io::Result { let listener = TcpListener::bind(addr).await?; Ok(HyperBuilderWrap { listener }) } // Typing out a hyper service is a major pain, so let's focus on our simple // `fn(Request) -> Response` handler functions; to reduce repetition // we create a relevant service using hyper's own helper factory functions. 
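    // (Explanatory note, not in the original source: each connection accepted
    // below is wrapped in `TokioIo` and served on its own spawned task, so one
    // slow or stalled browser connection cannot block the accept loop.)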
async fn serve(&mut self, sfn: F) -> io::Result<()> where F: Fn(hyper::Request) -> anyhow::Result>> + Send + 'static + Copy + Sync, { use hyper::server::conn::http1; use hyper_util::rt::tokio::TokioIo; loop { let (tcp, _) = self.listener.accept().await?; let io = TokioIo::new(tcp); tokio::task::spawn(async move { let conn = http1::Builder::new().serve_connection( io, hyper::service::service_fn(|req| async move { let uri = req.uri().clone(); sfn(req).or_else(|e| error_code_response(uri, e)) }), ); tokio::pin!(conn); conn.await.unwrap(); }); } } pub fn local_addr(&self) -> SocketAddr { self.listener.local_addr().unwrap() } } #[allow(clippy::unnecessary_wraps)] fn error_code_response(uri: hyper::Uri, e: E) -> hyper::Result>> where E: std::fmt::Debug, { let body = format!("{:?}", e); eprintln!( "sccache: Error during a request to {} on the client auth web server\n{}", uri, body ); let len = body.len(); let builder = Response::builder().status(StatusCode::INTERNAL_SERVER_ERROR); let res = builder .header(CONTENT_TYPE, mime::TEXT_PLAIN.to_string()) .header(CONTENT_LENGTH, len) .body(body.into()) .unwrap(); Ok::>, hyper::Error>(res) } /// Try to bind a TCP stream to any of the available port out of [`VALID_PORTS`]. async fn try_bind() -> Result { // Try all the valid ports for &port in VALID_PORTS { let mut addrs = ("localhost", port) .to_socket_addrs() .expect("Failed to interpret localhost address to listen on"); let addr = addrs .next() .expect("Expected at least one address in parsed socket address"); // Hyper binds with reuseaddr and reuseport so binding won't fail as you'd expect on Linux match TcpStream::connect(addr) { // Already open Ok(_) => continue, // Doesn't seem to be open Err(ref e) if e.kind() == io::ErrorKind::ConnectionRefused => (), Err(e) => { return Err(e) .with_context(|| format!("Failed to check {} is available for binding", addr)) } } match HyperBuilderWrap::try_bind(addr).await { Ok(s) => return Ok(s), Err(ref err) if err .source() .and_then(|err| err.downcast_ref::()) .map(|err| err.kind() == io::ErrorKind::AddrInUse) .unwrap_or(false) => { continue } Err(e) => return Err(e).with_context(|| format!("Failed to bind to {}", addr)), } } bail!("Could not bind to any valid port: ({:?})", VALID_PORTS) } // https://auth0.com/docs/api-auth/tutorials/authorization-code-grant-pkce pub fn get_token_oauth2_code_grant_pkce( client_id: &str, mut auth_url: Url, token_url: &str, ) -> Result { let runtime = Runtime::new()?; let mut server = runtime.block_on(async move { try_bind().await })?; let port = server.local_addr().port(); let _guard = runtime.enter(); let handle = runtime.spawn(async move { server.serve(code_grant_pkce::serve).await.unwrap(); }); let redirect_uri = format!("http://localhost:{}/redirect", port); let auth_state_value = Uuid::new_v4().as_simple().to_string(); let (verifier, challenge) = code_grant_pkce::generate_verifier_and_challenge()?; code_grant_pkce::finish_url( client_id, &mut auth_url, &redirect_uri, &auth_state_value, &challenge, ); info!("Listening on http://localhost:{} with 1 thread.", port); println!( "sccache: Please visit http://localhost:{} in your browser", port ); let (shutdown_tx, shutdown_rx) = oneshot::channel(); let (code_tx, code_rx) = mpsc::sync_channel(1); let state = code_grant_pkce::State { auth_url: auth_url.to_string(), auth_state_value, code_tx, shutdown_tx: Some(shutdown_tx), }; *code_grant_pkce::STATE.lock().unwrap() = Some(state); runtime.block_on(async move { if let Err(e) = shutdown_rx.await { warn!( "Something went wrong while 
waiting for auth server shutdown: {}",
                e
            )
        }
    });
    handle.abort();

    info!("Server finished, using code to request token");
    let code = code_rx
        .try_recv()
        .expect("Hyper shutdown but code not available - internal error");
    code_grant_pkce::code_to_token(token_url, client_id, &verifier, &code, &redirect_uri)
        .context("Failed to convert oauth2 code into a token")
}

// https://auth0.com/docs/api-auth/tutorials/implicit-grant
pub fn get_token_oauth2_implicit(client_id: &str, mut auth_url: Url) -> Result<String> {
    let runtime = Runtime::new()?;
    let mut server = runtime.block_on(async move { try_bind().await })?;
    let port = server.local_addr().port();
    let _guard = runtime.enter();
    let handle = runtime.spawn(async move {
        server.serve(implicit::serve).await.unwrap();
    });

    let redirect_uri = format!("http://localhost:{}/redirect", port);
    let auth_state_value = Uuid::new_v4().as_simple().to_string();
    implicit::finish_url(client_id, &mut auth_url, &redirect_uri, &auth_state_value);

    info!("Listening on http://localhost:{} with 1 thread.", port);
    println!(
        "sccache: Please visit http://localhost:{} in your browser",
        port
    );
    let (shutdown_tx, shutdown_rx) = oneshot::channel();
    let (token_tx, token_rx) = mpsc::sync_channel(1);
    let state = implicit::State {
        auth_url: auth_url.to_string(),
        auth_state_value,
        token_tx,
        shutdown_tx: Some(shutdown_tx),
    };
    *implicit::STATE.lock().unwrap() = Some(state);

    runtime.block_on(async move {
        if let Err(e) = shutdown_rx.await {
            warn!(
                "Something went wrong while waiting for auth server shutdown: {}",
                e
            )
        }
    });
    handle.abort();

    info!("Server finished, returning token");
    Ok(token_rx
        .try_recv()
        .expect("Hyper shutdown but token not available - internal error"))
}
mozilla-sccache-40c3d6b/src/dist/http.rs000066400000000000000000001523461475712407500202640ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
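// (Overview comment added for orientation; the wording is ours, but the flow
// it describes is taken from the code below: a dist client asks the scheduler
// to alloc_job, the scheduler replies with a job allocation plus the
// certificate digest of the chosen build server, and the client then talks to
// that server directly over HTTPS via the assign_job, submit_toolchain and
// run_job endpoints.)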
#[cfg(feature = "dist-client")] pub use self::client::Client; #[cfg(feature = "dist-server")] pub use self::server::Server; #[cfg(feature = "dist-server")] pub use self::server::{ ClientAuthCheck, ClientVisibleMsg, Scheduler, ServerAuthCheck, HEARTBEAT_TIMEOUT, }; mod common { use reqwest::header; use serde::{Deserialize, Serialize}; #[cfg(feature = "dist-server")] use std::collections::HashMap; use std::fmt; use crate::dist; use crate::errors::*; // Note that content-length is necessary due to https://github.com/tiny-http/tiny-http/issues/147 pub trait ReqwestRequestBuilderExt: Sized { fn bincode(self, bincode: &T) -> Result; fn bytes(self, bytes: Vec) -> Self; } impl ReqwestRequestBuilderExt for reqwest::blocking::RequestBuilder { fn bincode(self, bincode: &T) -> Result { let bytes = bincode::serialize(bincode).context("Failed to serialize body to bincode")?; Ok(self.bytes(bytes)) } fn bytes(self, bytes: Vec) -> Self { self.header( header::CONTENT_TYPE, mime::APPLICATION_OCTET_STREAM.to_string(), ) .header(header::CONTENT_LENGTH, bytes.len()) .body(bytes) } } impl ReqwestRequestBuilderExt for reqwest::RequestBuilder { fn bincode(self, bincode: &T) -> Result { let bytes = bincode::serialize(bincode).context("Failed to serialize body to bincode")?; Ok(self.bytes(bytes)) } fn bytes(self, bytes: Vec) -> Self { self.header( header::CONTENT_TYPE, mime::APPLICATION_OCTET_STREAM.to_string(), ) .header(header::CONTENT_LENGTH, bytes.len()) .body(bytes) } } #[cfg(feature = "dist-client")] pub async fn bincode_req_fut( req: reqwest::RequestBuilder, ) -> Result { // Work around tiny_http issue #151 by disabling HTTP pipeline with // `Connection: close`. let res = req.header(header::CONNECTION, "close").send().await?; let status = res.status(); let bytes = res.bytes().await?; if !status.is_success() { let errmsg = format!( "Error {}: {}", status.as_u16(), String::from_utf8_lossy(&bytes) ); if status.is_client_error() { anyhow::bail!(HttpClientError(errmsg)); } else { anyhow::bail!(errmsg); } } else { Ok(bincode::deserialize(&bytes)?) 
} } #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] #[serde(deny_unknown_fields)] pub struct JobJwt { pub exp: u64, pub job_id: dist::JobId, } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum AllocJobHttpResponse { Success { job_alloc: dist::JobAlloc, need_toolchain: bool, cert_digest: Vec, }, Fail { msg: String, }, } impl AllocJobHttpResponse { #[cfg(feature = "dist-server")] pub fn from_alloc_job_result( res: dist::AllocJobResult, certs: &HashMap, Vec)>, ) -> Self { match res { dist::AllocJobResult::Success { job_alloc, need_toolchain, } => { if let Some((digest, _)) = certs.get(&job_alloc.server_id) { AllocJobHttpResponse::Success { job_alloc, need_toolchain, cert_digest: digest.to_owned(), } } else { AllocJobHttpResponse::Fail { msg: format!( "missing certificates for server {}", job_alloc.server_id.addr() ), } } } dist::AllocJobResult::Fail { msg } => AllocJobHttpResponse::Fail { msg }, } } } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ServerCertificateHttpResponse { pub cert_digest: Vec, pub cert_pem: Vec, } #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct HeartbeatServerHttpRequest { pub jwt_key: Vec, pub num_cpus: usize, pub server_nonce: dist::ServerNonce, pub cert_digest: Vec, pub cert_pem: Vec, } // cert_pem is quite long so elide it (you can retrieve it by hitting the server url anyway) impl fmt::Debug for HeartbeatServerHttpRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let HeartbeatServerHttpRequest { jwt_key, num_cpus, server_nonce, cert_digest, cert_pem, } = self; write!(f, "HeartbeatServerHttpRequest {{ jwt_key: {:?}, num_cpus: {:?}, server_nonce: {:?}, cert_digest: {:?}, cert_pem: [...{} bytes...] 
}}", jwt_key, num_cpus, server_nonce, cert_digest, cert_pem.len()) } } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct RunJobHttpRequest { pub command: dist::CompileCommand, pub outputs: Vec, } } pub mod urls { use crate::dist::{JobId, ServerId}; pub fn scheduler_alloc_job(scheduler_url: &reqwest::Url) -> reqwest::Url { scheduler_url .join("/api/v1/scheduler/alloc_job") .expect("failed to create alloc job url") } pub fn scheduler_server_certificate( scheduler_url: &reqwest::Url, server_id: ServerId, ) -> reqwest::Url { scheduler_url .join(&format!( "/api/v1/scheduler/server_certificate/{}", server_id.addr() )) .expect("failed to create server certificate url") } pub fn scheduler_heartbeat_server(scheduler_url: &reqwest::Url) -> reqwest::Url { scheduler_url .join("/api/v1/scheduler/heartbeat_server") .expect("failed to create heartbeat url") } pub fn scheduler_job_state(scheduler_url: &reqwest::Url, job_id: JobId) -> reqwest::Url { scheduler_url .join(&format!("/api/v1/scheduler/job_state/{}", job_id)) .expect("failed to create job state url") } pub fn scheduler_status(scheduler_url: &reqwest::Url) -> reqwest::Url { scheduler_url .join("/api/v1/scheduler/status") .expect("failed to create alloc job url") } pub fn server_assign_job(server_id: ServerId, job_id: JobId) -> reqwest::Url { let url = format!( "https://{}/api/v1/distserver/assign_job/{}", server_id.addr(), job_id ); reqwest::Url::parse(&url).expect("failed to create assign job url") } pub fn server_submit_toolchain(server_id: ServerId, job_id: JobId) -> reqwest::Url { let url = format!( "https://{}/api/v1/distserver/submit_toolchain/{}", server_id.addr(), job_id ); reqwest::Url::parse(&url).expect("failed to create submit toolchain url") } pub fn server_run_job(server_id: ServerId, job_id: JobId) -> reqwest::Url { let url = format!( "https://{}/api/v1/distserver/run_job/{}", server_id.addr(), job_id ); reqwest::Url::parse(&url).expect("failed to create run job url") } } #[cfg(feature = "dist-server")] mod server { use crate::util::new_reqwest_blocking_client; use byteorder::{BigEndian, ReadBytesExt}; use flate2::read::ZlibDecoder as ZlibReadDecoder; use once_cell::sync::Lazy; use rand::{rngs::OsRng, RngCore}; use rouille::accept; use serde::Serialize; use std::collections::HashMap; use std::convert::Infallible; use std::io::Read; use std::net::SocketAddr; use std::result::Result as StdResult; use std::sync::atomic; use std::sync::Mutex; use std::thread; use std::time::Duration; use super::common::{ AllocJobHttpResponse, HeartbeatServerHttpRequest, JobJwt, ReqwestRequestBuilderExt, RunJobHttpRequest, ServerCertificateHttpResponse, }; use super::urls; use crate::dist::{ self, AllocJobResult, AssignJobResult, HeartbeatServerResult, InputsReader, JobAuthorizer, JobId, JobState, RunJobResult, SchedulerStatusResult, ServerId, ServerNonce, SubmitToolchainResult, Toolchain, ToolchainReader, UpdateJobStateResult, }; use crate::errors::*; const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(30); const HEARTBEAT_ERROR_INTERVAL: Duration = Duration::from_secs(10); pub const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(90); pub fn bincode_req( req: reqwest::blocking::RequestBuilder, ) -> Result { // Work around tiny_http issue #151 by disabling HTTP pipeline with // `Connection: close`. 
let mut res = req.header(reqwest::header::CONNECTION, "close").send()?; let status = res.status(); let mut body = vec![]; res.copy_to(&mut body) .context("error reading response body")?; if !status.is_success() { Err(anyhow!( "Error {} (Headers={:?}): {}", status.as_u16(), res.headers(), String::from_utf8_lossy(&body) )) } else { bincode::deserialize(&body).map_err(Into::into) } } fn create_https_cert_and_privkey(addr: SocketAddr) -> Result<(Vec, Vec, Vec)> { let rsa_key = openssl::rsa::Rsa::::generate(2048) .context("failed to generate rsa privkey")?; let privkey_pem = rsa_key .private_key_to_pem() .context("failed to create pem from rsa privkey")?; let privkey: openssl::pkey::PKey = openssl::pkey::PKey::from_rsa(rsa_key) .context("failed to create openssl pkey from rsa privkey")?; let mut builder = openssl::x509::X509::builder().context("failed to create x509 builder")?; // Populate the certificate with the necessary parts, mostly from mkcert in openssl builder .set_version(2) .context("failed to set x509 version")?; let serial_number = openssl::bn::BigNum::from_u32(0) .and_then(|bn| bn.to_asn1_integer()) .context("failed to create openssl asn1 0")?; builder .set_serial_number(serial_number.as_ref()) .context("failed to set x509 serial number")?; let not_before = openssl::asn1::Asn1Time::days_from_now(0) .context("failed to create openssl not before asn1")?; builder .set_not_before(not_before.as_ref()) .context("failed to set not before on x509")?; let not_after = openssl::asn1::Asn1Time::days_from_now(365) .context("failed to create openssl not after asn1")?; builder .set_not_after(not_after.as_ref()) .context("failed to set not after on x509")?; builder .set_pubkey(privkey.as_ref()) .context("failed to set pubkey for x509")?; let mut name = openssl::x509::X509Name::builder()?; name.append_entry_by_nid(openssl::nid::Nid::COMMONNAME, &addr.to_string())?; let name = name.build(); builder .set_subject_name(&name) .context("failed to set subject name")?; builder .set_issuer_name(&name) .context("failed to set issuer name")?; // Add the SubjectAlternativeName let extension = openssl::x509::extension::SubjectAlternativeName::new() .ip(&addr.ip().to_string()) .build(&builder.x509v3_context(None, None)) .context("failed to build SAN extension for x509")?; builder .append_extension(extension) .context("failed to append SAN extension for x509")?; // Add ExtendedKeyUsage let ext_key_usage = openssl::x509::extension::ExtendedKeyUsage::new() .server_auth() .build() .context("failed to build EKU extension for x509")?; builder .append_extension(ext_key_usage) .context("fails to append EKU extension for x509")?; // Finish the certificate builder .sign(&privkey, openssl::hash::MessageDigest::sha1()) .context("failed to sign x509 with sha1")?; let cert: openssl::x509::X509 = builder.build(); let cert_pem = cert.to_pem().context("failed to create pem from x509")?; let cert_digest = cert .digest(openssl::hash::MessageDigest::sha256()) .context("failed to create digest of x509 certificate")? 
.as_ref() .to_owned(); Ok((cert_digest, cert_pem, privkey_pem)) } // Messages that are non-sensitive and can be sent to the client #[derive(Debug)] pub struct ClientVisibleMsg(String); impl ClientVisibleMsg { pub fn from_nonsensitive(s: String) -> Self { ClientVisibleMsg(s) } } pub trait ClientAuthCheck: Send + Sync { fn check(&self, token: &str) -> StdResult<(), ClientVisibleMsg>; } pub type ServerAuthCheck = Box Option + Send + Sync>; const JWT_KEY_LENGTH: usize = 256 / 8; static JWT_HEADER: Lazy = Lazy::new(|| jwt::Header::new(jwt::Algorithm::HS256)); static JWT_VALIDATION: Lazy = Lazy::new(|| { let mut validation = jwt::Validation::new(jwt::Algorithm::HS256); validation.leeway = 0; validation.validate_exp = false; validation.validate_nbf = false; validation }); // Based on rouille::input::json::json_input #[derive(Debug)] pub enum RouilleBincodeError { BodyAlreadyExtracted, WrongContentType, ParseError(bincode::Error), } impl From for RouilleBincodeError { fn from(err: bincode::Error) -> RouilleBincodeError { RouilleBincodeError::ParseError(err) } } impl std::error::Error for RouilleBincodeError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match *self { RouilleBincodeError::ParseError(ref e) => Some(e), _ => None, } } } impl std::fmt::Display for RouilleBincodeError { fn fmt( &self, fmt: &mut std::fmt::Formatter<'_>, ) -> std::result::Result<(), std::fmt::Error> { write!( fmt, "{}", match *self { RouilleBincodeError::BodyAlreadyExtracted => { "the body of the request was already extracted" } RouilleBincodeError::WrongContentType => { "the request didn't have a binary content type" } RouilleBincodeError::ParseError(_) => "error while parsing the bincode body", } ) } } fn bincode_input(request: &rouille::Request) -> std::result::Result where O: serde::de::DeserializeOwned, { if let Some(header) = request.header("Content-Type") { if !header.starts_with("application/octet-stream") { return Err(RouilleBincodeError::WrongContentType); } } else { return Err(RouilleBincodeError::WrongContentType); } if let Some(mut b) = request.data() { bincode::deserialize_from::<_, O>(&mut b).map_err(From::from) } else { Err(RouilleBincodeError::BodyAlreadyExtracted) } } // Based on try_or_400 in rouille, but with logging #[derive(Serialize)] pub struct ErrJson { description: String, cause: Option>, } impl ErrJson { fn from_err(err: &E) -> ErrJson { let cause = err.source().map(ErrJson::from_err).map(Box::new); ErrJson { description: err.to_string(), cause, } } fn into_data(self) -> String { serde_json::to_string(&self).expect("infallible serialization for ErrJson failed") } } macro_rules! try_or_err_and_log { ($reqid:expr, $code:expr, $result:expr) => { match $result { Ok(r) => r, Err(err) => { // TODO: would ideally just use error_chain #[allow(unused_imports)] use std::error::Error; let mut err_msg = err.to_string(); let mut maybe_cause = err.source(); while let Some(cause) = maybe_cause { err_msg.push_str(", caused by: "); err_msg.push_str(&cause.to_string()); maybe_cause = cause.source(); } warn!("Res {} error: {}", $reqid, err_msg); let err: Box = err.into(); let json = ErrJson::from_err(&*err); return rouille::Response::json(&json).with_status_code($code); } } }; } macro_rules! try_or_400_log { ($reqid:expr, $result:expr) => { try_or_err_and_log!($reqid, 400, $result) }; } macro_rules! 
try_or_500_log { ($reqid:expr, $result:expr) => { try_or_err_and_log!($reqid, 500, $result) }; } fn make_401_with_body(short_err: &str, body: ClientVisibleMsg) -> rouille::Response { rouille::Response { status_code: 401, headers: vec![( "WWW-Authenticate".into(), format!("Bearer error=\"{}\"", short_err).into(), )], data: rouille::ResponseBody::from_data(body.0), upgrade: None, } } fn make_401(short_err: &str) -> rouille::Response { make_401_with_body(short_err, ClientVisibleMsg(String::new())) } fn bearer_http_auth(request: &rouille::Request) -> Option<&str> { let header = request.header("Authorization")?; let mut split = header.splitn(2, |c| c == ' '); let authtype = split.next()?; if authtype != "Bearer" { return None; } split.next() } /// Return `content` as a bincode-encoded `Response`. pub fn bincode_response(content: &T) -> rouille::Response where T: serde::Serialize, { let data = bincode::serialize(content).context("Failed to serialize response body"); let data = try_or_500_log!("bincode body serialization", data); rouille::Response { status_code: 200, headers: vec![ ("Content-Type".into(), "application/octet-stream".into()), ("Content-Length".into(), data.len().to_string().into()), ], data: rouille::ResponseBody::from_data(data), upgrade: None, } } /// Return `content` as either a bincode or json encoded `Response` /// depending on the Accept header in `request`. pub fn prepare_response(request: &rouille::Request, content: &T) -> rouille::Response where T: serde::Serialize, { accept!(request, "application/octet-stream" => bincode_response(content), "application/json" => rouille::Response::json(content), ) } // Verification of job auth in a request macro_rules! job_auth_or_401 { ($request:ident, $job_authorizer:expr, $job_id:expr) => {{ let verify_result = match bearer_http_auth($request) { Some(token) => $job_authorizer.verify_token($job_id, token), None => Err(anyhow!("no Authorization header")), }; match verify_result { Ok(()) => (), Err(err) => { let err: Box = err.into(); let json = ErrJson::from_err(&*err); return make_401_with_body("invalid_jwt", ClientVisibleMsg(json.into_data())); } } }}; } // Generation and verification of job auth struct JWTJobAuthorizer { server_key: Vec, } impl JWTJobAuthorizer { fn new(server_key: Vec) -> Box { Box::new(Self { server_key }) } } impl dist::JobAuthorizer for JWTJobAuthorizer { fn generate_token(&self, job_id: JobId) -> Result { let claims = JobJwt { exp: 0, job_id }; let key = jwt::EncodingKey::from_secret(&self.server_key); jwt::encode(&JWT_HEADER, &claims, &key) .map_err(|e| anyhow!("Failed to create JWT for job: {}", e)) } fn verify_token(&self, job_id: JobId, token: &str) -> Result<()> { let valid_claims = JobJwt { exp: 0, job_id }; let key = jwt::DecodingKey::from_secret(&self.server_key); jwt::decode(token, &key, &JWT_VALIDATION) .map_err(|e| anyhow!("JWT decode failed: {}", e)) .and_then(|res| { fn identical_t(_: &T, _: &T) {} identical_t(&res.claims, &valid_claims); if res.claims == valid_claims { Ok(()) } else { Err(anyhow!("mismatched claims")) } }) } } #[test] fn test_job_token_verification() { let ja = JWTJobAuthorizer::new(vec![1, 2, 2]); let job_id = JobId(55); let token = ja.generate_token(job_id).unwrap(); let job_id2 = JobId(56); let token2 = ja.generate_token(job_id2).unwrap(); let ja2 = JWTJobAuthorizer::new(vec![1, 2, 3]); // Check tokens are deterministic assert_eq!(token, ja.generate_token(job_id).unwrap()); // Check token verification works assert!(ja.verify_token(job_id, &token).is_ok()); 
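The authorizer above signs and checks per-job tokens with a shared server key. As an aside before the remaining assertions, here is a hedged standalone sketch of the same HS256 round-trip using the `jsonwebtoken` crate (imported as `jwt` in this module); the claim struct and key are illustrative, and exact `Validation` fields vary slightly between crate versions:

```rust
use jsonwebtoken as jwt;
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct SketchClaims {
    exp: usize,
    job_id: u64,
}

fn hs256_roundtrip() -> anyhow::Result<bool> {
    // Assumption: any byte string can serve as the shared HMAC key.
    let key = b"example-shared-server-key";
    let claims = SketchClaims { exp: 0, job_id: 55 };
    let token = jwt::encode(
        &jwt::Header::new(jwt::Algorithm::HS256),
        &claims,
        &jwt::EncodingKey::from_secret(key),
    )?;
    let mut validation = jwt::Validation::new(jwt::Algorithm::HS256);
    validation.validate_exp = false; // job tokens never expire; the claim match is the check
    let decoded =
        jwt::decode::<SketchClaims>(&token, &jwt::DecodingKey::from_secret(key), &validation)?;
    Ok(decoded.claims == claims)
}
```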
assert!(ja.verify_token(job_id, &token2).is_err()); assert!(ja.verify_token(job_id2, &token).is_err()); assert!(ja.verify_token(job_id2, &token2).is_ok()); // Check token verification with a different key fails assert!(ja2.verify_token(job_id, &token).is_err()); assert!(ja2.verify_token(job_id2, &token2).is_err()); } pub struct Scheduler { public_addr: SocketAddr, handler: S, // Is this client permitted to use the scheduler? check_client_auth: Box, // Do we believe the server is who they appear to be? check_server_auth: ServerAuthCheck, } impl Scheduler { pub fn new( public_addr: SocketAddr, handler: S, check_client_auth: Box, check_server_auth: ServerAuthCheck, ) -> Self { Self { public_addr, handler, check_client_auth, check_server_auth, } } pub fn start(self) -> Result { let Self { public_addr, handler, check_client_auth, check_server_auth, } = self; let requester = SchedulerRequester { client: Mutex::new(new_reqwest_blocking_client()), }; macro_rules! check_server_auth_or_err { ($request:ident) => {{ match bearer_http_auth($request).and_then(&*check_server_auth) { Some(server_id) => { let origin_ip = if let Some(header_val) = $request.header("X-Real-IP") { trace!("X-Real-IP: {:?}", header_val); match header_val.parse() { Ok(ip) => ip, Err(err) => { warn!( "X-Real-IP value {:?} could not be parsed: {:?}", header_val, err ); return rouille::Response::empty_400(); } } } else { $request.remote_addr().ip() }; if server_id.addr().ip() != origin_ip { trace!("server ip: {:?}", server_id.addr().ip()); trace!("request ip: {:?}", $request.remote_addr().ip()); return make_401("invalid_bearer_token_mismatched_address"); } else { server_id } } None => return make_401("invalid_bearer_token"), } }}; } fn maybe_update_certs( client: &mut reqwest::blocking::Client, certs: &mut HashMap, Vec)>, server_id: ServerId, cert_digest: Vec, cert_pem: Vec, ) -> Result<()> { if let Some((saved_cert_digest, _)) = certs.get(&server_id) { if saved_cert_digest == &cert_digest { return Ok(()); } } info!( "Adding new certificate for {} to scheduler", server_id.addr() ); let mut client_builder = reqwest::blocking::ClientBuilder::new(); // Add all the certificates we know about client_builder = client_builder.add_root_certificate( reqwest::Certificate::from_pem(&cert_pem) .context("failed to interpret pem as certificate")?, ); for (_, cert_pem) in certs.values() { client_builder = client_builder.add_root_certificate( reqwest::Certificate::from_pem(cert_pem).expect("previously valid cert"), ); } // Finish the client let new_client = client_builder // Disable connection pool to avoid broken connection // between runtime .pool_max_idle_per_host(0) .build() .context("failed to create a HTTP client")?; // Use the updated certificates *client = new_client; certs.insert(server_id, (cert_digest, cert_pem)); Ok(()) } info!("Scheduler listening for clients on {}", public_addr); let request_count = atomic::AtomicUsize::new(0); // From server_id -> cert_digest, cert_pem let server_certificates: Mutex, Vec)>> = Default::default(); let server = rouille::Server::new(public_addr, move |request| { let req_id = request_count.fetch_add(1, atomic::Ordering::SeqCst); trace!(target: "sccache_http", "Req {} ({}): {:?}", req_id, request.remote_addr(), request); let response = (|| router!(request, (POST) (/api/v1/scheduler/alloc_job) => { let bearer_auth = match bearer_http_auth(request) { Some(s) => s, None => return make_401("no_bearer_auth"), }; match check_client_auth.check(bearer_auth) { Ok(()) => (), Err(client_msg) => { warn!("Bearer auth 
failed: {:?}", client_msg); return make_401_with_body("bearer_auth_failed", client_msg) }, } let toolchain = try_or_400_log!(req_id, bincode_input(request)); trace!("Req {}: alloc_job: {:?}", req_id, toolchain); let alloc_job_res: AllocJobResult = try_or_500_log!(req_id, handler.handle_alloc_job(&requester, toolchain)); let certs = server_certificates.lock().unwrap(); let res = AllocJobHttpResponse::from_alloc_job_result(alloc_job_res, &certs); prepare_response(request, &res) }, (GET) (/api/v1/scheduler/server_certificate/{server_id: ServerId}) => { let certs = { let guard = server_certificates.lock().unwrap(); guard.get(&server_id).map(|v|v.to_owned()) }; let (cert_digest, cert_pem) = try_or_500_log!(req_id, certs .context("server cert not available")); let res = ServerCertificateHttpResponse { cert_digest, cert_pem, }; prepare_response(request, &res) }, (POST) (/api/v1/scheduler/heartbeat_server) => { let server_id = check_server_auth_or_err!(request); let heartbeat_server = try_or_400_log!(req_id, bincode_input(request)); trace!(target: "sccache_heartbeat", "Req {}: heartbeat_server: {:?}", req_id, heartbeat_server); let HeartbeatServerHttpRequest { num_cpus, jwt_key, server_nonce, cert_digest, cert_pem } = heartbeat_server; try_or_500_log!(req_id, maybe_update_certs( &mut requester.client.lock().unwrap(), &mut server_certificates.lock().unwrap(), server_id, cert_digest, cert_pem )); let job_authorizer = JWTJobAuthorizer::new(jwt_key); let res: HeartbeatServerResult = try_or_500_log!(req_id, handler.handle_heartbeat_server( server_id, server_nonce, num_cpus, job_authorizer )); prepare_response(request, &res) }, (POST) (/api/v1/scheduler/job_state/{job_id: JobId}) => { let server_id = check_server_auth_or_err!(request); let job_state = try_or_400_log!(req_id, bincode_input(request)); trace!("Req {}: job state: {:?}", req_id, job_state); let res: UpdateJobStateResult = try_or_500_log!(req_id, handler.handle_update_job_state( job_id, server_id, job_state )); prepare_response(request, &res) }, (GET) (/api/v1/scheduler/status) => { let res: SchedulerStatusResult = try_or_500_log!(req_id, handler.handle_status()); prepare_response(request, &res) }, _ => { warn!("Unknown request {:?}", request); rouille::Response::empty_404() }, ))(); trace!(target: "sccache_http", "Res {}: {:?}", req_id, response); response }).map_err(|e| anyhow!(format!("Failed to start http server for sccache scheduler: {}", e)))?; // This limit is rouille's default for `start_server_with_pool`, which // we would use, except that interface doesn't permit any sort of // error handling to be done. let server = server.pool_size(num_cpus::get() * 8); server.run(); panic!("Rouille server terminated") } } struct SchedulerRequester { client: Mutex, } impl dist::SchedulerOutgoing for SchedulerRequester { fn do_assign_job( &self, server_id: ServerId, job_id: JobId, tc: Toolchain, auth: String, ) -> Result { let url = urls::server_assign_job(server_id, job_id); let req = self.client.lock().unwrap().post(url); bincode_req(req.bearer_auth(auth).bincode(&tc)?) 
.context("POST to scheduler assign_job failed") } } pub struct Server { bind_address: SocketAddr, scheduler_url: reqwest::Url, scheduler_auth: String, // HTTPS pieces all the builders will use for connection encryption cert_digest: Vec, cert_pem: Vec, privkey_pem: Vec, // Key used to sign any requests relating to jobs jwt_key: Vec, // Randomly generated nonce to allow the scheduler to detect server restarts server_nonce: ServerNonce, handler: S, } impl Server { pub fn new( public_addr: SocketAddr, bind_address: Option, scheduler_url: reqwest::Url, scheduler_auth: String, handler: S, ) -> Result { let (cert_digest, cert_pem, privkey_pem) = create_https_cert_and_privkey(public_addr) .context("failed to create HTTPS certificate for server")?; let mut jwt_key = vec![0; JWT_KEY_LENGTH]; OsRng.fill_bytes(&mut jwt_key); let server_nonce = ServerNonce::new(); Ok(Self { bind_address: bind_address.unwrap_or(public_addr), scheduler_url, scheduler_auth, cert_digest, cert_pem, privkey_pem, jwt_key, server_nonce, handler, }) } pub fn start(self) -> Result { let Self { bind_address, scheduler_url, scheduler_auth, cert_digest, cert_pem, privkey_pem, jwt_key, server_nonce, handler, } = self; let heartbeat_req = HeartbeatServerHttpRequest { num_cpus: num_cpus::get(), jwt_key: jwt_key.clone(), server_nonce, cert_digest, cert_pem: cert_pem.clone(), }; let job_authorizer = JWTJobAuthorizer::new(jwt_key); let heartbeat_url = urls::scheduler_heartbeat_server(&scheduler_url); let requester = ServerRequester { client: new_reqwest_blocking_client(), scheduler_url, scheduler_auth: scheduler_auth.clone(), }; // TODO: detect if this panics thread::spawn(move || { let client = new_reqwest_blocking_client(); loop { trace!(target: "sccache_heartbeat", "Performing heartbeat"); match bincode_req( client .post(heartbeat_url.clone()) .bearer_auth(scheduler_auth.clone()) .bincode(&heartbeat_req) .expect("failed to serialize heartbeat"), ) { Ok(HeartbeatServerResult { is_new }) => { trace!(target: "sccache_heartbeat", "Heartbeat success is_new={}", is_new); // TODO: if is_new, terminate all running jobs if is_new { info!("Server connected to scheduler"); } thread::sleep(HEARTBEAT_INTERVAL) } Err(e) => { error!(target: "sccache_heartbeat", "Failed to send heartbeat to server: {}", e); thread::sleep(HEARTBEAT_ERROR_INTERVAL) } } } }); info!("Server listening for clients on {}", bind_address); let request_count = atomic::AtomicUsize::new(0); let server = rouille::Server::new_ssl(bind_address, move |request| { let req_id = request_count.fetch_add(1, atomic::Ordering::SeqCst); trace!("Req {} ({}): {:?}", req_id, request.remote_addr(), request); let response = (|| router!(request, (POST) (/api/v1/distserver/assign_job/{job_id: JobId}) => { job_auth_or_401!(request, &job_authorizer, job_id); let toolchain = try_or_400_log!(req_id, bincode_input(request)); trace!("Req {}: assign_job({}): {:?}", req_id, job_id, toolchain); let res: AssignJobResult = try_or_500_log!(req_id, handler.handle_assign_job(job_id, toolchain)); prepare_response(request, &res) }, (POST) (/api/v1/distserver/submit_toolchain/{job_id: JobId}) => { job_auth_or_401!(request, &job_authorizer, job_id); trace!("Req {}: submit_toolchain({})", req_id, job_id); let body = request.data().expect("body was already read in submit_toolchain"); let toolchain_rdr = ToolchainReader(Box::new(body)); let res: SubmitToolchainResult = try_or_500_log!(req_id, handler.handle_submit_toolchain(&requester, job_id, toolchain_rdr)); prepare_response(request, &res) }, (POST) 
(/api/v1/distserver/run_job/{job_id: JobId}) => { job_auth_or_401!(request, &job_authorizer, job_id); let mut body = request.data().expect("body was already read in run_job"); let bincode_length = try_or_500_log!(req_id, body.read_u32::() .context("failed to read run job input length")) as u64; let mut bincode_reader = body.take(bincode_length); let runjob = try_or_500_log!(req_id, bincode::deserialize_from(&mut bincode_reader) .context("failed to deserialize run job request")); trace!("Req {}: run_job({}): {:?}", req_id, job_id, runjob); let RunJobHttpRequest { command, outputs } = runjob; let body = bincode_reader.into_inner(); let inputs_rdr = InputsReader(Box::new(ZlibReadDecoder::new(body))); let outputs = outputs.into_iter().collect(); let res: RunJobResult = try_or_500_log!(req_id, handler.handle_run_job(&requester, job_id, command, outputs, inputs_rdr)); prepare_response(request, &res) }, _ => { warn!("Unknown request {:?}", request); rouille::Response::empty_404() }, ))(); trace!("Res {}: {:?}", req_id, response); response }, cert_pem, privkey_pem).map_err(|e| anyhow!(format!("Failed to start http server for sccache server: {}", e)))?; // This limit is rouille's default for `start_server_with_pool`, which // we would use, except that interface doesn't permit any sort of // error handling to be done. let server = server.pool_size(num_cpus::get() * 8); server.run(); panic!("Rouille server terminated") } } struct ServerRequester { client: reqwest::blocking::Client, scheduler_url: reqwest::Url, scheduler_auth: String, } impl dist::ServerOutgoing for ServerRequester { fn do_update_job_state( &self, job_id: JobId, state: JobState, ) -> Result { let url = urls::scheduler_job_state(&self.scheduler_url, job_id); bincode_req( self.client .post(url) .bearer_auth(self.scheduler_auth.clone()) .bincode(&state)?, ) .context("POST to scheduler job_state failed") } } } #[cfg(feature = "dist-client")] mod client { use super::super::cache; use crate::config; use crate::dist::pkg::{InputsPackager, ToolchainPackager}; use crate::dist::{ self, AllocJobResult, CompileCommand, JobAlloc, PathTransformer, RunJobResult, SchedulerStatusResult, SubmitToolchainResult, Toolchain, }; use async_trait::async_trait; use byteorder::{BigEndian, WriteBytesExt}; use flate2::write::ZlibEncoder as ZlibWriteEncoder; use flate2::Compression; use futures::TryFutureExt; use reqwest::Body; use std::collections::HashMap; use std::io::Write; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use std::time::Duration; use super::common::{ bincode_req_fut, AllocJobHttpResponse, ReqwestRequestBuilderExt, RunJobHttpRequest, ServerCertificateHttpResponse, }; use super::urls; use crate::errors::*; const REQUEST_TIMEOUT_SECS: u64 = 1200; const CONNECT_TIMEOUT_SECS: u64 = 5; pub struct Client { auth_token: String, scheduler_url: reqwest::Url, // cert_digest -> cert_pem server_certs: Arc, Vec>>>, client: Arc>, pool: tokio::runtime::Handle, tc_cache: Arc, rewrite_includes_only: bool, } impl Client { pub fn new( pool: &tokio::runtime::Handle, scheduler_url: reqwest::Url, cache_dir: &Path, cache_size: u64, toolchain_configs: &[config::DistToolchainConfig], auth_token: String, rewrite_includes_only: bool, ) -> Result { let timeout = Duration::new(REQUEST_TIMEOUT_SECS, 0); let connect_timeout = Duration::new(CONNECT_TIMEOUT_SECS, 0); let client = reqwest::ClientBuilder::new() .timeout(timeout) .connect_timeout(connect_timeout) // Disable connection pool to avoid broken connection // between runtime .pool_max_idle_per_host(0) 
.build() .context("failed to create an async HTTP client")?; let client_toolchains = cache::ClientToolchains::new(cache_dir, cache_size, toolchain_configs) .context("failed to initialise client toolchains")?; Ok(Self { auth_token, scheduler_url, server_certs: Default::default(), client: Arc::new(Mutex::new(client)), pool: pool.clone(), tc_cache: Arc::new(client_toolchains), rewrite_includes_only, }) } fn update_certs( client: &mut reqwest::Client, certs: &mut HashMap, Vec>, cert_digest: Vec, cert_pem: Vec, ) -> Result<()> { let mut client_async_builder = reqwest::ClientBuilder::new(); // Add all the certificates we know about client_async_builder = client_async_builder.add_root_certificate( reqwest::Certificate::from_pem(&cert_pem) .context("failed to interpret pem as certificate")?, ); for cert_pem in certs.values() { client_async_builder = client_async_builder.add_root_certificate( reqwest::Certificate::from_pem(cert_pem).expect("previously valid cert"), ); } // Finish the client let timeout = Duration::new(REQUEST_TIMEOUT_SECS, 0); let new_client_async = client_async_builder .timeout(timeout) // Disable keep-alive .pool_max_idle_per_host(0) .build() .context("failed to create an async HTTP client")?; // Use the updated certificates *client = new_client_async; certs.insert(cert_digest, cert_pem); Ok(()) } } #[async_trait] impl dist::Client for Client { async fn do_alloc_job(&self, tc: Toolchain) -> Result { let scheduler_url = self.scheduler_url.clone(); let url = urls::scheduler_alloc_job(&scheduler_url); let mut req = self.client.lock().unwrap().post(url); req = req.bearer_auth(self.auth_token.clone()).bincode(&tc)?; let client = self.client.clone(); let server_certs = self.server_certs.clone(); match bincode_req_fut(req).await? { AllocJobHttpResponse::Success { job_alloc, need_toolchain, cert_digest, } => { let server_id = job_alloc.server_id; let alloc_job_res = Ok(AllocJobResult::Success { job_alloc, need_toolchain, }); if server_certs.lock().unwrap().contains_key(&cert_digest) { return alloc_job_res; } info!( "Need to request new certificate for server {}", server_id.addr() ); let url = urls::scheduler_server_certificate(&scheduler_url, server_id); let req = client.lock().unwrap().get(url); let res: ServerCertificateHttpResponse = bincode_req_fut(req) .await .context("GET to scheduler server_certificate failed")?; // TODO: Move to asynchronous reqwest client only. // This function internally builds a blocking reqwest client; // However, it does so by utilizing a runtime which it drops, // triggering (rightfully) a sanity check that prevents from // dropping a runtime in asynchronous context. // For the time being, we work around this by off-loading it // to a dedicated blocking-friendly thread pool. 
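The `spawn_blocking` call that follows implements this off-loading. As a minimal sketch of the pattern on its own (assuming only tokio; the function name is illustrative):

```rust
// Sketch only: mirrors the work-around described in the comment above.
// `handle` plays the role of the `pool` field on `Client`.
async fn run_blocking_work(handle: tokio::runtime::Handle) {
    let _ = handle
        .spawn_blocking(|| {
            // Anything that internally creates (and drops) a blocking reqwest
            // client, or otherwise blocks, is safe on this dedicated pool;
            // doing it directly on the async runtime would trip tokio's
            // nested-runtime sanity check.
        })
        .await; // the JoinHandle resolves when the closure finishes
}
```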
let _ = self .pool .spawn_blocking(move || { Self::update_certs( &mut client.lock().unwrap(), &mut server_certs.lock().unwrap(), res.cert_digest, res.cert_pem, ) .context("Failed to update certificate") .unwrap_or_else(|e| warn!("Failed to update certificate: {:?}", e)); }) .await; alloc_job_res } AllocJobHttpResponse::Fail { msg } => Ok(AllocJobResult::Fail { msg }), } } async fn do_get_status(&self) -> Result { let scheduler_url = self.scheduler_url.clone(); let url = urls::scheduler_status(&scheduler_url); let req = self.client.lock().unwrap().get(url); bincode_req_fut(req).await } async fn do_submit_toolchain( &self, job_alloc: JobAlloc, tc: Toolchain, ) -> Result { match self.tc_cache.get_toolchain(&tc) { Ok(Some(toolchain_file)) => { let url = urls::server_submit_toolchain(job_alloc.server_id, job_alloc.job_id); let req = self.client.lock().unwrap().post(url); let toolchain_file = tokio::fs::File::from_std(toolchain_file.into()); let toolchain_file_stream = tokio_util::io::ReaderStream::new(toolchain_file); let body = Body::wrap_stream(toolchain_file_stream); let req = req.bearer_auth(job_alloc.auth).body(body); bincode_req_fut(req).await } Ok(None) => Err(anyhow!("couldn't find toolchain locally")), Err(e) => Err(e), } } async fn do_run_job( &self, job_alloc: JobAlloc, command: CompileCommand, outputs: Vec, inputs_packager: Box, ) -> Result<(RunJobResult, PathTransformer)> { let url = urls::server_run_job(job_alloc.server_id, job_alloc.job_id); let (body, path_transformer) = self .pool .spawn_blocking(move || -> Result<_> { let bincode = bincode::serialize(&RunJobHttpRequest { command, outputs }) .context("failed to serialize run job request")?; let bincode_length = bincode.len(); let mut body = vec![]; body.write_u32::(bincode_length as u32) .expect("Infallible write of bincode length to vec failed"); body.write_all(&bincode) .expect("Infallible write of bincode body to vec failed"); let path_transformer; { let mut compressor = ZlibWriteEncoder::new(&mut body, Compression::fast()); path_transformer = inputs_packager .write_inputs(&mut compressor) .context("Could not write inputs for compilation")?; compressor.flush().context("failed to flush compressor")?; trace!( "Compressed inputs from {} -> {}", compressor.total_in(), compressor.total_out() ); compressor.finish().context("failed to finish compressor")?; } Ok((body, path_transformer)) }) .await??; let mut req = self.client.lock().unwrap().post(url); req = req.bearer_auth(job_alloc.auth.clone()).bytes(body); bincode_req_fut(req) .map_ok(|res| (res, path_transformer)) .await } async fn put_toolchain( &self, compiler_path: PathBuf, weak_key: String, toolchain_packager: Box, ) -> Result<(Toolchain, Option<(String, PathBuf)>)> { let compiler_path = compiler_path.to_owned(); let weak_key = weak_key.to_owned(); let tc_cache = self.tc_cache.clone(); self.pool .spawn_blocking(move || { tc_cache.put_toolchain(&compiler_path, &weak_key, toolchain_packager) }) .await? } fn rewrite_includes_only(&self) -> bool { self.rewrite_includes_only } fn get_custom_toolchain(&self, exe: &Path) -> Option { match self.tc_cache.get_custom_toolchain(exe) { Some(Ok((_, _, path))) => Some(path), _ => None, } } } } mozilla-sccache-40c3d6b/src/dist/mod.rs000066400000000000000000000551151475712407500200610ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::compiler; use async_trait::async_trait; use rand::{rngs::OsRng, RngCore}; use serde::{Deserialize, Serialize}; use std::ffi::OsString; use std::fmt; use std::io::{self, Read}; use std::net::SocketAddr; use std::path::{Path, PathBuf}; use std::process; use std::str::FromStr; #[cfg(feature = "dist-server")] use std::sync::Mutex; use crate::errors::*; #[cfg(any(feature = "dist-client", feature = "dist-server"))] mod cache; #[cfg(feature = "dist-client")] pub mod client_auth; #[cfg(any(feature = "dist-client", feature = "dist-server"))] pub mod http; #[cfg(test)] mod test; #[cfg(any(feature = "dist-client", feature = "dist-server"))] pub use crate::dist::cache::TcCache; // TODO: paths (particularly outputs, which are accessed by an unsandboxed program) // should be some pre-sanitised AbsPath type pub use self::path_transform::PathTransformer; #[cfg(feature = "dist-client")] pub mod pkg; #[cfg(not(feature = "dist-client"))] mod pkg { pub trait ToolchainPackager {} pub trait InputsPackager {} } #[cfg(target_os = "windows")] mod path_transform { use std::collections::HashMap; use std::path::{Component, Components, Path, PathBuf, Prefix, PrefixComponent}; use std::str; fn take_prefix<'a>(components: &'a mut Components<'_>) -> Option> { let prefix = components.next()?; let pc = match prefix { Component::Prefix(pc) => pc, _ => return None, }; let root = components.next()?; if root != Component::RootDir { return None; } Some(pc) } fn transform_prefix_component(pc: PrefixComponent<'_>) -> Option { match pc.kind() { // Transforming these to the same place means these may flip-flop // in the tracking map, but they're equivalent so not really an // issue Prefix::Disk(diskchar) | Prefix::VerbatimDisk(diskchar) => { assert!(diskchar.is_ascii_alphabetic()); let diskchar = diskchar.to_ascii_uppercase(); Some(format!( "/prefix/disk-{}", str::from_utf8(&[diskchar]).expect("invalid disk char") )) } Prefix::Verbatim(_) | Prefix::VerbatimUNC(_, _) | Prefix::DeviceNS(_) | Prefix::UNC(_, _) => None, } } #[derive(Debug)] pub struct PathTransformer { dist_to_local_path: HashMap, } impl PathTransformer { pub fn new() -> Self { PathTransformer { dist_to_local_path: HashMap::new(), } } pub fn as_dist_abs(&mut self, p: &Path) -> Option { if !p.is_absolute() { return None; } self.as_dist(p) } pub fn as_dist(&mut self, p: &Path) -> Option { let mut components = p.components(); // Extract the prefix (e.g. "C:/") if present let maybe_dist_prefix = if p.is_absolute() { let pc = take_prefix(&mut components).expect("could not take prefix from absolute path"); Some(transform_prefix_component(pc)?) } else { None }; // Reconstruct the path (minus the prefix) as a Linux path let mut dist_suffix = String::new(); for component in components { let part = match component { Component::Prefix(_) | Component::RootDir => { // On Windows there is such a thing as a path like C:file.txt // It's not clear to me what the semantics of such a path are, // so give up. 
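To make the "give up" case concrete: a drive-relative Windows path such as `C:file.txt` carries a `Prefix` component but no `RootDir`, so it is neither clearly absolute nor clearly relative; the `error!` plus `return None` that follow implement the bail-out. A hedged, Windows-only sketch of that observation:

```rust
#[cfg(all(test, target_os = "windows"))]
#[test]
fn drive_relative_paths_have_no_root() {
    use std::path::{Component, Path};
    let p = Path::new("C:file.txt");
    assert!(!p.is_absolute());
    let mut comps = p.components();
    // Prefix("C:") comes first, but no RootDir follows it.
    assert!(matches!(comps.next(), Some(Component::Prefix(_))));
    assert!(!matches!(comps.next(), Some(Component::RootDir)));
}
```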
error!("unexpected part in path {:?}", p); return None; } Component::Normal(osstr) => osstr.to_str()?, // TODO: should be forbidden Component::CurDir => ".", Component::ParentDir => "..", }; if !dist_suffix.is_empty() { dist_suffix.push('/') } dist_suffix.push_str(part) } let dist_path = if let Some(mut dist_prefix) = maybe_dist_prefix { dist_prefix.push('/'); dist_prefix.push_str(&dist_suffix); dist_prefix } else { dist_suffix }; self.dist_to_local_path .insert(dist_path.clone(), p.to_owned()); Some(dist_path) } pub fn disk_mappings(&self) -> impl Iterator { let mut normal_mappings = HashMap::new(); let mut verbatim_mappings = HashMap::new(); for (_dist_path, local_path) in self.dist_to_local_path.iter() { if !local_path.is_absolute() { continue; } let mut components = local_path.components(); let local_prefix = take_prefix(&mut components).expect("could not take prefix from absolute path"); let local_prefix_component = Component::Prefix(local_prefix); let local_prefix_path: &Path = local_prefix_component.as_ref(); let mappings = if let Prefix::VerbatimDisk(_) = local_prefix.kind() { &mut verbatim_mappings } else { &mut normal_mappings }; if mappings.contains_key(local_prefix_path) { continue; } let dist_prefix = transform_prefix_component(local_prefix) .expect("prefix already in tracking map could not be transformed"); mappings.insert(local_prefix_path.to_owned(), dist_prefix); } // Prioritise normal mappings for the same disk, as verbatim mappings can // look odd to users normal_mappings.into_iter().chain(verbatim_mappings) } pub fn to_local(&self, p: &str) -> Option { self.dist_to_local_path.get(p).cloned() } } #[test] fn test_basic() { let mut pt = PathTransformer::new(); assert_eq!(pt.as_dist(Path::new("C:/a")).unwrap(), "/prefix/disk-C/a"); assert_eq!( pt.as_dist(Path::new(r#"C:\a\b.c"#)).unwrap(), "/prefix/disk-C/a/b.c" ); assert_eq!( pt.as_dist(Path::new("X:/other.c")).unwrap(), "/prefix/disk-X/other.c" ); let mut disk_mappings: Vec<_> = pt.disk_mappings().collect(); disk_mappings.sort(); assert_eq!( disk_mappings, &[ (Path::new("C:").into(), "/prefix/disk-C".into()), (Path::new("X:").into(), "/prefix/disk-X".into()), ] ); assert_eq!(pt.to_local("/prefix/disk-C/a").unwrap(), Path::new("C:/a")); assert_eq!( pt.to_local("/prefix/disk-C/a/b.c").unwrap(), Path::new("C:/a/b.c") ); assert_eq!( pt.to_local("/prefix/disk-X/other.c").unwrap(), Path::new("X:/other.c") ); } #[test] fn test_relative_paths() { let mut pt = PathTransformer::new(); assert_eq!(pt.as_dist(Path::new("a/b")).unwrap(), "a/b"); assert_eq!(pt.as_dist(Path::new(r#"a\b"#)).unwrap(), "a/b"); assert_eq!(pt.to_local("a/b").unwrap(), Path::new("a/b")); } #[test] fn test_verbatim_disks() { let mut pt = PathTransformer::new(); assert_eq!( pt.as_dist(Path::new("X:/other.c")).unwrap(), "/prefix/disk-X/other.c" ); pt.as_dist(Path::new(r#"\\?\X:\out\other.o"#)); assert_eq!( pt.to_local("/prefix/disk-X/other.c").unwrap(), Path::new("X:/other.c") ); assert_eq!( pt.to_local("/prefix/disk-X/out/other.o").unwrap(), Path::new(r#"\\?\X:\out\other.o"#) ); let disk_mappings: Vec<_> = pt.disk_mappings().collect(); // Verbatim disks should come last assert_eq!( disk_mappings, &[ (Path::new("X:").into(), "/prefix/disk-X".into()), (Path::new(r#"\\?\X:"#).into(), "/prefix/disk-X".into()), ] ); } #[test] fn test_slash_directions() { let mut pt = PathTransformer::new(); assert_eq!(pt.as_dist(Path::new("C:/a")).unwrap(), "/prefix/disk-C/a"); assert_eq!(pt.as_dist(Path::new("C:\\a")).unwrap(), "/prefix/disk-C/a"); 
assert_eq!(pt.to_local("/prefix/disk-C/a").unwrap(), Path::new("C:/a")); assert_eq!(pt.disk_mappings().count(), 1); } } #[cfg(unix)] mod path_transform { use std::iter; use std::path::{Path, PathBuf}; #[derive(Debug)] pub struct PathTransformer; impl PathTransformer { pub fn new() -> Self { PathTransformer } pub fn as_dist_abs(&mut self, p: &Path) -> Option { if !p.is_absolute() { return None; } self.as_dist(p) } pub fn as_dist(&mut self, p: &Path) -> Option { p.as_os_str().to_str().map(Into::into) } pub fn disk_mappings(&self) -> impl Iterator { iter::empty() } pub fn to_local(&self, p: &str) -> Option { Some(PathBuf::from(p)) } } } pub fn osstrings_to_strings(osstrings: &[OsString]) -> Option> { osstrings .iter() .map(|arg| arg.clone().into_string().ok()) .collect::>() } pub fn osstring_tuples_to_strings( osstring_tuples: &[(OsString, OsString)], ) -> Option> { osstring_tuples .iter() .map(|(k, v)| Some((k.clone().into_string().ok()?, v.clone().into_string().ok()?))) .collect::>() } pub fn strings_to_osstrings(strings: &[String]) -> Vec { strings .iter() .map(|arg| std::ffi::OsStr::new(arg).to_os_string()) .collect::>() } // TODO: TryFrom pub fn try_compile_command_to_dist( command: compiler::SingleCompileCommand, ) -> Option { let compiler::SingleCompileCommand { executable, arguments, env_vars, cwd, } = command; Some(CompileCommand { executable: executable.into_os_string().into_string().ok()?, arguments: arguments .into_iter() .map(|arg| arg.into_string().ok()) .collect::>()?, env_vars: env_vars .into_iter() .map(|(k, v)| Some((k.into_string().ok()?, v.into_string().ok()?))) .collect::>()?, cwd: cwd.into_os_string().into_string().ok()?, }) } // TODO: Clone by assuming immutable/no GC for now // TODO: make fields non-public? // TODO: make archive_id validate that it's just a bunch of hex chars #[derive(Debug, Hash, Eq, PartialEq, Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct Toolchain { pub archive_id: String, } #[derive(Hash, Eq, PartialEq, Clone, Copy, Debug, Ord, PartialOrd, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct JobId(pub u64); impl fmt::Display for JobId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl FromStr for JobId { type Err = ::Err; fn from_str(s: &str) -> ::std::result::Result { u64::from_str(s).map(JobId) } } #[derive(Hash, Eq, PartialEq, Clone, Copy, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ServerId(SocketAddr); impl ServerId { pub fn new(addr: SocketAddr) -> Self { ServerId(addr) } pub fn addr(&self) -> SocketAddr { self.0 } } impl FromStr for ServerId { type Err = ::Err; fn from_str(s: &str) -> ::std::result::Result { SocketAddr::from_str(s).map(ServerId) } } #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ServerNonce(u64); impl ServerNonce { pub fn new() -> Self { ServerNonce(OsRng.next_u64()) } } #[derive(Hash, Eq, PartialEq, Clone, Copy, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum JobState { Pending, Ready, Started, Complete, } impl fmt::Display for JobState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use self::JobState::*; match *self { Pending => "pending", Ready => "ready", Started => "started", Complete => "complete", } .fmt(f) } } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct CompileCommand { pub executable: String, pub arguments: Vec, pub env_vars: Vec<(String, String)>, pub cwd: String, } // 
process::Output is not serialize so we have a custom Output type. However, // we cannot encode all information in here, such as Unix signals, as the other // end may not understand them (e.g. if it's Windows) #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ProcessOutput { code: i32, stdout: Vec, stderr: Vec, } impl ProcessOutput { #[cfg(unix)] pub fn try_from(o: process::Output) -> Result { let process::Output { status, stdout, stderr, } = o; let code = match (status.code(), status.signal()) { (Some(c), _) => c, (None, Some(s)) => bail!("Process status {} terminated with signal {}", status, s), (None, None) => bail!("Process status {} has no exit code or signal", status), }; Ok(ProcessOutput { code, stdout, stderr, }) } #[cfg(test)] pub fn fake_output(code: i32, stdout: Vec, stderr: Vec) -> Self { Self { code, stdout, stderr, } } } #[cfg(unix)] use std::os::unix::process::ExitStatusExt; #[cfg(windows)] use std::os::windows::process::ExitStatusExt; #[cfg(unix)] fn exit_status(code: i32) -> process::ExitStatus { process::ExitStatus::from_raw(code) } #[cfg(windows)] fn exit_status(code: i32) -> process::ExitStatus { // TODO: this is probably a subideal conversion - it's not clear how Unix exit codes map to // Windows exit codes (other than 0 being a success) process::ExitStatus::from_raw(code as u32) } impl From for process::Output { fn from(o: ProcessOutput) -> Self { // TODO: handle signals, i.e. None code process::Output { status: exit_status(o.code), stdout: o.stdout, stderr: o.stderr, } } } #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct OutputData(Vec, u64); impl OutputData { #[cfg(any(feature = "dist-server", all(feature = "dist-client", test)))] pub fn try_from_reader(r: R) -> io::Result { use flate2::read::ZlibEncoder as ZlibReadEncoder; use flate2::Compression; let mut compressor = ZlibReadEncoder::new(r, Compression::fast()); let mut res = vec![]; io::copy(&mut compressor, &mut res)?; Ok(OutputData(res, compressor.total_in())) } pub fn lens(&self) -> OutputDataLens { OutputDataLens { actual: self.1, compressed: self.0.len() as u64, } } #[cfg(feature = "dist-client")] pub fn into_reader(self) -> impl Read { use flate2::read::ZlibDecoder as ZlibReadDecoder; ZlibReadDecoder::new(io::Cursor::new(self.0)) } } pub struct OutputDataLens { pub actual: u64, pub compressed: u64, } impl fmt::Display for OutputDataLens { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Size: {}->{}", self.actual, self.compressed) } } // TODO: standardise on compressed or not for inputs and toolchain // TODO: make fields not public // AllocJob #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct JobAlloc { pub auth: String, pub job_id: JobId, pub server_id: ServerId, } #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum AllocJobResult { Success { job_alloc: JobAlloc, need_toolchain: bool, }, Fail { msg: String, }, } // AssignJob #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct AssignJobResult { pub state: JobState, pub need_toolchain: bool, } // JobState #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum UpdateJobStateResult { Success, Fail { msg: String }, } // HeartbeatServer #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct HeartbeatServerResult { pub is_new: bool, } // RunJob #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum 
RunJobResult { JobNotFound, Complete(JobComplete), } #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct JobComplete { pub output: ProcessOutput, pub outputs: Vec<(String, OutputData)>, } // Status #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct SchedulerStatusResult { pub num_servers: usize, pub num_cpus: usize, pub in_progress: usize, } // SubmitToolchain #[derive(Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum SubmitToolchainResult { Success, JobNotFound, CannotCache, } /////////////////// // BuildResult pub struct BuildResult { pub output: ProcessOutput, pub outputs: Vec<(String, OutputData)>, } /////////////////// // TODO: it's unfortunate all these are public, but in order to describe the trait // bound on the instance (e.g. scheduler) we pass to the actual communication (e.g. // http implementation) they need to be public, which has knock-on effects for private // structs pub struct ToolchainReader<'a>(Box); impl<'a> Read for ToolchainReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { self.0.read(buf) } } pub struct InputsReader<'a>(Box); impl<'a> Read for InputsReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { self.0.read(buf) } } #[cfg(feature = "dist-server")] type ExtResult = ::std::result::Result; #[cfg(feature = "dist-server")] pub trait SchedulerOutgoing { // To Server fn do_assign_job( &self, server_id: ServerId, job_id: JobId, tc: Toolchain, auth: String, ) -> Result; } #[cfg(feature = "dist-server")] pub trait ServerOutgoing { // To Scheduler fn do_update_job_state(&self, job_id: JobId, state: JobState) -> Result; } // Trait to handle the creation and verification of job authorization tokens #[cfg(feature = "dist-server")] pub trait JobAuthorizer: Send { fn generate_token(&self, job_id: JobId) -> Result; fn verify_token(&self, job_id: JobId, token: &str) -> Result<()>; } #[cfg(feature = "dist-server")] pub trait SchedulerIncoming: Send + Sync { // From Client fn handle_alloc_job( &self, requester: &dyn SchedulerOutgoing, tc: Toolchain, ) -> ExtResult; // From Server fn handle_heartbeat_server( &self, server_id: ServerId, server_nonce: ServerNonce, num_cpus: usize, job_authorizer: Box, ) -> ExtResult; // From Server fn handle_update_job_state( &self, job_id: JobId, server_id: ServerId, job_state: JobState, ) -> ExtResult; // From anyone fn handle_status(&self) -> ExtResult; } #[cfg(feature = "dist-server")] pub trait ServerIncoming: Send + Sync { // From Scheduler fn handle_assign_job(&self, job_id: JobId, tc: Toolchain) -> ExtResult; // From Client fn handle_submit_toolchain( &self, requester: &dyn ServerOutgoing, job_id: JobId, tc_rdr: ToolchainReader<'_>, ) -> ExtResult; // From Client fn handle_run_job( &self, requester: &dyn ServerOutgoing, job_id: JobId, command: CompileCommand, outputs: Vec, inputs_rdr: InputsReader<'_>, ) -> ExtResult; } #[cfg(feature = "dist-server")] pub trait BuilderIncoming: Send + Sync { // From Server fn run_build( &self, toolchain: Toolchain, command: CompileCommand, outputs: Vec, inputs_rdr: InputsReader<'_>, cache: &Mutex, ) -> ExtResult; } ///////// #[async_trait] pub trait Client: Send + Sync { // To Scheduler async fn do_alloc_job(&self, tc: Toolchain) -> Result; // To Scheduler async fn do_get_status(&self) -> Result; // To Server async fn do_submit_toolchain( &self, job_alloc: JobAlloc, tc: Toolchain, ) -> Result; // To Server async fn do_run_job( &self, job_alloc: JobAlloc, command: CompileCommand, outputs: Vec, 
inputs_packager: Box, ) -> Result<(RunJobResult, PathTransformer)>; async fn put_toolchain( &self, compiler_path: PathBuf, weak_key: String, toolchain_packager: Box, ) -> Result<(Toolchain, Option<(String, PathBuf)>)>; fn rewrite_includes_only(&self) -> bool; fn get_custom_toolchain(&self, exe: &Path) -> Option; } mozilla-sccache-40c3d6b/src/dist/pkg.rs000066400000000000000000000462201475712407500200600ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::dist; use fs_err as fs; use std::io; use std::path::{Component, Path, PathBuf}; use std::str; use crate::errors::*; #[cfg(all(target_os = "linux", target_arch = "x86_64"))] pub use self::toolchain_imp::*; pub trait ToolchainPackager: Send { fn write_pkg(self: Box, f: fs::File) -> Result<()>; } pub trait InputsPackager: Send { fn write_inputs(self: Box, wtr: &mut dyn io::Write) -> Result; } pub trait OutputsRepackager { fn repackage_outputs(self: Box, wtr: &mut dyn io::Write) -> Result; } #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] mod toolchain_imp { use super::ToolchainPackager; use fs_err as fs; use crate::errors::*; // Distributed client, but an unsupported platform for toolchain packaging so // create a failing implementation that will conflict with any others. impl ToolchainPackager for T { fn write_pkg(self: Box, _f: fs::File) -> Result<()> { bail!("Automatic packaging not supported on this platform") } } } #[cfg(all(target_os = "linux", target_arch = "x86_64"))] mod toolchain_imp { use super::SimplifyPath; use fs_err as fs; use std::collections::BTreeMap; use std::io::{Read, Write}; use std::path::{Component, Path, PathBuf}; use std::process; use std::str; use walkdir::WalkDir; use crate::errors::*; pub struct ToolchainPackageBuilder { // Put dirs and file in a deterministic order (map from tar_path -> real_path) dir_set: BTreeMap, file_set: BTreeMap, // Symlinks to add to the tar // These are _not_ tar safe, and must be made so before being added to the tar (see // `tar_safe_path`). symlinks: BTreeMap, } impl ToolchainPackageBuilder { pub fn new() -> Self { ToolchainPackageBuilder { dir_set: BTreeMap::new(), file_set: BTreeMap::new(), symlinks: BTreeMap::new(), } } pub fn add_common(&mut self) -> Result<()> { self.add_dir(PathBuf::from("/tmp")) } pub fn add_executable_and_deps(&mut self, executable: PathBuf) -> Result<()> { let mut remaining = vec![executable]; while let Some(obj_path) = remaining.pop() { assert!(obj_path.is_absolute()); // If any parent directories are a symlink, resolve it first and record the link. // This is important because ld-linux may not be configured to look in the resolved // or non-resolved directory (i.e., both directories must work at runtime). 
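A minimal standalone sketch of the record-the-link idea described in the comment above, so that both the symlinked and the resolved directory names can end up in the packaged toolchain. The function name and paths are illustrative, not the builder's actual API:

```rust
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};

// Sketch: if `p` is a symlink, remember both names so the tarball can
// contain the link alongside its target.
fn record_symlink(p: &Path, links: &mut BTreeMap<PathBuf, PathBuf>) -> std::io::Result<()> {
    if p.is_symlink() {
        let target = p.read_link()?;
        // A relative target is resolved against the link's parent; an
        // absolute target is used as-is (that is `join`'s behaviour).
        let resolved = p.parent().unwrap_or_else(|| Path::new("/")).join(target);
        links.insert(p.to_owned(), resolved);
    }
    Ok(())
}
```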
// let tar_path = self.tarify_path(&obj_path)?; // If file already in the set, assume we've analysed all deps if self.file_set.contains_key(&tar_path) { continue; } let ldd_libraries = find_ldd_libraries(&obj_path).with_context(|| { format!("Failed to analyse {} with ldd", obj_path.display()) })?; remaining.extend(ldd_libraries); self.file_set.insert(tar_path, obj_path); } Ok(()) } pub fn add_dir(&mut self, dir_path: PathBuf) -> Result<()> { assert!(dir_path.is_absolute()); if !dir_path.is_dir() { bail!(format!( "{} was not a dir when readying for tar", dir_path.to_string_lossy() )) } if dir_path .components() .next_back() .expect("asserted absolute") == Component::RootDir { return Ok(()); } let tar_path = self.tarify_path(&dir_path)?; self.dir_set.insert(tar_path, dir_path); Ok(()) } pub fn add_file(&mut self, file_path: PathBuf) -> Result<()> { assert!(file_path.is_absolute()); if !file_path.is_file() { bail!(format!( "{} was not a file when readying for tar", file_path.to_string_lossy() )) } let tar_path = self.tarify_path(&file_path)?; self.file_set.insert(tar_path, file_path); Ok(()) } pub fn add_dir_contents(&mut self, dir_path: &Path) -> Result<()> { // Although by not following symlinks we could break a custom // constructed toolchain with links everywhere, this is just a // best-effort auto packaging for entry in WalkDir::new(dir_path).follow_links(false) { let entry = entry?; let file_type = entry.file_type(); if file_type.is_dir() { continue; } else if file_type.is_symlink() { let metadata = fs::metadata(entry.path())?; if !metadata.file_type().is_file() { continue; } } else if !file_type.is_file() { // Device or other oddity continue; } trace!("walkdir add_file {}", entry.path().display()); // It's either a file, or a symlink pointing to a file self.add_file(entry.path().to_owned())? } Ok(()) } pub fn into_compressed_tar(self, writer: W) -> Result<()> { use gzp::{ deflate::Gzip, par::compress::{Compression, ParCompress, ParCompressBuilder}, }; let ToolchainPackageBuilder { dir_set, file_set, symlinks, } = self; let par: ParCompress = ParCompressBuilder::new() .compression_level(Compression::default()) .from_writer(writer); let mut builder = tar::Builder::new(par); for (tar_path, dir_path) in dir_set { builder.append_dir(tar_path, dir_path)? } for (tar_path, file_path) in file_set { let file = &mut fs::File::open(file_path)?; builder.append_file(tar_path, file.file_mut())? } for (from_path, to_path) in symlinks { let mut header = tar::Header::new_gnu(); header.set_entry_type(tar::EntryType::Symlink); header.set_size(0); // Leave `to_path` as absolute, assuming the tar will be used in a chroot-like // environment. builder.append_link(&mut header, tar_safe_path(from_path), to_path)? } builder.finish().map_err(Into::into) } /// Simplify the path and strip the leading slash. /// /// Symlinks in the path are recorded for inclusion in the tarball. fn tarify_path(&mut self, path: &Path) -> Result { SimplifyPath { resolved_symlinks: Some(&mut self.symlinks), } .simplify(path) .map(tar_safe_path) } } /// Strip a leading slash, if any. fn tar_safe_path(path: PathBuf) -> PathBuf { path.strip_prefix(Component::RootDir) .map(ToOwned::to_owned) .unwrap_or(path) } // The dynamic linker is the only thing that truly knows how dynamic libraries will be // searched for, so we need to ask it directly. 
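Before the full implementation below, a deliberately simplified sketch of the "ask the dynamic linker directly" step: run `ldd` and keep the resolved absolute paths. This is not the real parser; `find_ldd_libraries` and `parse_ldd_output` below handle the full range of output shapes (vdso lines, bare interpreter lines, static binaries):

```rust
use std::path::PathBuf;
use std::process::Command;

fn sketch_libs_for(exe: &str) -> std::io::Result<Vec<PathBuf>> {
    let out = Command::new("ldd").arg(exe).output()?;
    Ok(String::from_utf8_lossy(&out.stdout)
        .lines()
        // third whitespace-separated token is the resolved path, when present
        .filter_map(|l| l.split_whitespace().nth(2).map(PathBuf::from))
        .filter(|p| p.is_absolute())
        .collect())
}
```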
//
// This function will extract any absolute paths from output like the following:
// $ ldd /bin/ls
//     linux-vdso.so.1 =>  (0x00007ffeb41f6000)
//     libselinux.so.1 => /lib/x86_64-linux-gnu/libselinux.so.1 (0x00007f6877f4f000)
//     libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f6877b85000)
//     libpcre.so.3 => /lib/x86_64-linux-gnu/libpcre.so.3 (0x00007f6877915000)
//     libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f6877711000)
//     /lib64/ld-linux-x86-64.so.2 (0x00007f6878171000)
//     libpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 (0x00007f68774f4000)
//
// Elf executables can be statically or dynamically linked, and position independent (PIE) or not:
//  - dynamic + PIE = ET_DYN, ldd stdouts something like the list above and exits with code 0
//  - dynamic + non-PIE = ET_EXEC, ldd stdouts something like the list above and exits with code 0
//  - static + PIE = ET_DYN, ldd stdouts something like "\tstatically linked" or
//    "\tldd (0x7f79ef662000)" and exits with code 0
//  - static + non-PIE = ET_EXEC, ldd stderrs something like "\tnot a dynamic executable" or
//    "ldd: a.out: Not a valid dynamic program" and exits with code 1
//
fn find_ldd_libraries(executable: &Path) -> Result<Vec<PathBuf>> {
    let process::Output {
        status,
        stdout,
        stderr,
    } = process::Command::new("ldd").arg(executable).output()?;

    // Not a file ldd can handle. This can be a non-executable, or a static non-PIE
    if !status.success() {
        // Best-effort detection of static non-PIE
        let mut elf = fs::File::open(executable)?;
        let mut elf_bytes = [0; 0x12];
        elf.read_exact(&mut elf_bytes)?;
        if elf_bytes[..0x4] != [0x7f, 0x45, 0x4c, 0x46] {
            bail!("Elf magic not found")
        }
        let little_endian = match elf_bytes[0x5] {
            1 => true,
            2 => false,
            _ => bail!("Invalid endianness in elf header"),
        };
        let e_type = if little_endian {
            (elf_bytes[0x11] as u16) << 8 | elf_bytes[0x10] as u16
        } else {
            (elf_bytes[0x10] as u16) << 8 | elf_bytes[0x11] as u16
        };
        if e_type != 0x02 {
            bail!("ldd failed on a non-ET_EXEC elf")
        }
        // It appears to be an ET_EXEC, good enough for us
        return Ok(vec![]);
    }

    if !stderr.is_empty() {
        trace!(
            "ldd stderr non-empty: {:?}",
            String::from_utf8_lossy(&stderr)
        )
    }

    let stdout = str::from_utf8(&stdout).context("ldd output not utf8")?;
    Ok(parse_ldd_output(stdout))
}

// If it's a static PIE the output will be a line like "\tstatically linked", so be forgiving
// in the parsing here and treat parsing oddities as an empty list.
fn parse_ldd_output(stdout: &str) -> Vec<PathBuf> {
    let mut libs = vec![];
    for line in stdout.lines() {
        let line = line.trim();
        let mut parts: Vec<_> = line.split_whitespace().collect();
        // Remove a possible "(0xdeadbeef)" or assume this isn't a library line
        match parts.pop() {
            Some(s) if s.starts_with('(') && s.ends_with(')') => (),
            Some(_) | None => continue,
        }
        if parts.len() > 3 {
            continue;
        }
        let libpath = match (parts.first(), parts.get(1), parts.get(2)) {
            // "linux-vdso.so.1 =>  (0x00007ffeb41f6000)"
            (Some(_libname), Some(&"=>"), None) => continue,
            // "libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f6877b85000)"
            (Some(libname), Some(&"=>"), Some(libpath)) => {
                // ldd (version 2.30) will output something like this:
                // ...
                // /lib64/ld-linux-x86-64.so.2 => /usr/lib64/ld-linux-x86-64.so.2
                // ...
                // We need to add /lib64/ld-linux-x86-64.so.2 to deps, else we'll get the error
                // "No such file or directory".
                //
                // Workaround: add libname to deps if it's absolute and exists.
let libname_path = PathBuf::from(libname); if libname_path.is_absolute() && libname_path.exists() { libs.push(libname_path) } PathBuf::from(libpath) } // "/lib64/ld-linux-x86-64.so.2 (0x00007f6878171000)" (Some(libpath), None, None) => PathBuf::from(libpath), _ => continue, }; if !libpath.is_absolute() { continue; } libs.push(libpath) } libs } #[test] fn test_ldd_parse() { let ubuntu_ls_output = "\tlinux-vdso.so.1 => (0x00007fffcfffe000) \tlibselinux.so.1 => /lib/x86_64-linux-gnu/libselinux.so.1 (0x00007f69caa6b000) \tlibc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f69ca6a1000) \tlibpcre.so.3 => /lib/x86_64-linux-gnu/libpcre.so.3 (0x00007f69ca431000) \tlibdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f69ca22d000) \t/lib64/ld-linux-x86-64.so.2 (0x00007f69cac8d000) \tlibpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 (0x00007f69ca010000) "; assert_eq!( parse_ldd_output(ubuntu_ls_output) .iter() .map(|p| p.to_str().unwrap()) .collect::>(), &[ "/lib/x86_64-linux-gnu/libselinux.so.1", "/lib/x86_64-linux-gnu/libc.so.6", "/lib/x86_64-linux-gnu/libpcre.so.3", "/lib/x86_64-linux-gnu/libdl.so.2", "/lib64/ld-linux-x86-64.so.2", "/lib/x86_64-linux-gnu/libpthread.so.0", ] ) } #[test] fn test_ldd_parse_static() { let static_outputs = &[ "\tstatically linked", // glibc ldd output "\tldd (0x7f79ef662000)", // musl ldd output ]; for static_output in static_outputs { assert_eq!(parse_ldd_output(static_output).len(), 0) } } #[test] fn test_ldd_parse_v2_30() { let archlinux_ls_output = "\tlinux-vdso.so.1 (0x00007ffddc1f6000) \tlibcap.so.2 => /usr/lib/libcap.so.2 (0x00007f4980989000) \tlibc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f69ca6a1000) \tlibc.so.6 => /usr/lib/libc.so.6 (0x00007f49807c2000) \t/lib64/ld-linux-x86-64.so.2 => /usr/lib64/ld-linux-x86-64.so.2 (0x00007f49809e9000) "; assert_eq!( parse_ldd_output(archlinux_ls_output) .iter() .map(|p| p.to_str().unwrap()) .collect::>(), &[ "/usr/lib/libcap.so.2", "/lib/x86_64-linux-gnu/libc.so.6", "/usr/lib/libc.so.6", "/lib64/ld-linux-x86-64.so.2", "/usr/lib64/ld-linux-x86-64.so.2", ] ) } } pub fn make_tar_header(src: &Path, dest: &str) -> io::Result { let metadata_res = fs::metadata(src); let mut file_header = tar::Header::new_ustar(); // TODO: test this works if let Ok(metadata) = metadata_res { // TODO: if the source file is a symlink, I think this does bad things file_header.set_metadata(&metadata); } else { warn!( "Couldn't get metadata of file {:?}, falling back to some defaults", src ); file_header.set_mode(0o644); file_header.set_uid(0); file_header.set_gid(0); file_header.set_mtime(0); file_header .set_device_major(0) .expect("expected a ustar header"); file_header .set_device_minor(0) .expect("expected a ustar header"); file_header.set_entry_type(tar::EntryType::file()); } // tar-rs imposes that `set_path` takes a relative path assert!(dest.starts_with('/')); let dest = dest.trim_start_matches('/'); assert!(!dest.starts_with('/')); // `set_path` converts its argument to a Path and back to bytes on Windows, so this is // a bit of an inefficient round-trip. 
Windows path separators will also be normalised // to be like Unix, and the path is (now) relative so there should be no funny results // due to Windows // TODO: should really use a `set_path_str` or similar file_header.set_path(dest)?; Ok(file_header) } /// Simplify a path to one without any relative components, erroring if it looks /// like there could be any symlink complexity that means a simplified path is not /// equivalent to the original (see the documentation of `fs::canonicalize` for an /// example). /// /// So why avoid resolving symlinks? Any path that we are trying to simplify has /// (usually) been added to an archive because something will try access it, but /// resolving symlinks (be they for the actual file or directory components) can /// make the accessed path 'disappear' in favour of the canonical path. pub fn simplify_path(path: &Path) -> Result { SimplifyPath { resolved_symlinks: None, } .simplify(path) } struct SimplifyPath<'a> { pub resolved_symlinks: Option<&'a mut std::collections::BTreeMap>, } impl SimplifyPath<'_> { pub fn simplify(&mut self, path: &Path) -> Result { let mut final_path = PathBuf::new(); for component in path.components() { match component { c @ Component::RootDir | c @ Component::Prefix(_) | c @ Component::Normal(_) => { final_path.push(c); if self.resolved_symlinks.is_some() && final_path.is_symlink() { let parent = final_path.parent().expect("symlinks have parents"); let link_target = final_path.read_link()?; let new_final_path = self.simplify(&parent.join(&link_target))?; let old_final_path = std::mem::replace(&mut final_path, new_final_path.clone()); self.resolved_symlinks .as_mut() .unwrap() .insert(old_final_path, new_final_path); } } Component::ParentDir => { // If the path is doing funny symlink traversals, just give up. // // This case should only occur if `resolved_symlinks` is `None`. if final_path.is_symlink() { bail!("Cannot handle symlinks in parent paths") } final_path.pop(); } Component::CurDir => continue, } } Ok(final_path) } } mozilla-sccache-40c3d6b/src/dist/test.rs000066400000000000000000000000011475712407500202410ustar00rootroot00000000000000 mozilla-sccache-40c3d6b/src/errors.rs000066400000000000000000000037711475712407500176540ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. pub use anyhow::{anyhow, bail, Context, Error}; use std::process; // We use `anyhow` for error handling. // - Use `context()`/`with_context()` to annotate errors. // - Use `anyhow!` with a string to create a new `anyhow::Error`. // - The error types below (`BadHttpStatusError`, etc.) are internal ones that // need to be checked at points other than the outermost error-checking // layer. 
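//
// As an illustration only (not part of this module), the conventions above
// look like this in practice; `read_config` and its argument are invented
// for the example:
//
//     use crate::errors::*;
//
//     fn read_config(path: &std::path::Path) -> Result<String> {
//         std::fs::read_to_string(path)
//             .with_context(|| format!("failed to read config from {:?}", path))
//     }
//
// `bail!` and `anyhow!` create new errors; the typed errors below are for the
// few places that need to inspect a failure rather than just report it.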
#[cfg(feature = "hyper")]
#[derive(Debug)]
pub struct BadHttpStatusError(pub hyper::StatusCode);

#[derive(Debug)]
pub struct HttpClientError(pub String);

#[derive(Debug)]
pub struct ProcessError(pub process::Output);

#[cfg(feature = "hyper")]
impl std::error::Error for BadHttpStatusError {}

impl std::error::Error for HttpClientError {}

impl std::error::Error for ProcessError {}

#[cfg(feature = "hyper")]
impl std::fmt::Display for BadHttpStatusError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "didn't get a successful HTTP status, got `{}`", self.0)
    }
}

impl std::fmt::Display for HttpClientError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "didn't get a successful HTTP status, got `{}`", self.0)
    }
}

impl std::fmt::Display for ProcessError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", String::from_utf8_lossy(&self.0.stderr))
    }
}

pub type Result<T> = anyhow::Result<T>;
mozilla-sccache-40c3d6b/src/jobserver.rs000066400000000000000000000125771475712407500203430ustar00rootroot00000000000000
use std::io;
use std::process::Command;
use std::sync::Arc;

use futures::channel::mpsc;
use futures::channel::oneshot;
use futures::StreamExt;

use crate::errors::*;

// The execution model of sccache is that on the first run it spawns a server
// in the background and detaches it.
// When normally executing the rust compiler from either cargo or make, it
// will use cargo/make's jobserver and limit its resource usage accordingly.
// When executing the rust compiler through the sccache server, that jobserver
// is not available, and spawning as many rustc as there are CPUs can lead to
// a quadratic use of the CPU resources (each rustc spawning as many threads
// as there are CPUs).
// One way around this issue is to inherit the jobserver from cargo or make
// when the sccache server is spawned, but that means that in some cases, the
// cargo or make process can't terminate until the sccache server terminates
// after its idle timeout (which also never happens if SCCACHE_IDLE_TIMEOUT=0).
// Also, if the sccache server ends up shared between multiple runs of
// cargo/make, then which jobserver is used doesn't make sense anymore.
// Ideally, the sccache client would give a handle to the jobserver it has
// access to, so that the rust compiler would "just" use the jobserver it
// would have used if it had run without sccache, but that adds some extra
// complexity, and requires the use of Unix domain sockets.
// What we do instead is to arbitrarily use our own jobserver.
// Unfortunately, that doesn't absolve us from having to deal with the original
// jobserver, because make may give us file descriptors to its pipes, and the
// simple fact of keeping them open can block it.
// So if it does give us those file descriptors, close them preemptively.
//
// unsafe because it can use the wrong fds.
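//
// For reference (illustrative values, not taken from this codebase): GNU make
// advertises its jobserver to child processes via MAKEFLAGS, e.g.
//
//     MAKEFLAGS=" -j8 --jobserver-auth=3,4"
//
// where `3` and `4` are the read/write ends of its token pipe. GNU make 4.4+
// can instead pass `--jobserver-auth=fifo:/path/to/fifo`; in that style there
// are no inherited pipe fds to close, which is why the parsing below skips
// values starting with "fifo:".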
#[cfg(not(windows))] pub unsafe fn discard_inherited_jobserver() { if let Some(value) = ["CARGO_MAKEFLAGS", "MAKEFLAGS", "MFLAGS"] .into_iter() .find_map(|env| std::env::var(env).ok()) { if let Some(auth) = value.rsplit(' ').find_map(|arg| { arg.strip_prefix("--jobserver-auth=") .or_else(|| arg.strip_prefix("--jobserver-fds=")) }) { if !auth.starts_with("fifo:") { let mut parts = auth.splitn(2, ','); let read = parts.next().unwrap(); let write = match parts.next() { Some(w) => w, None => return, }; let read = read.parse().unwrap(); let write = write.parse().unwrap(); if read < 0 || write < 0 { return; } unsafe { if libc::fcntl(read, libc::F_GETFD) == -1 { return; } if libc::fcntl(write, libc::F_GETFD) == -1 { return; } libc::close(read); libc::close(write); } } } } } #[derive(Clone)] pub struct Client { helper: Option>, tx: Option>>>, inner: jobserver::Client, } pub struct Acquired { _token: Option, } impl Client { pub fn new() -> Client { Client::new_num(num_cpus::get()) } pub fn new_num(num: usize) -> Client { let inner = jobserver::Client::new(num).expect("failed to create jobserver"); Client::_new(inner, false) } fn _new(inner: jobserver::Client, inherited: bool) -> Client { let (helper, tx) = if inherited { (None, None) } else { let (tx, mut rx) = mpsc::unbounded::>(); let helper = inner .clone() .into_helper_thread(move |token| { let rt = tokio::runtime::Builder::new_current_thread() .build() .unwrap(); rt.block_on(async { if let Some(sender) = rx.next().await { drop(sender.send(token)); } }); }) .expect("failed to spawn helper thread"); (Some(Arc::new(helper)), Some(tx)) }; Client { inner, helper, tx } } /// Configures this jobserver to be inherited by the specified command pub fn configure(&self, cmd: &mut Command) { self.inner.configure(cmd) } /// Returns a future that represents an acquired jobserver token. /// /// This should be invoked before any "work" is spawned (for whatever the /// definition of "work" is) to ensure that the system is properly /// rate-limiting itself. pub async fn acquire(&self) -> Result { let (helper, tx) = match (self.helper.as_ref(), self.tx.as_ref()) { (Some(a), Some(b)) => (a, b), _ => return Ok(Acquired { _token: None }), }; let (mytx, myrx) = oneshot::channel(); helper.request_token(); tx.unbounded_send(mytx).unwrap(); let acquired = myrx .await .context("jobserver helper panicked")? .context("failed to acquire jobserver token")?; Ok(Acquired { _token: Some(acquired), }) } } mozilla-sccache-40c3d6b/src/lib.rs000066400000000000000000000060111475712407500170740ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![deny(rust_2018_idioms)] #![allow( clippy::type_complexity, clippy::new_without_default, clippy::blocks_in_conditions )] #![recursion_limit = "256"] #[macro_use] extern crate log; #[cfg(feature = "rouille")] #[macro_use(router)] extern crate rouille; // To get macros in scope, this has to be first. 
#[cfg(test)]
#[macro_use]
mod test;

#[macro_use]
pub mod errors;

mod cache;
mod client;
mod cmdline;
mod commands;
mod compiler;
pub mod config;
pub mod dist;
mod jobserver;
pub mod lru_disk_cache;
mod mock_command;
mod net;
mod protocol;
pub mod server;
#[doc(hidden)]
pub mod util;

use std::env;

/// VERSION is the pkg version of sccache.
///
/// This version is safe to be used in cache services to indicate the version
/// that sccache is.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Used to denote the environment variable that controls
/// logging for sccache and sccache-dist.
pub const LOGGING_ENV: &str = "SCCACHE_LOG";

pub fn main() {
    init_logging();

    let incr_env_strs = ["CARGO_BUILD_INCREMENTAL", "CARGO_INCREMENTAL"];
    incr_env_strs
        .iter()
        .for_each(|incr_str| match env::var(incr_str) {
            Ok(incr_val) if incr_val == "1" => {
                println!("sccache: incremental compilation is prohibited.");
                std::process::exit(1);
            }
            _ => (),
        });

    let command = match cmdline::try_parse() {
        Ok(cmd) => cmd,
        Err(e) => match e.downcast::<clap::error::Error>() {
            // If the error is from clap then let them handle formatting and exiting
            Ok(clap_err) => clap_err.exit(),
            Err(some_other_err) => {
                println!("sccache: {some_other_err}");
                for source in some_other_err.chain().skip(1) {
                    println!("sccache: caused by: {source}");
                }
                std::process::exit(1);
            }
        },
    };

    std::process::exit(match commands::run_command(command) {
        Ok(s) => s,
        Err(e) => {
            eprintln!("sccache: error: {}", e);
            for e in e.chain().skip(1) {
                eprintln!("sccache: caused by: {}", e);
            }
            2
        }
    });
}

fn init_logging() {
    if env::var(LOGGING_ENV).is_ok() {
        match env_logger::Builder::from_env(LOGGING_ENV).try_init() {
            Ok(_) => (),
            Err(e) => panic!("Failed to initialize logging: {:?}", e),
        }
    }
}
mozilla-sccache-40c3d6b/src/lru_disk_cache/000077500000000000000000000000001475712407500207215ustar00rootroot00000000000000
mozilla-sccache-40c3d6b/src/lru_disk_cache/lru_cache.rs000066400000000000000000000620651475712407500232250ustar00rootroot00000000000000
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A cache that holds a limited number of key-value pairs. When the
//! capacity of the cache is exceeded, the least-recently-used
//! (where "used" means a look-up or putting the pair into the cache)
//! pair is automatically removed.
//!
//! # Examples
//!
//! ```rust,ignore
//! use lru_cache::LruCache;
//!
//! let mut cache = LruCache::new(2);
//!
//! cache.insert(1, 10);
//! cache.insert(2, 20);
//! cache.insert(3, 30);
//! assert!(cache.get_mut(&1).is_none());
//! assert_eq!(*cache.get_mut(&2).unwrap(), 20);
//! assert_eq!(*cache.get_mut(&3).unwrap(), 30);
//!
//! cache.insert(2, 22);
//! assert_eq!(*cache.get_mut(&2).unwrap(), 22);
//!
//! cache.insert(6, 60);
//! assert!(cache.get_mut(&3).is_none());
//!
//! cache.set_capacity(1);
//! assert!(cache.get_mut(&2).is_none());
//! ```
//!

use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};

use linked_hash_map::LinkedHashMap;

// FIXME(conventions): implement indexing?

/// A trait for measuring the size of a cache entry.
///
/// If you implement this trait, you should use `usize` as the `Measure` type, otherwise you will
/// also have to implement [`CountableMeter`][countablemeter].
/// /// [countablemeter]: trait.Meter.html pub trait Meter { /// The type used to store measurements. type Measure: Default + Copy; /// Calculate the size of `key` and `value`. fn measure(&self, key: &Q, value: &V) -> Self::Measure where K: Borrow; } /// Size limit based on a simple count of cache items. pub struct Count; impl Meter for Count { /// Don't store anything, the measurement can be derived from the map. type Measure = (); /// Don't actually count anything either. fn measure(&self, _: &Q, _: &V) where K: Borrow, { } } /// A trait to allow the default `Count` measurement to not store an /// extraneous counter. pub trait CountableMeter: Meter { /// Add `amount` to `current` and return the sum. fn add(&self, current: Self::Measure, amount: Self::Measure) -> Self::Measure; /// Subtract `amount` from `current` and return the difference. fn sub(&self, current: Self::Measure, amount: Self::Measure) -> Self::Measure; /// Return `current` as a `usize` if possible, otherwise return `None`. /// /// If this method returns `None` the cache will use the number of cache entries as /// its size. fn size(&self, current: Self::Measure) -> Option; } /// `Count` is all no-ops, the number of entries in the map is the size. impl> CountableMeter for T where T: CountableMeterWithMeasure>::Measure>, { fn add(&self, current: Self::Measure, amount: Self::Measure) -> Self::Measure { CountableMeterWithMeasure::meter_add(self, current, amount) } fn sub(&self, current: Self::Measure, amount: Self::Measure) -> Self::Measure { CountableMeterWithMeasure::meter_sub(self, current, amount) } fn size(&self, current: Self::Measure) -> Option { CountableMeterWithMeasure::meter_size(self, current) } } pub trait CountableMeterWithMeasure { /// Add `amount` to `current` and return the sum. fn meter_add(&self, current: M, amount: M) -> M; /// Subtract `amount` from `current` and return the difference. fn meter_sub(&self, current: M, amount: M) -> M; /// Return `current` as a `usize` if possible, otherwise return `None`. /// /// If this method returns `None` the cache will use the number of cache entries as /// its size. fn meter_size(&self, current: M) -> Option; } /// For any other `Meter` with `Measure=usize`, just do the simple math. impl CountableMeterWithMeasure for T where T: Meter, { fn meter_add(&self, current: usize, amount: usize) -> usize { current + amount } fn meter_sub(&self, current: usize, amount: usize) -> usize { current - amount } fn meter_size(&self, current: usize) -> Option { Some(current as u64) } } impl CountableMeterWithMeasure for Count { fn meter_add(&self, _current: (), _amount: ()) {} fn meter_sub(&self, _current: (), _amount: ()) {} fn meter_size(&self, _current: ()) -> Option { None } } /// An LRU cache. #[derive(Clone)] pub struct LruCache = Count> { map: LinkedHashMap, current_measure: M::Measure, max_capacity: u64, meter: M, } impl LruCache { /// Creates an empty cache that can hold at most `capacity` items. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// let mut cache: LruCache = LruCache::new(10); /// ``` pub fn new(capacity: u64) -> Self { LruCache { map: LinkedHashMap::new(), current_measure: (), max_capacity: capacity, meter: Count, } } } impl> LruCache { /// Creates an empty cache that can hold at most `capacity` as measured by `meter`. /// /// You can implement the [`Meter`][meter] trait to allow custom metrics. 
/// /// [meter]: trait.Meter.html /// /// # Examples /// /// ```rust,ignore /// use lru_cache::{LruCache, Meter}; /// use std::borrow::Borrow; /// /// /// Measure Vec items by their length /// struct VecLen; /// /// impl Meter> for VecLen { /// // Use `Measure = usize` or implement `CountableMeter` as well. /// type Measure = usize; /// fn measure(&self, _: &Q, v: &Vec) -> usize /// where K: Borrow /// { /// v.len() /// } /// } /// /// let mut cache = LruCache::with_meter(5, VecLen); /// cache.insert(1, vec![1, 2]); /// assert_eq!(cache.size(), 2); /// cache.insert(2, vec![3, 4]); /// cache.insert(3, vec![5, 6]); /// assert_eq!(cache.size(), 4); /// assert_eq!(cache.len(), 2); /// ``` pub fn with_meter(capacity: u64, meter: M) -> LruCache { LruCache { map: LinkedHashMap::new(), current_measure: Default::default(), max_capacity: capacity, meter, } } } impl LruCache { /// Creates an empty cache that can hold at most `capacity` items with the given hash builder. pub fn with_hasher(capacity: u64, hash_builder: S) -> LruCache { LruCache { map: LinkedHashMap::with_hasher(hash_builder), current_measure: (), max_capacity: capacity, meter: Count, } } /// Returns a mutable reference to the value corresponding to the given key in the cache, if /// any. /// /// Note that this method is not available for cache objects using `Meter` implementations /// other than `Count`. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(1, "a"); /// cache.insert(2, "b"); /// cache.insert(2, "c"); /// cache.insert(3, "d"); /// /// assert_eq!(cache.get_mut(&1), None); /// assert_eq!(cache.get_mut(&2), Some(&mut "c")); /// ``` pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> where K: Borrow, { self.map.get_refresh(k) } /// Returns an iterator over the cache's key-value pairs in least- to most-recently-used order, /// with mutable references to the values. /// /// Accessing the cache through the iterator does _not_ affect the cache's LRU state. /// Note that this method is not available for cache objects using `Meter` implementations. /// other than `Count`. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(1, 10); /// cache.insert(2, 20); /// cache.insert(3, 30); /// /// let mut n = 2; /// /// for (k, v) in cache.iter_mut() { /// assert_eq!(*k, n); /// assert_eq!(*v, n * 10); /// *v *= 10; /// n += 1; /// } /// /// assert_eq!(n, 4); /// assert_eq!(cache.get_mut(&2), Some(&mut 200)); /// assert_eq!(cache.get_mut(&3), Some(&mut 300)); /// ``` pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { self.internal_iter_mut() } } impl> LruCache { /// Creates an empty cache that can hold at most `capacity` as measured by `meter` with the /// given hash builder. pub fn with_meter_and_hasher(capacity: u64, meter: M, hash_builder: S) -> Self { LruCache { map: LinkedHashMap::with_hasher(hash_builder), current_measure: Default::default(), max_capacity: capacity, meter, } } /// Returns the maximum size of the key-value pairs the cache can hold, as measured by the /// `Meter` used by the cache. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// let mut cache: LruCache = LruCache::new(2); /// assert_eq!(cache.capacity(), 2); /// ``` pub fn capacity(&self) -> u64 { self.max_capacity } /// Checks if the map contains the given key. 
/// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(1); /// /// cache.insert(1, "a"); /// assert!(cache.contains_key(&1)); /// ``` pub fn contains_key(&self, key: &Q) -> bool where K: Borrow, { self.map.contains_key(key) } pub fn get(&mut self, k: &Q) -> Option<&V> where K: Borrow, { self.map.get_refresh(k).map(|v| v as &V) } /// Inserts a key-value pair into the cache. If the key already existed, the old value is /// returned. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(1, "a"); /// cache.insert(2, "b"); /// assert_eq!(cache.get_mut(&1), Some(&mut "a")); /// assert_eq!(cache.get_mut(&2), Some(&mut "b")); /// ``` pub fn insert(&mut self, k: K, v: V) -> Option { let new_size = self.meter.measure(&k, &v); self.current_measure = self.meter.add(self.current_measure, new_size); if let Some(old) = self.map.get(&k) { self.current_measure = self .meter .sub(self.current_measure, self.meter.measure(&k, old)); } let old_val = self.map.insert(k, v); while self.size() > self.capacity() { self.remove_lru(); } old_val } /// Removes the given key from the cache and returns its corresponding value. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(2, "a"); /// /// assert_eq!(cache.remove(&1), None); /// assert_eq!(cache.remove(&2), Some("a")); /// assert_eq!(cache.remove(&2), None); /// assert_eq!(cache.len(), 0); /// ``` pub fn remove(&mut self, k: &Q) -> Option where K: Borrow, { self.map.remove(k).map(|v| { self.current_measure = self .meter .sub(self.current_measure, self.meter.measure(k, &v)); v }) } /// Sets the size of the key-value pairs the cache can hold, as measured by the `Meter` used by /// the cache. /// /// Removes least-recently-used key-value pairs if necessary. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(1, "a"); /// cache.insert(2, "b"); /// cache.insert(3, "c"); /// /// assert_eq!(cache.get_mut(&1), None); /// assert_eq!(cache.get_mut(&2), Some(&mut "b")); /// assert_eq!(cache.get_mut(&3), Some(&mut "c")); /// /// cache.set_capacity(3); /// cache.insert(1, "a"); /// cache.insert(2, "b"); /// /// assert_eq!(cache.get_mut(&1), Some(&mut "a")); /// assert_eq!(cache.get_mut(&2), Some(&mut "b")); /// assert_eq!(cache.get_mut(&3), Some(&mut "c")); /// /// cache.set_capacity(1); /// /// assert_eq!(cache.get_mut(&1), None); /// assert_eq!(cache.get_mut(&2), None); /// assert_eq!(cache.get_mut(&3), Some(&mut "c")); /// ``` pub fn set_capacity(&mut self, capacity: u64) { while self.size() > capacity { self.remove_lru(); } self.max_capacity = capacity; } /// Removes and returns the least recently used key-value pair as a tuple. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(1, "a"); /// cache.insert(2, "b"); /// /// assert_eq!(cache.remove_lru(), Some((1, "a"))); /// assert_eq!(cache.len(), 1); /// ``` #[inline] pub fn remove_lru(&mut self) -> Option<(K, V)> { self.map.pop_front().map(|(k, v)| { self.current_measure = self .meter .sub(self.current_measure, self.meter.measure(&k, &v)); (k, v) }) } /// Returns the number of key-value pairs in the cache. 
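///
/// # Examples
///
/// A short sketch in the style of this module's other examples:
///
/// ```rust,ignore
/// use lru_cache::LruCache;
///
/// let mut cache = LruCache::new(2);
/// cache.insert(1, 10);
/// cache.insert(2, 20);
/// assert_eq!(cache.len(), 2);
/// ```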
pub fn len(&self) -> usize { self.map.len() } /// Returns the size of all the key-value pairs in the cache, as measured by the `Meter` used /// by the cache. pub fn size(&self) -> u64 { self.meter .size(self.current_measure) .unwrap_or_else(|| self.map.len() as u64) } /// Returns `true` if the cache contains no key-value pairs. pub fn is_empty(&self) -> bool { self.map.is_empty() } /// Removes all key-value pairs from the cache. pub fn clear(&mut self) { self.map.clear(); self.current_measure = Default::default(); } /// Returns an iterator over the cache's key-value pairs in least- to most-recently-used order. /// /// Accessing the cache through the iterator does _not_ affect the cache's LRU state. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(1, 10); /// cache.insert(2, 20); /// cache.insert(3, 30); /// /// let kvs: Vec<_> = cache.iter().collect(); /// assert_eq!(kvs, [(&2, &20), (&3, &30)]); /// ``` pub fn iter(&self) -> Iter<'_, K, V> { Iter(self.map.iter()) } fn internal_iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut(self.map.iter_mut()) } } impl> Extend<(K, V)> for LruCache { fn extend>(&mut self, iter: I) { for (k, v) in iter { self.insert(k, v); } } } impl> fmt::Debug for LruCache { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter().rev()).finish() } } impl> IntoIterator for LruCache { type Item = (K, V); type IntoIter = IntoIter; fn into_iter(self) -> IntoIter { IntoIter(self.map.into_iter()) } } impl<'a, K: Eq + Hash, V, S: BuildHasher, M: CountableMeter> IntoIterator for &'a LruCache { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Iter<'a, K, V> { self.iter() } } impl<'a, K: Eq + Hash, V, S: BuildHasher, M: CountableMeter> IntoIterator for &'a mut LruCache { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; fn into_iter(self) -> IterMut<'a, K, V> { self.internal_iter_mut() } } /// An iterator over a cache's key-value pairs in least- to most-recently-used order. /// /// # Examples /// /// ```rust,ignore /// use lru_cache::LruCache; /// /// let mut cache = LruCache::new(2); /// /// cache.insert(1, 10); /// cache.insert(2, 20); /// cache.insert(3, 30); /// /// let mut n = 2; /// /// for (k, v) in cache { /// assert_eq!(k, n); /// assert_eq!(v, n * 10); /// n += 1; /// } /// /// assert_eq!(n, 4); /// ``` #[derive(Clone)] pub struct IntoIter(linked_hash_map::IntoIter); impl Iterator for IntoIter { type Item = (K, V); fn next(&mut self) -> Option<(K, V)> { self.0.next() } fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } } impl DoubleEndedIterator for IntoIter { fn next_back(&mut self) -> Option<(K, V)> { self.0.next_back() } } impl ExactSizeIterator for IntoIter { fn len(&self) -> usize { self.0.len() } } /// An iterator over a cache's key-value pairs in least- to most-recently-used order. /// /// Accessing a cache through the iterator does _not_ affect the cache's LRU state. 
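///
/// # Examples
///
/// A sketch following the conventions of the other examples in this module:
///
/// ```rust,ignore
/// use lru_cache::LruCache;
///
/// let mut cache = LruCache::new(2);
/// cache.insert(1, 10);
/// cache.insert(2, 20);
///
/// // Oldest entry first; iterating does not refresh the LRU order.
/// let kvs: Vec<_> = cache.iter().collect();
/// assert_eq!(kvs, [(&1, &10), (&2, &20)]);
/// ```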
pub struct Iter<'a, K, V>(linked_hash_map::Iter<'a, K, V>); impl<'a, K, V> Clone for Iter<'a, K, V> { fn clone(&self) -> Iter<'a, K, V> { Iter(self.0.clone()) } } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option<(&'a K, &'a V)> { self.0.next() } fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } } impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a V)> { self.0.next_back() } } impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> { fn len(&self) -> usize { self.0.len() } } /// An iterator over a cache's key-value pairs in least- to most-recently-used order with mutable /// references to the values. /// /// Accessing a cache through the iterator does _not_ affect the cache's LRU state. pub struct IterMut<'a, K, V>(linked_hash_map::IterMut<'a, K, V>); impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option<(&'a K, &'a mut V)> { self.0.next() } fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } } impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> { self.0.next_back() } } impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> { fn len(&self) -> usize { self.0.len() } } #[cfg(test)] mod tests { use super::{LruCache, Meter}; use std::borrow::Borrow; #[test] fn test_put_and_get() { let mut cache = LruCache::new(2); cache.insert(1, 10); cache.insert(2, 20); assert_eq!(cache.get_mut(&1), Some(&mut 10)); assert_eq!(cache.get_mut(&2), Some(&mut 20)); assert_eq!(cache.len(), 2); assert_eq!(cache.size(), 2); } #[test] fn test_put_update() { let mut cache = LruCache::new(1); cache.insert("1", 10); cache.insert("1", 19); assert_eq!(cache.get_mut("1"), Some(&mut 19)); assert_eq!(cache.len(), 1); } #[test] fn test_contains_key() { let mut cache = LruCache::new(1); cache.insert("1", 10); assert!(cache.contains_key("1")); } #[test] fn test_expire_lru() { let mut cache = LruCache::new(2); cache.insert("foo1", "bar1"); cache.insert("foo2", "bar2"); cache.insert("foo3", "bar3"); assert!(cache.get_mut("foo1").is_none()); cache.insert("foo2", "bar2update"); cache.insert("foo4", "bar4"); assert!(cache.get_mut("foo3").is_none()); } #[test] fn test_pop() { let mut cache = LruCache::new(2); cache.insert(1, 10); cache.insert(2, 20); assert_eq!(cache.len(), 2); let opt1 = cache.remove(&1); assert!(opt1.is_some()); assert_eq!(opt1.unwrap(), 10); assert!(cache.get_mut(&1).is_none()); assert_eq!(cache.len(), 1); } #[test] fn test_change_capacity() { let mut cache = LruCache::new(2); assert_eq!(cache.capacity(), 2); cache.insert(1, 10); cache.insert(2, 20); cache.set_capacity(1); assert!(cache.get_mut(&1).is_none()); assert_eq!(cache.capacity(), 1); } #[test] fn test_debug() { let mut cache = LruCache::new(3); cache.insert(1, 10); cache.insert(2, 20); cache.insert(3, 30); assert_eq!(format!("{:?}", cache), "{3: 30, 2: 20, 1: 10}"); cache.insert(2, 22); assert_eq!(format!("{:?}", cache), "{2: 22, 3: 30, 1: 10}"); cache.insert(6, 60); assert_eq!(format!("{:?}", cache), "{6: 60, 2: 22, 3: 30}"); cache.get_mut(&3); assert_eq!(format!("{:?}", cache), "{3: 30, 6: 60, 2: 22}"); cache.set_capacity(2); assert_eq!(format!("{:?}", cache), "{3: 30, 6: 60}"); } #[test] fn test_remove() { let mut cache = LruCache::new(3); cache.insert(1, 10); cache.insert(2, 20); cache.insert(3, 30); cache.insert(4, 40); cache.insert(5, 50); cache.remove(&3); cache.remove(&4); assert!(cache.get_mut(&3).is_none()); 
assert!(cache.get_mut(&4).is_none()); cache.insert(6, 60); cache.insert(7, 70); cache.insert(8, 80); assert!(cache.get_mut(&5).is_none()); assert_eq!(cache.get_mut(&6), Some(&mut 60)); assert_eq!(cache.get_mut(&7), Some(&mut 70)); assert_eq!(cache.get_mut(&8), Some(&mut 80)); } #[test] fn test_clear() { let mut cache = LruCache::new(2); cache.insert(1, 10); cache.insert(2, 20); cache.clear(); assert!(cache.get_mut(&1).is_none()); assert!(cache.get_mut(&2).is_none()); assert_eq!(format!("{:?}", cache), "{}"); } #[test] fn test_iter() { let mut cache = LruCache::new(3); cache.insert(1, 10); cache.insert(2, 20); cache.insert(3, 30); cache.insert(4, 40); cache.insert(5, 50); assert_eq!( cache.iter().collect::>(), [(&3, &30), (&4, &40), (&5, &50)] ); assert_eq!( cache.iter_mut().collect::>(), [(&3, &mut 30), (&4, &mut 40), (&5, &mut 50)] ); assert_eq!( cache.iter().rev().collect::>(), [(&5, &50), (&4, &40), (&3, &30)] ); assert_eq!( cache.iter_mut().rev().collect::>(), [(&5, &mut 50), (&4, &mut 40), (&3, &mut 30)] ); } struct VecLen; impl Meter> for VecLen { type Measure = usize; fn measure(&self, _: &Q, v: &Vec) -> usize where K: Borrow, { v.len() } } #[test] fn test_metered_cache() { let mut cache = LruCache::with_meter(5, VecLen); cache.insert("foo1", vec![1, 2]); assert_eq!(cache.size(), 2); cache.insert("foo2", vec![3, 4]); cache.insert("foo3", vec![5, 6]); assert_eq!(cache.size(), 4); assert!(!cache.contains_key("foo1")); cache.insert("foo2", vec![7, 8]); cache.insert("foo4", vec![9, 10]); assert_eq!(cache.size(), 4); assert!(!cache.contains_key("foo3")); assert_eq!(cache.get("foo2"), Some(&vec![7, 8])); } #[test] fn test_metered_cache_reinsert_larger() { let mut cache = LruCache::with_meter(5, VecLen); cache.insert("foo1", vec![1, 2]); cache.insert("foo2", vec![3, 4]); assert_eq!(cache.size(), 4); cache.insert("foo2", vec![5, 6, 7, 8]); assert_eq!(cache.size(), 4); assert!(!cache.contains_key("foo1")); } #[test] fn test_metered_cache_oversize() { let mut cache = LruCache::with_meter(2, VecLen); cache.insert("foo1", vec![1, 2]); cache.insert("foo2", vec![3, 4, 5, 6]); assert_eq!(cache.size(), 0); assert!(!cache.contains_key("foo1")); assert!(!cache.contains_key("foo2")); } } mozilla-sccache-40c3d6b/src/lru_disk_cache/mod.rs000066400000000000000000000625371475712407500220630ustar00rootroot00000000000000pub mod lru_cache; use fs::File; use fs_err as fs; use std::borrow::Borrow; use std::boxed::Box; use std::collections::hash_map::RandomState; use std::error::Error as StdError; use std::ffi::{OsStr, OsString}; use std::fmt; use std::hash::BuildHasher; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use filetime::{set_file_times, FileTime}; pub use lru_cache::{LruCache, Meter}; use tempfile::NamedTempFile; use walkdir::WalkDir; use crate::util::OsStrExt; const TEMPFILE_PREFIX: &str = ".sccachetmp"; struct FileSize; /// Given a tuple of (path, filesize), use the filesize for measurement. impl Meter for FileSize { type Measure = usize; fn measure(&self, _: &Q, v: &u64) -> usize where K: Borrow, { *v as usize } } /// Return an iterator of `(path, size)` of files under `path` sorted by ascending last-modified /// time, such that the oldest modified file is returned first. fn get_all_files>(path: P) -> Box> { let mut files: Vec<_> = WalkDir::new(path.as_ref()) .into_iter() .filter_map(|e| { e.ok().and_then(|f| { // Only look at files if f.file_type().is_file() { // Get the last-modified time, size, and the full path. 
f.metadata().ok().and_then(|m| { m.modified() .ok() .map(|mtime| (mtime, f.path().to_owned(), m.len())) }) } else { None } }) }) .collect(); // Sort by last-modified-time, so oldest file first. files.sort_by_key(|k| k.0); Box::new(files.into_iter().map(|(_mtime, path, size)| (path, size))) } /// An LRU cache of files on disk. pub struct LruDiskCache { lru: LruCache, root: PathBuf, pending: Vec, pending_size: u64, } /// Errors returned by this crate. #[derive(Debug)] pub enum Error { /// The file was too large to fit in the cache. FileTooLarge, /// The file was not in the cache. FileNotInCache, /// An IO Error occurred. Io(io::Error), } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Error::FileTooLarge => write!(f, "File too large"), Error::FileNotInCache => write!(f, "File not in cache"), Error::Io(ref e) => write!(f, "{}", e), } } } impl StdError for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::FileTooLarge => None, Error::FileNotInCache => None, Error::Io(ref e) => Some(e), } } } impl From for Error { fn from(e: io::Error) -> Error { Error::Io(e) } } /// A convenience `Result` type pub type Result = std::result::Result; /// Trait objects can't be bounded by more than one non-builtin trait. pub trait ReadSeek: Read + Seek + Send {} impl ReadSeek for T {} enum AddFile<'a> { AbsPath(PathBuf), RelPath(&'a OsStr), } pub struct LruDiskCacheAddEntry { file: NamedTempFile, key: OsString, size: u64, } impl LruDiskCacheAddEntry { pub fn as_file_mut(&mut self) -> &mut std::fs::File { self.file.as_file_mut() } } impl LruDiskCache { /// Create an `LruDiskCache` that stores files in `path`, limited to `size` bytes. /// /// Existing files in `path` will be stored with their last-modified time from the filesystem /// used as the order for the recency of their use. Any files that are individually larger /// than `size` bytes will be removed. /// /// The cache is not observant of changes to files under `path` from external sources, it /// expects to have sole maintence of the contents. pub fn new(path: T, size: u64) -> Result where PathBuf: From, { LruDiskCache { lru: LruCache::with_meter(size, FileSize), root: PathBuf::from(path), pending: vec![], pending_size: 0, } .init() } /// Return the current size of all the files in the cache. pub fn size(&self) -> u64 { self.lru.size() + self.pending_size } /// Return the count of entries in the cache. pub fn len(&self) -> usize { self.lru.len() } pub fn is_empty(&self) -> bool { self.lru.len() == 0 } /// Return the maximum size of the cache. pub fn capacity(&self) -> u64 { self.lru.capacity() } /// Return the path in which the cache is stored. pub fn path(&self) -> &Path { self.root.as_path() } /// Return the path that `key` would be stored at. fn rel_to_abs_path>(&self, rel_path: K) -> PathBuf { self.root.join(rel_path) } /// Scan `self.root` for existing files and store them. 
fn init(mut self) -> Result { fs::create_dir_all(&self.root)?; for (file, size) in get_all_files(&self.root) { if file .file_name() .expect("Bad path?") .starts_with(TEMPFILE_PREFIX) { fs::remove_file(&file).unwrap_or_else(|e| { error!("Error removing temporary file `{}`: {}", file.display(), e) }); } else if !self.can_store(size) { fs::remove_file(file).unwrap_or_else(|e| { error!( "Error removing file `{}` which is too large for the cache ({} bytes)", e, size ) }); } else { self.add_file(AddFile::AbsPath(file), size) .unwrap_or_else(|e| error!("Error adding file: {}", e)); } } Ok(self) } /// Returns `true` if the disk cache can store a file of `size` bytes. pub fn can_store(&self, size: u64) -> bool { size <= self.lru.capacity() } fn make_space(&mut self, size: u64) -> Result<()> { if !self.can_store(size) { return Err(Error::FileTooLarge); } //TODO: ideally LRUCache::insert would give us back the entries it had to remove. while self.size() + size > self.capacity() { let (rel_path, _) = self.lru.remove_lru().expect("Unexpectedly empty cache!"); let remove_path = self.rel_to_abs_path(rel_path); //TODO: check that files are removable during `init`, so that this is only // due to outside interference. fs::remove_file(&remove_path).unwrap_or_else(|e| { // Sometimes the file has already been removed // this seems to happen when the max cache size has been reached // https://github.com/mozilla/sccache/issues/2092 if e.kind() == std::io::ErrorKind::NotFound { debug!( "Error removing file from cache as it was not found: `{:?}`", remove_path ); } else { panic!( "Error removing file from cache: `{:?}`: {}, {:?}", remove_path, e, e.kind() ) } }); } Ok(()) } /// Add the file at `path` of size `size` to the cache. fn add_file(&mut self, addfile_path: AddFile<'_>, size: u64) -> Result<()> { let rel_path = match addfile_path { AddFile::AbsPath(ref p) => p.strip_prefix(&self.root).expect("Bad path?").as_os_str(), AddFile::RelPath(p) => p, }; self.make_space(size)?; self.lru.insert(rel_path.to_owned(), size); Ok(()) } fn insert_by, F: FnOnce(&Path) -> io::Result<()>>( &mut self, key: K, size: Option, by: F, ) -> Result<()> { if let Some(size) = size { if !self.can_store(size) { return Err(Error::FileTooLarge); } } let rel_path = key.as_ref(); let path = self.rel_to_abs_path(rel_path); fs::create_dir_all(path.parent().expect("Bad path?"))?; by(&path)?; let size = match size { Some(size) => size, None => fs::metadata(path)?.len(), }; self.add_file(AddFile::RelPath(rel_path), size) .map_err(|e| { error!( "Failed to insert file `{}`: {}", rel_path.to_string_lossy(), e ); fs::remove_file(self.rel_to_abs_path(rel_path)) .expect("Failed to remove file we just created!"); e }) } /// Add a file by calling `with` with the open `File` corresponding to the cache at path `key`. pub fn insert_with, F: FnOnce(File) -> io::Result<()>>( &mut self, key: K, with: F, ) -> Result<()> { self.insert_by(key, None, |path| with(File::create(path)?)) } /// Add a file with `bytes` as its contents to the cache at path `key`. pub fn insert_bytes>(&mut self, key: K, bytes: &[u8]) -> Result<()> { self.insert_by(key, Some(bytes.len() as u64), |path| { let mut f = File::create(path)?; f.write_all(bytes)?; Ok(()) }) } /// Add an existing file at `path` to the cache at path `key`. 
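///
/// A usage sketch (the key, paths, and capacity are illustrative):
///
/// ```rust,ignore
/// let mut cache = LruDiskCache::new("/tmp/example-cache", 64 * 1024 * 1024)?;
/// // Moves the file into the cache (copying if a rename isn't possible).
/// cache.insert_file("crate/foo.o", "/tmp/build/foo.o")?;
/// assert!(cache.contains_key("crate/foo.o"));
/// ```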
pub fn insert_file, P: AsRef>(&mut self, key: K, path: P) -> Result<()> { let size = fs::metadata(path.as_ref())?.len(); self.insert_by(key, Some(size), |new_path| { fs::rename(path.as_ref(), new_path).or_else(|_| { warn!("fs::rename failed, falling back to copy!"); fs::copy(path.as_ref(), new_path)?; fs::remove_file(path.as_ref()).unwrap_or_else(|e| { error!("Failed to remove original file in insert_file: {}", e) }); Ok(()) }) }) } /// Prepare the insertion of a file at path `key`. The resulting entry must be /// committed with `LruDiskCache::commit`. pub fn prepare_add<'a, K: AsRef + 'a>( &mut self, key: K, size: u64, ) -> Result { // Ensure we have enough space for the advertized space. self.make_space(size)?; let key = key.as_ref().to_owned(); self.pending.push(key.clone()); self.pending_size += size; tempfile::Builder::new() .prefix(TEMPFILE_PREFIX) .tempfile_in(&self.root) .map(|file| LruDiskCacheAddEntry { file, key, size }) .map_err(Into::into) } /// Commit an entry coming from `LruDiskCache::prepare_add`. pub fn commit(&mut self, entry: LruDiskCacheAddEntry) -> Result<()> { let LruDiskCacheAddEntry { mut file, key, size, } = entry; file.flush()?; let real_size = file.as_file().metadata()?.len(); // If the file is larger than the size that had been advertized, ensure // we have enough space for it. self.make_space(real_size.saturating_sub(size))?; self.pending .iter() .position(|k| k == &key) .map(|i| self.pending.remove(i)) .unwrap(); self.pending_size -= size; let path = self.rel_to_abs_path(&key); fs::create_dir_all(path.parent().unwrap())?; file.persist(path).map_err(|e| e.error)?; self.lru.insert(key, real_size); Ok(()) } /// Return `true` if a file with path `key` is in the cache. Entries created /// by `LruDiskCache::prepare_add` but not yet committed return `false`. pub fn contains_key>(&self, key: K) -> bool { self.lru.contains_key(key.as_ref()) } /// Get an opened `File` for `key`, if one exists and can be opened. Updates the LRU state /// of the file if present. Avoid using this method if at all possible, prefer `.get`. /// Entries created by `LruDiskCache::prepare_add` but not yet committed return /// `Err(Error::FileNotInCache)`. pub fn get_file>(&mut self, key: K) -> Result { let rel_path = key.as_ref(); let path = self.rel_to_abs_path(rel_path); self.lru .get(rel_path) .ok_or(Error::FileNotInCache) .and_then(|_| { let t = FileTime::now(); set_file_times(&path, t, t)?; File::open(path).map_err(Into::into) }) } /// Get an opened readable and seekable handle to the file at `key`, if one exists and can /// be opened. Updates the LRU state of the file if present. /// Entries created by `LruDiskCache::prepare_add` but not yet committed return /// `Err(Error::FileNotInCache)`. pub fn get>(&mut self, key: K) -> Result> { self.get_file(key).map(|f| Box::new(f) as Box) } /// Remove the given key from the cache. pub fn remove>(&mut self, key: K) -> Result<()> { match self.lru.remove(key.as_ref()) { Some(_) => { let path = self.rel_to_abs_path(key.as_ref()); fs::remove_file(&path).map_err(|e| { error!("Error removing file from cache: `{:?}`: {}", path, e); Into::into(e) }) } None => Ok(()), } } } #[cfg(test)] mod tests { use super::fs::{self, File}; use super::{get_all_files, Error, LruDiskCache, LruDiskCacheAddEntry}; use filetime::{set_file_times, FileTime}; use std::io::{self, Read, Write}; use std::path::{Path, PathBuf}; use tempfile::TempDir; struct TestFixture { /// Temp directory. 
pub tempdir: TempDir, } fn create_file, F: FnOnce(File) -> io::Result<()>>( dir: &Path, path: T, fill_contents: F, ) -> io::Result { let b = dir.join(path); fs::create_dir_all(b.parent().unwrap())?; let f = fs::File::create(&b)?; fill_contents(f)?; b.canonicalize() } /// Set the last modified time of `path` backwards by `seconds` seconds. fn set_mtime_back>(path: T, seconds: usize) { let m = fs::metadata(path.as_ref()).unwrap(); let t = FileTime::from_last_modification_time(&m); let t = FileTime::from_unix_time(t.unix_seconds() - seconds as i64, t.nanoseconds()); set_file_times(path, t, t).unwrap(); } fn read_all(r: &mut R) -> io::Result> { let mut v = vec![]; r.read_to_end(&mut v)?; Ok(v) } impl TestFixture { pub fn new() -> TestFixture { TestFixture { tempdir: tempfile::Builder::new() .prefix("lru-disk-cache-test") .tempdir() .unwrap(), } } pub fn tmp(&self) -> &Path { self.tempdir.path() } pub fn create_file>(&self, path: T, size: usize) -> PathBuf { create_file(self.tempdir.path(), path, |mut f| { f.write_all(&vec![0; size]) }) .unwrap() } } #[test] fn test_empty_dir() { let f = TestFixture::new(); LruDiskCache::new(f.tmp(), 1024).unwrap(); } #[test] fn test_missing_root() { let f = TestFixture::new(); LruDiskCache::new(f.tmp().join("not-here"), 1024).unwrap(); } #[test] fn test_some_existing_files() { let f = TestFixture::new(); f.create_file("file1", 10); f.create_file("file2", 10); let c = LruDiskCache::new(f.tmp(), 20).unwrap(); assert_eq!(c.size(), 20); assert_eq!(c.len(), 2); } #[test] fn test_existing_file_too_large() { let f = TestFixture::new(); // Create files explicitly in the past. set_mtime_back(f.create_file("file1", 10), 10); set_mtime_back(f.create_file("file2", 10), 5); let c = LruDiskCache::new(f.tmp(), 15).unwrap(); assert_eq!(c.size(), 10); assert_eq!(c.len(), 1); assert!(!c.contains_key("file1")); assert!(c.contains_key("file2")); } #[test] fn test_existing_files_lru_mtime() { let f = TestFixture::new(); // Create files explicitly in the past. set_mtime_back(f.create_file("file1", 10), 5); set_mtime_back(f.create_file("file2", 10), 10); let mut c = LruDiskCache::new(f.tmp(), 25).unwrap(); assert_eq!(c.size(), 20); c.insert_bytes("file3", &[0; 10]).unwrap(); assert_eq!(c.size(), 20); // The oldest file on disk should have been removed. assert!(!c.contains_key("file2")); assert!(c.contains_key("file1")); } #[test] fn test_insert_bytes() { let f = TestFixture::new(); let mut c = LruDiskCache::new(f.tmp(), 25).unwrap(); c.insert_bytes("a/b/c", &[0; 10]).unwrap(); assert!(c.contains_key("a/b/c")); c.insert_bytes("a/b/d", &[0; 10]).unwrap(); assert_eq!(c.size(), 20); // Adding this third file should put the cache above the limit. c.insert_bytes("x/y/z", &[0; 10]).unwrap(); assert_eq!(c.size(), 20); // The least-recently-used file should have been removed. assert!(!c.contains_key("a/b/c")); assert!(!f.tmp().join("a/b/c").exists()); } #[test] fn test_insert_bytes_exact() { // Test that files adding up to exactly the size limit works. let f = TestFixture::new(); let mut c = LruDiskCache::new(f.tmp(), 20).unwrap(); c.insert_bytes("file1", &[1; 10]).unwrap(); c.insert_bytes("file2", &[2; 10]).unwrap(); assert_eq!(c.size(), 20); c.insert_bytes("file3", &[3; 10]).unwrap(); assert_eq!(c.size(), 20); assert!(!c.contains_key("file1")); } #[test] fn test_add_get_lru() { let f = TestFixture::new(); { let mut c = LruDiskCache::new(f.tmp(), 25).unwrap(); c.insert_bytes("file1", &[1; 10]).unwrap(); c.insert_bytes("file2", &[2; 10]).unwrap(); // Get the file to bump its LRU status. 
assert_eq!( read_all(&mut c.get("file1").unwrap()).unwrap(), vec![1u8; 10] ); // Adding this third file should put the cache above the limit. c.insert_bytes("file3", &[3; 10]).unwrap(); assert_eq!(c.size(), 20); // The least-recently-used file should have been removed. assert!(!c.contains_key("file2")); } // Get rid of the cache, to test that the LRU persists on-disk as mtimes. // This is hacky, but mtime resolution on my mac with HFS+ is only 1 second, so we either // need to have a 1 second sleep in the test (boo) or adjust the mtimes back a bit so // that updating one file to the current time actually works to make it newer. set_mtime_back(f.tmp().join("file1"), 5); set_mtime_back(f.tmp().join("file3"), 5); { let mut c = LruDiskCache::new(f.tmp(), 25).unwrap(); // Bump file1 again. c.get("file1").unwrap(); } // Now check that the on-disk mtimes were updated and used. { let mut c = LruDiskCache::new(f.tmp(), 25).unwrap(); assert!(c.contains_key("file1")); assert!(c.contains_key("file3")); assert_eq!(c.size(), 20); // Add another file to bump out the least-recently-used. c.insert_bytes("file4", &[4; 10]).unwrap(); assert_eq!(c.size(), 20); assert!(!c.contains_key("file3")); assert!(c.contains_key("file1")); } } #[test] fn test_insert_bytes_too_large() { let f = TestFixture::new(); let mut c = LruDiskCache::new(f.tmp(), 1).unwrap(); match c.insert_bytes("a/b/c", &[0; 2]) { Err(Error::FileTooLarge) => {} x => panic!("Unexpected result: {:?}", x), } } #[test] fn test_insert_file() { let f = TestFixture::new(); let p1 = f.create_file("file1", 10); let p2 = f.create_file("file2", 10); let p3 = f.create_file("file3", 10); let mut c = LruDiskCache::new(f.tmp().join("cache"), 25).unwrap(); c.insert_file("file1", &p1).unwrap(); assert_eq!(c.len(), 1); c.insert_file("file2", &p2).unwrap(); assert_eq!(c.len(), 2); // Get the file to bump its LRU status. assert_eq!( read_all(&mut c.get("file1").unwrap()).unwrap(), vec![0u8; 10] ); // Adding this third file should put the cache above the limit. c.insert_file("file3", &p3).unwrap(); assert_eq!(c.len(), 2); assert_eq!(c.size(), 20); // The least-recently-used file should have been removed. assert!(!c.contains_key("file2")); assert!(!p1.exists()); assert!(!p2.exists()); assert!(!p3.exists()); } #[test] fn test_prepare_and_commit() { let f = TestFixture::new(); let cache_dir = f.tmp(); let mut c = LruDiskCache::new(cache_dir, 25).unwrap(); let mut tmp = c.prepare_add("a/b/c", 10).unwrap(); // An entry added but not committed doesn't count, except for the // (reserved) size of the disk cache. assert!(!c.contains_key("a/b/c")); assert_eq!(c.size(), 10); assert_eq!(c.lru.size(), 0); tmp.as_file_mut().write_all(&[0; 10]).unwrap(); c.commit(tmp).unwrap(); // Once committed, the file appears. assert!(c.contains_key("a/b/c")); assert_eq!(c.size(), 10); assert_eq!(c.lru.size(), 10); let mut tmp = c.prepare_add("a/b/d", 10).unwrap(); assert_eq!(c.size(), 20); assert_eq!(c.lru.size(), 10); // Even though we haven't committed the second file, preparing for // the addition of the third one should put the cache above the // limit and trigger cleanup. let mut tmp2 = c.prepare_add("x/y/z", 10).unwrap(); assert_eq!(c.size(), 20); assert_eq!(c.lru.size(), 0); // At this point, we expect the first entry to have been removed entirely. 
assert!(!c.contains_key("a/b/c")); assert!(!f.tmp().join("a/b/c").exists()); tmp.as_file_mut().write_all(&[0; 10]).unwrap(); tmp2.as_file_mut().write_all(&[0; 10]).unwrap(); c.commit(tmp).unwrap(); assert_eq!(c.size(), 20); assert_eq!(c.lru.size(), 10); c.commit(tmp2).unwrap(); assert_eq!(c.size(), 20); assert_eq!(c.lru.size(), 20); let mut tmp = c.prepare_add("a/b/c", 5).unwrap(); assert_eq!(c.size(), 25); assert_eq!(c.lru.size(), 20); // Committing a file bigger than the promised size should properly // handle the case where the real size makes the cache go over the limit. tmp.as_file_mut().write_all(&[0; 10]).unwrap(); c.commit(tmp).unwrap(); assert_eq!(c.size(), 20); assert_eq!(c.lru.size(), 20); assert!(!c.contains_key("a/b/d")); assert!(!f.tmp().join("a/b/d").exists()); // If for some reason, the cache still contains a temporary file on // initialization, the temporary file is removed. let LruDiskCacheAddEntry { file, .. } = c.prepare_add("a/b/d", 5).unwrap(); let (_, path) = file.keep().unwrap(); std::mem::drop(c); // Ensure that the temporary file is indeed there. assert!(get_all_files(cache_dir).any(|(file, _)| file == path)); LruDiskCache::new(cache_dir, 25).unwrap(); // The temporary file should not be there anymore. assert!(get_all_files(cache_dir).all(|(file, _)| file != path)); } #[test] fn test_remove() { let f = TestFixture::new(); let p1 = f.create_file("file1", 10); let p2 = f.create_file("file2", 10); let p3 = f.create_file("file3", 10); let mut c = LruDiskCache::new(f.tmp().join("cache"), 25).unwrap(); c.insert_file("file1", &p1).unwrap(); c.insert_file("file2", &p2).unwrap(); c.remove("file1").unwrap(); c.insert_file("file3", &p3).unwrap(); assert_eq!(c.len(), 2); assert_eq!(c.size(), 20); // file1 should have been removed. assert!(!c.contains_key("file1")); assert!(!f.tmp().join("cache").join("file1").exists()); assert!(f.tmp().join("cache").join("file2").exists()); assert!(f.tmp().join("cache").join("file3").exists()); assert!(!p1.exists()); assert!(!p2.exists()); assert!(!p3.exists()); let p4 = f.create_file("file1", 10); c.insert_file("file1", &p4).unwrap(); assert_eq!(c.len(), 2); // file2 should have been removed. assert!(c.contains_key("file1")); assert!(!c.contains_key("file2")); assert!(!f.tmp().join("cache").join("file2").exists()); assert!(!p4.exists()); } } mozilla-sccache-40c3d6b/src/main.rs000066400000000000000000000012161475712407500172540ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern crate sccache; fn main() { sccache::main(); } mozilla-sccache-40c3d6b/src/mock_command.rs000066400000000000000000000520311475712407500207600ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Traits and types for mocking process execution. //! //! This module provides a set of traits and types that can be used //! to write code that expects to execute processes using `std::process::Command` //! in a way that can be mocked for tests. //! //! Instead of using `Command::new()`, make your code generic using //! `CommandCreator` as a trait bound, and use its `new_command` method. //! `new_command` returns an object implementing `CommandChild`, which //! mirrors the methods of `Command`. //! //! For production use, you can then instantiate your code with //! `ProcessCommandCreator`, which simply returns `Command::new()` from //! its `new_command` method. //! //! For testing, you can instantiate your code with `MockCommandCreator`, //! which creates `MockCommand` objects which in turn spawn `MockChild` //! objects. You can use `MockCommand::next_command_spawns` to provide //! the result of `spawn` from the next `MockCommand` that it creates. //! `MockCommandCreator::new_command` will fail an `assert` if it attempts //! to create a command and does not have any pending `MockChild` objects //! to hand out, so your tests must provide enough outputs for all //! expected process executions in the test. //! //! If your code under test needs to spawn processes across threads, you //! can use `CommandCreatorSync` as a trait bound, which is implemented for //! `ProcessCommandCreator` (since it has no state), and also for //! `Arc>`. `CommandCreatorSync` provides a //! `new_command_sync` method which your code can call to create new //! objects implementing `CommandChild` in a thread-safe way. Your tests can //! then create an `Arc>` and safely provide //! `MockChild` outputs. use crate::errors::*; use crate::jobserver::{Acquired, Client}; use async_trait::async_trait; use std::boxed::Box; use std::ffi::{OsStr, OsString}; use std::fmt; use std::io; use std::path::Path; use std::process::{Command, ExitStatus, Output, Stdio}; use std::sync::{Arc, Mutex}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::process::{ChildStderr, ChildStdin, ChildStdout}; /// A trait that provides a subset of the methods of `std::process::Child`. #[async_trait] pub trait CommandChild { /// The type of the process' standard input. type I: AsyncWrite + Unpin + Sync + Send + 'static; /// The type of the process' standard output. type O: AsyncRead + Unpin + Sync + Send + 'static; /// The type of the process' standard error. type E: AsyncRead + Unpin + Sync + Send + 'static; /// Take the stdin object from the process, if available. fn take_stdin(&mut self) -> Option; /// Take the stdout object from the process, if available. fn take_stdout(&mut self) -> Option; /// Take the stderr object from the process, if available. fn take_stderr(&mut self) -> Option; /// Wait for the process to complete and return its exit status. async fn wait(self) -> io::Result; /// Wait for the process to complete and return its output. async fn wait_with_output(self) -> io::Result; } /// A trait that provides a subset of the methods of `std::process::Command`. 
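///
/// A sketch of the intended generic usage (`spawn_and_wait` and the program
/// name are illustrative, not part of the crate):
///
/// ```rust,ignore
/// async fn spawn_and_wait<C: CommandCreatorSync>(
///     creator: &mut C,
/// ) -> Result<std::process::ExitStatus> {
///     let child = creator.new_command_sync("true").spawn().await?;
///     Ok(child.wait().await?)
/// }
/// ```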
#[async_trait]
pub trait RunCommand: fmt::Debug + Send {
    /// The type returned by `spawn`.
    type C: CommandChild + Send + 'static;

    /// Append `arg` to the process commandline.
    fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Self;
    /// Append `args` to the process commandline.
    fn args<S: AsRef<OsStr>>(&mut self, args: &[S]) -> &mut Self;
    /// Insert or update an environment variable mapping.
    fn env<K, V>(&mut self, key: K, val: V) -> &mut Self
    where
        K: AsRef<OsStr>,
        V: AsRef<OsStr>;
    /// Add or update multiple environment variable mappings.
    fn envs<I, K, V>(&mut self, vars: I) -> &mut Self
    where
        I: IntoIterator<Item = (K, V)>,
        K: AsRef<OsStr>,
        V: AsRef<OsStr>;
    /// Clears the entire environment map for the child process.
    fn env_clear(&mut self) -> &mut Self;
    /// Set the working directory of the process to `dir`.
    fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Self;
    /// Set the process' stdin from `cfg`.
    fn stdin(&mut self, cfg: Stdio) -> &mut Self;
    /// Set the process' stdout from `cfg`.
    fn stdout(&mut self, cfg: Stdio) -> &mut Self;
    /// Set the process' stderr from `cfg`.
    fn stderr(&mut self, cfg: Stdio) -> &mut Self;
    /// Execute the process and return a process object.
    async fn spawn(&mut self) -> Result<Self::C>;
}

/// A trait that provides a means to create objects implementing `RunCommand`.
///
/// This is provided so that `MockCommandCreator` can have state for testing.
/// For the non-testing scenario, `ProcessCommandCreator` is simply a unit
/// struct with a trivial implementation of this.
pub trait CommandCreator {
    /// The type returned by `new_command`.
    type Cmd: RunCommand;

    /// Create a new instance of this type.
    fn new(client: &Client) -> Self;
    /// Create a new object that implements `RunCommand` that can be used
    /// to create a new process.
    fn new_command<S: AsRef<OsStr>>(&mut self, program: S) -> Self::Cmd;
}

/// A trait for simplifying the normal case while still allowing the mock case requiring mutability.
pub trait CommandCreatorSync: Clone + Send + Sync + 'static {
    type Cmd: RunCommand;

    fn new(client: &Client) -> Self;

    fn new_command_sync<S: AsRef<OsStr>>(&mut self, program: S) -> Self::Cmd;
}

pub struct Child {
    inner: tokio::process::Child,
    token: Acquired,
}

/// Trivial implementation of `CommandChild` for `std::process::Child`.
#[async_trait]
impl CommandChild for Child {
    type I = ChildStdin;
    type O = ChildStdout;
    type E = ChildStderr;

    fn take_stdin(&mut self) -> Option<ChildStdin> {
        self.inner.stdin.take()
    }
    fn take_stdout(&mut self) -> Option<ChildStdout> {
        self.inner.stdout.take()
    }
    fn take_stderr(&mut self) -> Option<ChildStderr> {
        self.inner.stderr.take()
    }

    async fn wait(self) -> io::Result<ExitStatus> {
        let Child { mut inner, token } = self;
        inner.wait().await.map(|ret| {
            drop(token);
            ret
        })
    }

    async fn wait_with_output(self) -> io::Result<Output> {
        let Child { inner, token } = self;
        inner.wait_with_output().await.map(|ret| {
            drop(token);
            ret
        })
    }
}

pub struct AsyncCommand {
    inner: Option<Command>,
    jobserver: Client,
}

impl AsyncCommand {
    pub fn new<S: AsRef<OsStr>>(program: S, jobserver: Client) -> AsyncCommand {
        AsyncCommand {
            inner: Some(Command::new(program)),
            jobserver,
        }
    }

    fn inner(&mut self) -> &mut Command {
        self.inner.as_mut().expect("can't reuse commands")
    }
}

/// Trivial implementation of `RunCommand` for `std::process::Command`.
#[async_trait] impl RunCommand for AsyncCommand { type C = Child; fn arg>(&mut self, arg: S) -> &mut AsyncCommand { self.inner().arg(arg); self } fn args>(&mut self, args: &[S]) -> &mut AsyncCommand { self.inner().args(args); self } fn env(&mut self, key: K, val: V) -> &mut AsyncCommand where K: AsRef, V: AsRef, { self.inner().env(key, val); self } fn envs(&mut self, vars: I) -> &mut Self where I: IntoIterator, K: AsRef, V: AsRef, { self.inner().envs(vars); self } fn env_clear(&mut self) -> &mut AsyncCommand { self.inner().env_clear(); self } fn current_dir>(&mut self, dir: P) -> &mut AsyncCommand { self.inner().current_dir(dir); self } fn stdin(&mut self, cfg: Stdio) -> &mut AsyncCommand { self.inner().stdin(cfg); self } fn stdout(&mut self, cfg: Stdio) -> &mut AsyncCommand { self.inner().stdout(cfg); self } fn stderr(&mut self, cfg: Stdio) -> &mut AsyncCommand { self.inner().stderr(cfg); self } async fn spawn(&mut self) -> Result { let mut inner = self.inner.take().unwrap(); inner.env_remove("MAKEFLAGS"); inner.env_remove("MFLAGS"); inner.env_remove("CARGO_MAKEFLAGS"); self.jobserver.configure(&mut inner); let token = self.jobserver.acquire().await?; let mut inner = tokio::process::Command::from(inner); let child = inner .spawn() .with_context(|| format!("failed to spawn {:?}", inner))?; Ok(Child { inner: child, token, }) } } impl fmt::Debug for AsyncCommand { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.inner.fmt(f) } } /// Struct to use `RunCommand` with `std::process::Command`. #[derive(Clone)] pub struct ProcessCommandCreator { jobserver: Client, } /// Trivial implementation of `CommandCreator` for `ProcessCommandCreator`. impl CommandCreator for ProcessCommandCreator { type Cmd = AsyncCommand; fn new(client: &Client) -> ProcessCommandCreator { ProcessCommandCreator { jobserver: client.clone(), } } fn new_command>(&mut self, program: S) -> AsyncCommand { AsyncCommand::new(program, self.jobserver.clone()) } } /// Trivial implementation of `CommandCreatorSync` for `ProcessCommandCreator`. impl CommandCreatorSync for ProcessCommandCreator { type Cmd = AsyncCommand; fn new(client: &Client) -> ProcessCommandCreator { CommandCreator::new(client) } fn new_command_sync>(&mut self, program: S) -> AsyncCommand { // This doesn't actually use any mutable state. self.new_command(program) } } #[cfg(unix)] use std::os::unix::process::ExitStatusExt; #[cfg(windows)] use std::os::windows::process::ExitStatusExt; #[cfg(unix)] pub type ExitStatusValue = i32; #[cfg(windows)] pub type ExitStatusValue = u32; #[allow(dead_code)] pub fn exit_status(v: ExitStatusValue) -> ExitStatus { ExitStatus::from_raw(v) } /// A struct that mocks `std::process::Child`. #[allow(dead_code)] #[derive(Debug)] pub struct MockChild { //TODO: this doesn't work to actually track writes... /// A `Cursor` to hand out as stdin. pub stdin: Option>>, /// A `Cursor` to hand out as stdout. pub stdout: Option>>, /// A `Cursor` to hand out as stderr. pub stderr: Option>>, /// The `Result` to be handed out when `wait` is called. pub wait_result: Option>, } /// A mocked child process that simply returns stored values for its status and output. impl MockChild { /// Create a `MockChild` that will return the specified `status`, `stdout`, and `stderr` when waited upon. 
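    // For example (a sketch; `exit_status` is the helper defined earlier in
    // this file):
    //
    //     let child = MockChild::new(exit_status(0), "ok", "");
    //     // child.wait_with_output().await?.stdout == b"ok"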
#[allow(dead_code)] pub fn new, U: AsRef<[u8]>>( status: ExitStatus, stdout: T, stderr: U, ) -> MockChild { MockChild { stdin: Some(io::Cursor::new(vec![])), stdout: Some(io::Cursor::new(stdout.as_ref().to_vec())), stderr: Some(io::Cursor::new(stderr.as_ref().to_vec())), wait_result: Some(Ok(status)), } } /// Create a `MockChild` that will return the specified `err` when waited upon. #[allow(dead_code)] pub fn with_error(err: io::Error) -> MockChild { MockChild { stdin: None, stdout: None, stderr: None, wait_result: Some(Err(err)), } } } #[async_trait] impl CommandChild for MockChild { type I = io::Cursor>; type O = io::Cursor>; type E = io::Cursor>; fn take_stdin(&mut self) -> Option>> { self.stdin.take() } fn take_stdout(&mut self) -> Option>> { self.stdout.take() } fn take_stderr(&mut self) -> Option>> { self.stderr.take() } async fn wait(mut self) -> io::Result { self.wait_result.take().unwrap() } async fn wait_with_output(self) -> io::Result { let MockChild { stdout, stderr, wait_result, .. } = self; wait_result.unwrap().map(|status| Output { status, stdout: stdout.map(|c| c.into_inner()).unwrap_or_else(Vec::new), stderr: stderr.map(|c| c.into_inner()).unwrap_or_else(Vec::new), }) } } pub enum ChildOrCall { Child(Result), Call(Box Result + Send>), } impl fmt::Debug for ChildOrCall { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ChildOrCall::Child(ref r) => write!(f, "ChildOrCall::Child({:?}", r), ChildOrCall::Call(_) => write!(f, "ChildOrCall::Call(...)"), } } } /// A mocked command that simply returns its `child` from `spawn`. #[allow(dead_code)] #[derive(Debug)] pub struct MockCommand { pub child: Option, pub args: Vec, } #[async_trait] impl RunCommand for MockCommand { type C = MockChild; fn arg>(&mut self, arg: S) -> &mut MockCommand { self.args.push(arg.as_ref().to_owned()); self } fn args>(&mut self, args: &[S]) -> &mut MockCommand { self.args.extend(args.iter().map(|a| a.as_ref().to_owned())); self } fn env(&mut self, _key: K, _val: V) -> &mut MockCommand where K: AsRef, V: AsRef, { self } fn envs(&mut self, _vars: I) -> &mut Self where I: IntoIterator, K: AsRef, V: AsRef, { self } fn env_clear(&mut self) -> &mut MockCommand { self } fn current_dir>(&mut self, _dir: P) -> &mut MockCommand { //TODO: assert value of dir self } fn stdin(&mut self, _cfg: Stdio) -> &mut MockCommand { self } fn stdout(&mut self, _cfg: Stdio) -> &mut MockCommand { self } fn stderr(&mut self, _cfg: Stdio) -> &mut MockCommand { self } async fn spawn(&mut self) -> Result { match self.child.take().unwrap() { ChildOrCall::Child(c) => c, ChildOrCall::Call(f) => f(&self.args), } } } /// `MockCommandCreator` allows mocking out process creation by providing `MockChild` instances to be used in advance. #[allow(dead_code)] pub struct MockCommandCreator { /// Data to be used as the return value of `MockCommand::spawn`. pub children: Vec, } impl MockCommandCreator { /// The next `MockCommand` created will return `child` from `RunCommand::spawn`. #[allow(dead_code)] pub fn next_command_spawns(&mut self, child: Result) { self.children.push(ChildOrCall::Child(child)); } /// The next `MockCommand` created will call `call` with the command-line /// arguments passed to the command. 
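    // Sketch: this makes it possible to assert on the arguments the code
    // under test passed to its command, e.g.:
    //
    //     creator.next_command_calls(|args| {
    //         assert_eq!(args, &[OsString::from("-c"), OsString::from("foo.c")]);
    //         Ok(MockChild::new(exit_status(0), "", ""))
    //     });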
#[allow(dead_code)] pub fn next_command_calls(&mut self, call: C) where C: Fn(&[OsString]) -> Result + Send + 'static, { self.children.push(ChildOrCall::Call(Box::new(call))); } } impl CommandCreator for MockCommandCreator { type Cmd = MockCommand; fn new(_client: &Client) -> MockCommandCreator { MockCommandCreator { children: Vec::new(), } } fn new_command>(&mut self, _program: S) -> MockCommand { assert!(!self.children.is_empty(), "Too many calls to MockCommandCreator::new_command, or not enough to MockCommandCreator::new_command_spawns!"); //TODO: assert value of program MockCommand { child: Some(self.children.remove(0)), args: vec![], } } } /// To simplify life for using a `CommandCreator` across multiple threads. impl CommandCreatorSync for Arc> { type Cmd = T::Cmd; fn new(client: &Client) -> Arc> { Arc::new(Mutex::new(T::new(client))) } fn new_command_sync>(&mut self, program: S) -> T::Cmd { self.lock().unwrap().new_command(program) } } #[cfg(test)] mod test { use super::*; use crate::jobserver::Client; use crate::test::utils::*; use std::ffi::OsStr; use std::io; use std::process::{ExitStatus, Output}; use std::sync::{Arc, Mutex}; use std::thread; fn spawn_command>( creator: &mut T, program: S, ) -> Result<<::Cmd as RunCommand>::C> { creator.new_command(program).spawn().wait() } fn spawn_wait_command>( creator: &mut T, program: S, ) -> Result { Ok(spawn_command(creator, program)?.wait().wait()?) } fn spawn_output_command>( creator: &mut T, program: S, ) -> Result { Ok(spawn_command(creator, program)?.wait_with_output().wait()?) } fn spawn_on_thread( mut t: T, really: bool, ) -> ExitStatus { thread::spawn(move || { if really { t.new_command_sync("foo") .spawn() .wait() .unwrap() .wait() .wait() .unwrap() } else { exit_status(1) } }) .join() .unwrap() } #[test] fn test_mock_command_wait() { let client = Client::new_num(1); let mut creator = MockCommandCreator::new(&client); creator.next_command_spawns(Ok(MockChild::new(exit_status(0), "hello", "error"))); assert_eq!( 0, spawn_wait_command(&mut creator, "foo") .unwrap() .code() .unwrap() ); } #[test] #[should_panic] fn test_unexpected_new_command() { // If next_command_spawns hasn't been called enough times, // new_command should panic. 
let client = Client::new_num(1); let mut creator = MockCommandCreator::new(&client); creator.new_command("foo").spawn().wait().unwrap(); } #[test] fn test_mock_command_output() { let client = Client::new_num(1); let mut creator = MockCommandCreator::new(&client); creator.next_command_spawns(Ok(MockChild::new(exit_status(0), "hello", "error"))); let output = spawn_output_command(&mut creator, "foo").unwrap(); assert_eq!(0, output.status.code().unwrap()); assert_eq!(b"hello".to_vec(), output.stdout); assert_eq!(b"error".to_vec(), output.stderr); } #[test] fn test_mock_command_calls() { let client = Client::new_num(1); let mut creator = MockCommandCreator::new(&client); creator.next_command_calls(|_| Ok(MockChild::new(exit_status(0), "hello", "error"))); let output = spawn_output_command(&mut creator, "foo").unwrap(); assert_eq!(0, output.status.code().unwrap()); assert_eq!(b"hello".to_vec(), output.stdout); assert_eq!(b"error".to_vec(), output.stderr); } #[test] fn test_mock_spawn_error() { let client = Client::new_num(1); let mut creator = MockCommandCreator::new(&client); creator.next_command_spawns(Err(anyhow!("error"))); let e = spawn_command(&mut creator, "foo").err().unwrap(); assert_eq!("error", e.to_string()); } #[test] fn test_mock_wait_error() { let client = Client::new_num(1); let mut creator = MockCommandCreator::new(&client); creator.next_command_spawns(Ok(MockChild::with_error(io::Error::new( io::ErrorKind::Other, "error", )))); let e = spawn_wait_command(&mut creator, "foo").err().unwrap(); assert_eq!("error", e.to_string()); } #[test] fn test_mock_command_sync() { let client = Client::new_num(1); let creator = Arc::new(Mutex::new(MockCommandCreator::new(&client))); next_command( &creator, Ok(MockChild::new(exit_status(0), "hello", "error")), ); assert_eq!(exit_status(0), spawn_on_thread(creator, true)); } } mozilla-sccache-40c3d6b/src/net.rs000066400000000000000000000141631475712407500171230ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! The module is used to provide abstraction over TCP socket and UDS. use std::fmt; #[cfg(any(target_os = "linux", target_os = "android"))] use std::os::linux::net::SocketAddrExt; use futures::{Future, TryFutureExt}; use tokio::io::{AsyncRead, AsyncWrite}; // A unify version of `std::net::SocketAddr` and Unix domain socket. #[derive(Debug)] pub enum SocketAddr { Net(std::net::SocketAddr), // This could work on Windows in the future. See also rust-lang/rust#56533. #[cfg(unix)] Unix(std::path::PathBuf), #[cfg(any(target_os = "linux", target_os = "android"))] UnixAbstract(Vec), } impl fmt::Display for SocketAddr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SocketAddr::Net(addr) => write!(f, "{}", addr), #[cfg(unix)] SocketAddr::Unix(p) => write!(f, "{}", p.display()), #[cfg(any(target_os = "linux", target_os = "android"))] SocketAddr::UnixAbstract(p) => write!(f, "\\x00{}", p.escape_ascii()), } } } impl SocketAddr { /// Get a Net address that with IP part set to "127.0.0.1". 
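    // Formatting sketch: `SocketAddr::with_port(4226)` displays as
    // "127.0.0.1:4226"; the unix variants display as a plain path or, for
    // abstract sockets, "\x00" followed by the escaped name, which is the
    // same format `parse_uds` below accepts.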
    #[inline]
    pub fn with_port(port: u16) -> Self {
        SocketAddr::Net(std::net::SocketAddr::from(([127, 0, 0, 1], port)))
    }

    #[inline]
    pub fn as_net(&self) -> Option<&std::net::SocketAddr> {
        match self {
            SocketAddr::Net(addr) => Some(addr),
            #[cfg(unix)]
            _ => None,
        }
    }

    /// Parse a string as a unix domain socket.
    ///
    /// The string should follow the format of `self.to_string()`.
    #[cfg(unix)]
    pub fn parse_uds(s: &str) -> std::io::Result<Self> {
        // Parse abstract socket address first as it can contain any chars.
        #[cfg(any(target_os = "linux", target_os = "android"))]
        {
            if s.starts_with("\\x00") {
                // Rust's abstract path API expects the name without the leading '\x00'.
                let data = crate::util::ascii_unescape_default(&s.as_bytes()[4..])?;
                return Ok(SocketAddr::UnixAbstract(data));
            }
        }
        let path = std::path::PathBuf::from(s);
        Ok(SocketAddr::Unix(path))
    }

    #[cfg(unix)]
    pub fn is_unix_path(&self) -> bool {
        matches!(self, SocketAddr::Unix(_))
    }

    #[cfg(not(unix))]
    pub fn is_unix_path(&self) -> bool {
        false
    }
}

// A helper trait to unify the behavior of TCP and UDS listener.
pub trait Acceptor {
    type Socket: AsyncRead + AsyncWrite + Unpin + Send;

    fn accept(&self) -> impl Future<Output = tokio::io::Result<Self::Socket>> + Send;
    fn local_addr(&self) -> tokio::io::Result<Option<SocketAddr>>;
}

impl Acceptor for tokio::net::TcpListener {
    type Socket = tokio::net::TcpStream;

    #[inline]
    fn accept(&self) -> impl Future<Output = tokio::io::Result<Self::Socket>> + Send {
        tokio::net::TcpListener::accept(self).and_then(|(s, _)| futures::future::ok(s))
    }

    #[inline]
    fn local_addr(&self) -> tokio::io::Result<Option<SocketAddr>> {
        tokio::net::TcpListener::local_addr(self).map(|a| Some(SocketAddr::Net(a)))
    }
}

// A helper trait to unify the behavior of TCP and UDS stream.
pub trait Connection: std::io::Read + std::io::Write {
    fn try_clone(&self) -> std::io::Result<Box<dyn Connection>>;
}

impl Connection for std::net::TcpStream {
    #[inline]
    fn try_clone(&self) -> std::io::Result<Box<dyn Connection>> {
        let stream = std::net::TcpStream::try_clone(self)?;
        Ok(Box::new(stream))
    }
}

// Helper function to create a stream. Uses dynamic dispatch to make code more
// readable.
pub fn connect(addr: &SocketAddr) -> std::io::Result<Box<dyn Connection>> {
    match addr {
        SocketAddr::Net(addr) => {
            std::net::TcpStream::connect(addr).map(|s| Box::new(s) as Box<dyn Connection>)
        }
        #[cfg(unix)]
        SocketAddr::Unix(p) => {
            std::os::unix::net::UnixStream::connect(p).map(|s| Box::new(s) as Box<dyn Connection>)
        }
        #[cfg(any(target_os = "linux", target_os = "android"))]
        SocketAddr::UnixAbstract(p) => {
            let sock = std::os::unix::net::SocketAddr::from_abstract_name(p)?;
            std::os::unix::net::UnixStream::connect_addr(&sock)
                .map(|s| Box::new(s) as Box<dyn Connection>)
        }
    }
}

#[cfg(unix)]
mod unix_imp {
    use futures::TryFutureExt;

    use super::*;

    impl Acceptor for tokio::net::UnixListener {
        type Socket = tokio::net::UnixStream;

        #[inline]
        fn accept(&self) -> impl Future<Output = tokio::io::Result<Self::Socket>> + Send {
            tokio::net::UnixListener::accept(self).and_then(|(s, _)| futures::future::ok(s))
        }

        #[inline]
        fn local_addr(&self) -> tokio::io::Result<Option<SocketAddr>> {
            let addr = tokio::net::UnixListener::local_addr(self)?;
            if let Some(p) = addr.as_pathname() {
                return Ok(Some(SocketAddr::Unix(p.to_path_buf())));
            }
            // TODO: support getting the addr from an abstract socket.
            // tokio::net::SocketAddr needs to support `as_abstract_name`.
// #[cfg(any(target_os = "linux", target_os = "android"))] // if let Some(p) = addr.0.as_abstract_name() { // return Ok(SocketAddr::UnixAbstract(p.to_vec())); // } Ok(None) } } impl Connection for std::os::unix::net::UnixStream { #[inline] fn try_clone(&self) -> std::io::Result> { let stream = std::os::unix::net::UnixStream::try_clone(self)?; Ok(Box::new(stream)) } } } mozilla-sccache-40c3d6b/src/protocol.rs000066400000000000000000000050171475712407500201740ustar00rootroot00000000000000use crate::compiler::ColorMode; use crate::server::{DistInfo, ServerInfo}; use serde::{Deserialize, Serialize}; use std::ffi::OsString; /// A client request. #[derive(Serialize, Deserialize, Debug)] pub enum Request { /// Zero the server's statistics. ZeroStats, /// Get server statistics. GetStats, /// Get dist status. DistStatus, /// Shut the server down gracefully. Shutdown, /// Execute a compile or fetch a cached compilation result. Compile(Compile), } /// A server response. #[derive(Serialize, Deserialize, Debug)] pub enum Response { /// Response for `Request::Compile`. Compile(CompileResponse), /// Response for `Request::ZeroStats`. ZeroStats, /// Response for `Request::GetStats`, containing server statistics. Stats(Box), /// Response for `Request::DistStatus`, containing client info. DistStatus(DistInfo), /// Response for `Request::Shutdown`, containing server statistics. ShuttingDown(Box), /// Second response for `Request::Compile`, containing the results of the compilation. CompileFinished(CompileFinished), } /// Possible responses from the server for a `Compile` request. #[derive(Serialize, Deserialize, Debug)] pub enum CompileResponse { /// The compilation was started. CompileStarted, /// The server could not handle this compilation request. UnhandledCompile, /// The compiler was not supported. UnsupportedCompiler(OsString), } /// Information about a finished compile, either from cache or executed locally. #[derive(Serialize, Deserialize, Debug, Default)] pub struct CompileFinished { /// The return code of the compile process, if available. pub retcode: Option, /// The signal that terminated the compile process, if available. pub signal: Option, /// The compiler's stdout. pub stdout: Vec, /// The compiler's stderr. pub stderr: Vec, /// The state of any compiler options passed to control color output. pub color_mode: ColorMode, } /// The contents of a compile request from a client. #[derive(Serialize, Deserialize, Debug)] pub struct Compile { /// The full path to the compiler executable. pub exe: OsString, /// The current working directory in which to execute the compile. pub cwd: OsString, /// The commandline arguments passed to the compiler. pub args: Vec, /// The environment variables present when the compiler was executed, as (var, val). pub env_vars: Vec<(OsString, OsString)>, } mozilla-sccache-40c3d6b/src/server.rs000066400000000000000000002446021475712407500176460ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::cache::readonly::ReadOnlyStorage;
use crate::cache::{storage_from_config, CacheMode, Storage};
use crate::compiler::{
    get_compiler_info, CacheControl, CompileResult, Compiler, CompilerArguments, CompilerHasher,
    CompilerKind, CompilerProxy, DistType, Language, MissType,
};
#[cfg(feature = "dist-client")]
use crate::config;
use crate::config::Config;
use crate::dist;
use crate::jobserver::Client;
use crate::mock_command::{CommandCreatorSync, ProcessCommandCreator};
use crate::protocol::{Compile, CompileFinished, CompileResponse, Request, Response};
use crate::util;
#[cfg(feature = "dist-client")]
use anyhow::Context as _;
use bytes::{buf::BufMut, Bytes, BytesMut};
use filetime::FileTime;
use fs::metadata;
use fs_err as fs;
use futures::channel::mpsc;
use futures::future::FutureExt;
use futures::{future, stream, Sink, SinkExt, Stream, StreamExt, TryFutureExt};
use number_prefix::NumberPrefix;
use serde::{Deserialize, Serialize};
use std::cell::Cell;
use std::collections::{HashMap, HashSet};
use std::env;
use std::ffi::OsString;
use std::future::Future;
use std::io::{self, Write};
use std::marker::Unpin;
#[cfg(feature = "dist-client")]
use std::mem;
#[cfg(any(target_os = "linux", target_os = "android"))]
use std::os::linux::net::SocketAddrExt;
use std::path::PathBuf;
use std::pin::Pin;
use std::process::{ExitStatus, Output};
use std::sync::Arc;
use std::task::{Context, Poll, Waker};
use std::time::Duration;
#[cfg(feature = "dist-client")]
use std::time::Instant;
use tokio::sync::Mutex;
use tokio::sync::RwLock;
use tokio::{
    io::{AsyncRead, AsyncWrite},
    runtime::Runtime,
    time::{self, sleep, Sleep},
};
use tokio_serde::Framed;
use tokio_util::codec::{length_delimited, LengthDelimitedCodec};
use tower::Service;

use crate::errors::*;

/// If the server is idle for this many seconds, shut down.
const DEFAULT_IDLE_TIMEOUT: u64 = 600;

/// If the dist client couldn't be created, retry creation at this number
/// of seconds from now (or later).
#[cfg(feature = "dist-client")]
const DIST_CLIENT_RECREATE_TIMEOUT: Duration = Duration::from_secs(30);

/// Result of background server startup.
#[derive(Debug, Serialize, Deserialize)]
pub enum ServerStartup {
    /// Server started successfully on `addr`.
    Ok { addr: String },
    /// Server addr already in use.
    AddrInUse,
    /// Timed out waiting for server startup.
    TimedOut,
    /// Server encountered an error.
    Err { reason: String },
}

/// Get the time the server should idle for before shutting down, in seconds.
fn get_idle_timeout() -> u64 {
    // A value of 0 disables idle shutdown entirely.
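    // For example, `SCCACHE_IDLE_TIMEOUT=0` keeps the server alive
    // indefinitely, while `SCCACHE_IDLE_TIMEOUT=60` shuts it down after a
    // minute without requests; unparsable values fall back to the default.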
env::var("SCCACHE_IDLE_TIMEOUT") .ok() .and_then(|s| s.parse().ok()) .unwrap_or(DEFAULT_IDLE_TIMEOUT) } fn notify_server_startup_internal(mut w: W, status: ServerStartup) -> Result<()> { util::write_length_prefixed_bincode(&mut w, status) } #[cfg(unix)] fn notify_server_startup(name: &Option, status: ServerStartup) -> Result<()> { use std::os::unix::net::UnixStream; let name = match *name { Some(ref s) => s, None => return Ok(()), }; debug!("notify_server_startup({:?})", status); let stream = UnixStream::connect(name)?; notify_server_startup_internal(stream, status) } #[cfg(windows)] fn notify_server_startup(name: &Option, status: ServerStartup) -> Result<()> { use fs::OpenOptions; let name = match *name { Some(ref s) => s, None => return Ok(()), }; let pipe = OpenOptions::new().write(true).read(true).open(name)?; notify_server_startup_internal(pipe, status) } #[cfg(unix)] fn get_signal(status: ExitStatus) -> i32 { use std::os::unix::prelude::*; status.signal().expect("must have signal") } #[cfg(windows)] fn get_signal(_status: ExitStatus) -> i32 { panic!("no signals on windows") } pub struct DistClientContainer { // The actual dist client state #[cfg(feature = "dist-client")] state: futures::lock::Mutex, } #[cfg(feature = "dist-client")] pub struct DistClientConfig { // Reusable items tied to an SccacheServer instance pool: tokio::runtime::Handle, // From the static dist configuration scheduler_url: Option, auth: config::DistAuth, cache_dir: PathBuf, toolchain_cache_size: u64, toolchains: Vec, rewrite_includes_only: bool, } #[cfg(feature = "dist-client")] pub enum DistClientState { #[cfg(feature = "dist-client")] Some(Box, Arc), #[cfg(feature = "dist-client")] FailWithMessage(Box, String), #[cfg(feature = "dist-client")] RetryCreateAt(Box, Instant), Disabled, } #[cfg(not(feature = "dist-client"))] impl DistClientContainer { #[cfg(not(feature = "dist-client"))] fn new(config: &Config, _: &tokio::runtime::Handle) -> Self { if config.dist.scheduler_url.is_some() { warn!("Scheduler address configured but dist feature disabled, disabling distributed sccache") } Self {} } pub fn new_disabled() -> Self { Self {} } #[cfg(feature = "dist-client")] pub fn new_with_state(_: DistClientState) -> Self { Self {} } pub async fn reset_state(&self) {} pub async fn get_status(&self) -> DistInfo { DistInfo::Disabled("dist-client feature not selected".to_string()) } async fn get_client(&self) -> Result>> { Ok(None) } } #[cfg(feature = "dist-client")] impl DistClientContainer { fn new(config: &Config, pool: &tokio::runtime::Handle) -> Self { let config = DistClientConfig { pool: pool.clone(), scheduler_url: config.dist.scheduler_url.clone(), auth: config.dist.auth.clone(), cache_dir: config.dist.cache_dir.clone(), toolchain_cache_size: config.dist.toolchain_cache_size, toolchains: config.dist.toolchains.clone(), rewrite_includes_only: config.dist.rewrite_includes_only, }; let state = Self::create_state(config); let state = pool.block_on(state); Self { state: futures::lock::Mutex::new(state), } } #[cfg(feature = "dist-client")] pub fn new_with_state(state: DistClientState) -> Self { Self { state: futures::lock::Mutex::new(state), } } pub fn new_disabled() -> Self { Self { state: futures::lock::Mutex::new(DistClientState::Disabled), } } pub async fn reset_state(&self) { let mut guard = self.state.lock().await; let state = &mut *guard; match mem::replace(state, DistClientState::Disabled) { DistClientState::Some(cfg, _) | DistClientState::FailWithMessage(cfg, _) | DistClientState::RetryCreateAt(cfg, _) => { 
warn!("State reset. Will recreate"); *state = DistClientState::RetryCreateAt( cfg, Instant::now().checked_sub(Duration::from_secs(1)).unwrap(), ); } DistClientState::Disabled => (), } } pub async fn get_status(&self) -> DistInfo { let mut guard = self.state.lock().await; let state = &mut *guard; let (client, scheduler_url) = match state { DistClientState::Disabled => return DistInfo::Disabled("disabled".to_string()), DistClientState::FailWithMessage(cfg, _) => { return DistInfo::NotConnected( cfg.scheduler_url.clone(), "enabled, auth not configured".to_string(), ) } DistClientState::RetryCreateAt(cfg, _) => { return DistInfo::NotConnected( cfg.scheduler_url.clone(), "enabled, not connected, will retry".to_string(), ) } DistClientState::Some(cfg, client) => (Arc::clone(client), cfg.scheduler_url.clone()), }; match client.do_get_status().await { Ok(res) => DistInfo::SchedulerStatus(scheduler_url.clone(), res), Err(_) => DistInfo::NotConnected( scheduler_url.clone(), "could not communicate with scheduler".to_string(), ), } } async fn get_client(&self) -> Result>> { let mut guard = self.state.lock().await; let state = &mut *guard; Self::maybe_recreate_state(state).await; let res = match state { DistClientState::Some(_, dc) => Ok(Some(dc.clone())), DistClientState::Disabled | DistClientState::RetryCreateAt(_, _) => Ok(None), DistClientState::FailWithMessage(_, msg) => Err(anyhow!(msg.clone())), }; if res.is_err() { let config = match mem::replace(state, DistClientState::Disabled) { DistClientState::FailWithMessage(config, _) => config, _ => unreachable!(), }; // The client is most likely mis-configured, make sure we // re-create on our next attempt. *state = DistClientState::RetryCreateAt( config, Instant::now().checked_sub(Duration::from_secs(1)).unwrap(), ); } res } async fn maybe_recreate_state(state: &mut DistClientState) { if let DistClientState::RetryCreateAt(_, instant) = *state { if instant > Instant::now() { return; } let config = match mem::replace(state, DistClientState::Disabled) { DistClientState::RetryCreateAt(config, _) => config, _ => unreachable!(), }; info!("Attempting to recreate the dist client"); *state = Self::create_state(*config).await } } // Attempt to recreate the dist client async fn create_state(config: DistClientConfig) -> DistClientState { macro_rules! try_or_retry_later { ($v:expr) => {{ match $v { Ok(v) => v, Err(e) => { // `{:?}` prints the full cause chain and backtrace. error!("{:?}", e); return DistClientState::RetryCreateAt( Box::new(config), Instant::now() + DIST_CLIENT_RECREATE_TIMEOUT, ); } } }}; } macro_rules! try_or_fail_with_message { ($v:expr) => {{ match $v { Ok(v) => v, Err(e) => { // `{:?}` prints the full cause chain and backtrace. let errmsg = format!("{:?}", e); error!("{}", errmsg); return DistClientState::FailWithMessage( Box::new(config), errmsg.to_string(), ); } } }}; } match config.scheduler_url { Some(ref addr) => { let url = addr.to_url(); info!("Enabling distributed sccache to {}", url); let auth_token = match &config.auth { config::DistAuth::Token { token } => Ok(token.to_owned()), config::DistAuth::Oauth2CodeGrantPKCE { auth_url, .. } | config::DistAuth::Oauth2Implicit { auth_url, .. 
} => { Self::get_cached_config_auth_token(auth_url) } }; let auth_token = try_or_fail_with_message!(auth_token .context("could not load client auth token, run |sccache --dist-auth|")); let dist_client = dist::http::Client::new( &config.pool, url, &config.cache_dir.join("client"), config.toolchain_cache_size, &config.toolchains, auth_token, config.rewrite_includes_only, ); let dist_client = try_or_retry_later!(dist_client.context("failure during dist client creation")); use crate::dist::Client; match dist_client.do_get_status().await { Ok(res) => { info!( "Successfully created dist client with {:?} cores across {:?} servers", res.num_cpus, res.num_servers ); DistClientState::Some(Box::new(config), Arc::new(dist_client)) } Err(_) => { warn!("Scheduler address configured, but could not communicate with scheduler"); DistClientState::RetryCreateAt( Box::new(config), Instant::now() + DIST_CLIENT_RECREATE_TIMEOUT, ) } } } None => { info!("No scheduler address configured, disabling distributed sccache"); DistClientState::Disabled } } } fn get_cached_config_auth_token(auth_url: &str) -> Result { let cached_config = config::CachedConfig::reload()?; cached_config .with(|c| c.dist.auth_tokens.get(auth_url).map(String::to_owned)) .with_context(|| format!("token for url {} not present in cached config", auth_url)) } } thread_local! { /// catch_unwind doesn't provide panic location, so we store that /// information via a panic hook to be used when catch_unwind /// catches a panic. static PANIC_LOCATION: Cell> = const { Cell::new(None) }; } /// Start an sccache server, listening on `addr`. /// /// Spins an event loop handling client connections until a client /// requests a shutdown. pub fn start_server(config: &Config, addr: &crate::net::SocketAddr) -> Result<()> { info!("start_server: {addr}"); let panic_hook = std::panic::take_hook(); std::panic::set_hook(Box::new(move |info| { PANIC_LOCATION.with(|l| { l.set( info.location() .map(|loc| (loc.file().to_string(), loc.line(), loc.column())), ) }); panic_hook(info) })); let client = Client::new(); let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .worker_threads(std::cmp::max(20, 2 * num_cpus::get())) .build()?; let pool = runtime.handle().clone(); let dist_client = DistClientContainer::new(config, &pool); let notify = env::var_os("SCCACHE_STARTUP_NOTIFY"); let raw_storage = match storage_from_config(config, &pool) { Ok(storage) => storage, Err(err) => { error!("storage init failed for: {err:?}"); notify_server_startup( ¬ify, ServerStartup::Err { reason: err.to_string(), }, )?; return Err(err); } }; let cache_mode = runtime.block_on(async { match raw_storage.check().await { Ok(mode) => Ok(mode), Err(err) => { error!("storage check failed for: {err:?}"); notify_server_startup( ¬ify, ServerStartup::Err { reason: err.to_string(), }, )?; Err(err) } } })?; info!("server has setup with {cache_mode:?}"); let storage = match cache_mode { CacheMode::ReadOnly => Arc::new(ReadOnlyStorage(raw_storage)), _ => raw_storage, }; let res = (|| -> io::Result<_> { match addr { crate::net::SocketAddr::Net(addr) => { trace!("binding TCP {addr}"); let l = runtime.block_on(tokio::net::TcpListener::bind(addr))?; let srv = SccacheServer::<_>::with_listener(l, runtime, client, dist_client, storage); Ok(( srv.local_addr().unwrap(), Box::new(move |f| srv.run(f)) as Box _>, )) } #[cfg(unix)] crate::net::SocketAddr::Unix(path) => { trace!("binding unix socket {}", path.display()); // Unix socket will report addr in use on any unlink file. 
let _ = std::fs::remove_file(path); let l = { let _guard = runtime.enter(); tokio::net::UnixListener::bind(path)? }; let srv = SccacheServer::<_>::with_listener(l, runtime, client, dist_client, storage); Ok(( srv.local_addr().unwrap(), Box::new(move |f| srv.run(f)) as Box _>, )) } #[cfg(any(target_os = "linux", target_os = "android"))] crate::net::SocketAddr::UnixAbstract(p) => { trace!("binding abstract unix socket {}", p.escape_ascii()); let abstract_addr = std::os::unix::net::SocketAddr::from_abstract_name(p)?; let l = std::os::unix::net::UnixListener::bind_addr(&abstract_addr)?; l.set_nonblocking(true)?; let l = { let _guard = runtime.enter(); tokio::net::UnixListener::from_std(l)? }; let srv = SccacheServer::<_>::with_listener(l, runtime, client, dist_client, storage); Ok(( srv.local_addr() .unwrap_or_else(|| crate::net::SocketAddr::UnixAbstract(p.to_vec())), Box::new(move |f| srv.run(f)) as Box _>, )) } } })(); match res { Ok((addr, run)) => { info!("server started, listening on {addr}"); notify_server_startup( ¬ify, ServerStartup::Ok { addr: addr.to_string(), }, )?; run(future::pending::<()>())?; Ok(()) } Err(e) => { error!("failed to start server: {}", e); if io::ErrorKind::AddrInUse == e.kind() { notify_server_startup(¬ify, ServerStartup::AddrInUse)?; } else if cfg!(windows) && Some(10013) == e.raw_os_error() { // 10013 is the "WSAEACCES" error, which can occur if the requested port // has been allocated for other purposes, such as winNAT or Hyper-V. let windows_help_message = "A Windows port exclusion is blocking use of the configured port.\nTry setting SCCACHE_SERVER_PORT to a new value."; let reason: String = format!("{windows_help_message}\n{e}"); notify_server_startup(¬ify, ServerStartup::Err { reason })?; } else { let reason = e.to_string(); notify_server_startup(¬ify, ServerStartup::Err { reason })?; } Err(e.into()) } } } pub struct SccacheServer { runtime: Runtime, listener: A, rx: mpsc::Receiver, timeout: Duration, service: SccacheService, wait: WaitUntilZero, } impl SccacheServer { pub fn new( port: u16, runtime: Runtime, client: Client, dist_client: DistClientContainer, storage: Arc, ) -> Result { let addr = crate::net::SocketAddr::with_port(port); let listener = runtime.block_on(tokio::net::TcpListener::bind(addr.as_net().unwrap()))?; Ok(Self::with_listener( listener, runtime, client, dist_client, storage, )) } } impl SccacheServer { pub fn with_listener( listener: A, runtime: Runtime, client: Client, dist_client: DistClientContainer, storage: Arc, ) -> Self { // Prepare the service which we'll use to service all incoming TCP // connections. let (tx, rx) = mpsc::channel(1); let (wait, info) = WaitUntilZero::new(); let pool = runtime.handle().clone(); let service = SccacheService::new(dist_client, storage, &client, pool, tx, info); SccacheServer { runtime, listener, rx, service, timeout: Duration::from_secs(get_idle_timeout()), wait, } } /// Configures how long this server will be idle before shutting down. #[allow(dead_code)] pub fn set_idle_timeout(&mut self, timeout: Duration) { self.timeout = timeout; } /// Set the storage this server will use. 
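    // Construction sketch (assuming a `storage: Arc<dyn Storage>` built via
    // `storage_from_config`); `run` blocks until shutdown:
    //
    //     let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
    //     let client = Client::new();
    //     let dist_client = DistClientContainer::new_disabled();
    //     let mut srv = SccacheServer::new(4226, runtime, client, dist_client, storage)?;
    //     srv.set_idle_timeout(Duration::from_secs(0)); // never idle out
    //     srv.run(futures::future::pending::<()>())?;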
#[allow(dead_code)] pub fn set_storage(&mut self, storage: Arc) { self.service.storage = storage; } /// Returns a reference to a thread pool to run work on #[allow(dead_code)] pub fn pool(&self) -> &tokio::runtime::Handle { &self.service.rt } /// Returns a reference to the command creator this server will use #[allow(dead_code)] pub fn command_creator(&self) -> &C { &self.service.creator } /// Returns the port that this server is bound to #[allow(dead_code)] pub fn local_addr(&self) -> Option { self.listener.local_addr().unwrap() } /// Runs this server to completion. /// /// If the `shutdown` future resolves then the server will be shut down, /// otherwise the server may naturally shut down if it becomes idle for too /// long anyway. pub fn run(self, shutdown: F) -> io::Result<()> where F: Future, C: Send, A::Socket: 'static, { let SccacheServer { runtime, listener, rx, service, timeout, wait, } = self; // Create our "server future" which will simply handle all incoming // connections in separate tasks. let server = async move { loop { let socket = listener.accept().await?; trace!("incoming connection"); let conn = service.clone().bind(socket).map_err(|res| { error!("Failed to bind socket: {}", res); }); // We're not interested if the task panicked; immediately process // another connection #[allow(clippy::let_underscore_future)] let _ = tokio::spawn(conn); } }; // Right now there's a whole bunch of ways to shut down this server for // various purposes. These include: // // 1. The `shutdown` future above. // 2. An RPC indicating the server should shut down // 3. A period of inactivity (no requests serviced) // // These are all encapsulated with the future that we're creating below. // The `ShutdownOrInactive` indicates the RPC or the period of // inactivity, and this is then select'd with the `shutdown` future // passed to this function. let shutdown = shutdown.map(|_| { info!("shutting down due to explicit signal"); }); let shutdown_idle = async { ShutdownOrInactive { rx, timeout: if timeout != Duration::new(0, 0) { Some(Box::pin(sleep(timeout))) } else { None }, timeout_dur: timeout, } .await; info!("shutting down due to being idle or request"); }; runtime.block_on(async { futures::select! { server = server.fuse() => server, _res = shutdown.fuse() => Ok(()), _res = shutdown_idle.fuse() => Ok::<_, io::Error>(()), } })?; const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); info!( "moving into the shutdown phase now, waiting at most {} seconds \ for all client requests to complete", SHUTDOWN_TIMEOUT.as_secs() ); // Once our server has shut down either due to inactivity or a manual // request we still need to give a bit of time for all active // connections to finish. This `wait` future will resolve once all // instances of `SccacheService` have been dropped. // // Note that we cap the amount of time this can take, however, as we // don't want to wait *too* long. 
runtime.block_on(async { time::timeout(SHUTDOWN_TIMEOUT, wait).await })?; info!("ok, fully shutting down now"); Ok(()) } } /// Maps a compiler proxy path to a compiler proxy and it's last modification time type CompilerProxyMap = HashMap>, FileTime)>; type CompilerMap = HashMap>>; /// entry of the compiler cache struct CompilerCacheEntry { /// compiler argument trait obj pub compiler: Box>, /// modification time of the compilers executable file pub mtime: FileTime, /// distributed compilation extra info pub dist_info: Option<(PathBuf, FileTime)>, } impl CompilerCacheEntry { fn new( compiler: Box>, mtime: FileTime, dist_info: Option<(PathBuf, FileTime)>, ) -> Self { Self { compiler, mtime, dist_info, } } } /// Service implementation for sccache #[derive(Clone)] pub struct SccacheService where C: Send, { /// Server statistics. stats: Arc>, /// Distributed sccache client dist_client: Arc, /// Cache storage. storage: Arc, /// A cache of known compiler info. compilers: Arc>>, /// map the cwd with compiler proxy path to a proxy resolver, which /// will dynamically resolve the input compiler for the current context /// (usually file or current working directory) /// the associated `FileTime` is the modification time of /// the compiler proxy, in order to track updates of the proxy itself compiler_proxies: Arc>>, /// Task pool for blocking (used mostly for disk I/O-bound tasks) and // non-blocking tasks rt: tokio::runtime::Handle, /// An object for creating commands. /// /// This is mostly useful for unit testing, where we /// can mock this out. creator: C, /// Message channel used to learn about requests received by this server. /// /// Note that messages sent along this channel will keep the server alive /// (reset the idle timer) and this channel can also be used to shut down /// the entire server immediately via a message. tx: mpsc::Sender, /// Information tracking how many services (connected clients) are active. /// This field causes [WaitUntilZero] to wait until this struct drops. #[allow(dead_code)] info: ActiveInfo, } type SccacheRequest = Message>; type SccacheResponse = Message> + Send>>>; /// Messages sent from all services to the main event loop indicating activity. /// /// Whenever a request is receive a `Request` message is sent which will reset /// the idle shutdown timer, and otherwise a `Shutdown` message indicates that /// a server shutdown was requested via an RPC. pub enum ServerMessage { /// A message sent whenever a request is received. Request, /// Message sent whenever a shutdown request is received. Shutdown, } impl Service for Arc> where C: CommandCreatorSync + Send + Sync + 'static, { type Response = SccacheResponse; type Error = Error; type Future = Pin> + Send + 'static>>; fn call(&mut self, req: SccacheRequest) -> Self::Future { trace!("handle_client"); // Opportunistically let channel know that we've received a request. We // ignore failures here as well as backpressure as it's not imperative // that every message is received. 
drop(self.tx.clone().start_send(ServerMessage::Request)); let me = self.clone(); Box::pin(async move { match req.into_inner() { Request::Compile(compile) => { debug!("handle_client: compile"); me.stats.lock().await.compile_requests += 1; me.handle_compile(compile).await } Request::GetStats => { debug!("handle_client: get_stats"); me.get_info() .await .map(|i| Response::Stats(Box::new(i))) .map(Message::WithoutBody) } Request::DistStatus => { debug!("handle_client: dist_status"); me.get_dist_status() .await .map(Response::DistStatus) .map(Message::WithoutBody) } Request::ZeroStats => { debug!("handle_client: zero_stats"); me.zero_stats().await; Ok(Message::WithoutBody(Response::ZeroStats)) } Request::Shutdown => { debug!("handle_client: shutdown"); let mut tx = me.tx.clone(); future::try_join( async { let _ = tx.send(ServerMessage::Shutdown).await; Ok(()) }, me.get_info(), ) .await .map(move |(_, info)| { Message::WithoutBody(Response::ShuttingDown(Box::new(info))) }) } } }) } fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } } use futures::future::Either; use futures::TryStreamExt; impl SccacheService where C: CommandCreatorSync + Clone + Send + Sync + 'static, { pub fn new( dist_client: DistClientContainer, storage: Arc, client: &Client, rt: tokio::runtime::Handle, tx: mpsc::Sender, info: ActiveInfo, ) -> SccacheService { SccacheService { stats: Arc::default(), dist_client: Arc::new(dist_client), storage, compilers: Arc::default(), compiler_proxies: Arc::default(), rt, creator: C::new(client), tx, info, } } pub fn mock_with_storage( storage: Arc, rt: tokio::runtime::Handle, ) -> SccacheService { let (tx, _) = mpsc::channel(1); let (_, info) = WaitUntilZero::new(); let client = Client::new_num(1); let dist_client = DistClientContainer::new_disabled(); SccacheService { stats: Arc::default(), dist_client: Arc::new(dist_client), storage, compilers: Arc::default(), compiler_proxies: Arc::default(), rt, creator: C::new(&client), tx, info, } } #[cfg(feature = "dist-client")] pub fn mock_with_dist_client( dist_client: Arc, storage: Arc, rt: tokio::runtime::Handle, ) -> SccacheService { let (tx, _) = mpsc::channel(1); let (_, info) = WaitUntilZero::new(); let client = Client::new_num(1); SccacheService { stats: Arc::default(), dist_client: Arc::new(DistClientContainer::new_with_state(DistClientState::Some( Box::new(DistClientConfig { pool: rt.clone(), scheduler_url: None, auth: config::DistAuth::Token { token: "".into() }, cache_dir: "".into(), toolchain_cache_size: 0, toolchains: vec![], rewrite_includes_only: false, }), dist_client, ))), storage, compilers: Arc::default(), compiler_proxies: Arc::default(), rt: rt.clone(), creator: C::new(&client), tx, info, } } fn bind(self, socket: T) -> impl Future> + Send + Sized + 'static where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { let mut builder = length_delimited::Builder::new(); if let Ok(max_frame_length_str) = env::var("SCCACHE_MAX_FRAME_LENGTH") { if let Ok(max_frame_length) = max_frame_length_str.parse::() { builder.max_frame_length(max_frame_length); } else { warn!("Content of SCCACHE_MAX_FRAME_LENGTH is not a valid number, using default"); } } let io = builder.new_framed(socket); let (sink, stream) = SccacheTransport { inner: Framed::new(io.sink_err_into().err_into(), BincodeCodec), } .split(); let sink = sink.sink_err_into::(); let me = Arc::new(self); stream .err_into::() .and_then(move |input| me.clone().call(input)) .and_then(move |response| async move { let fut = match response { 
Message::WithoutBody(message) => { let stream = stream::once(async move { Ok(Frame::Message { message }) }); Either::Left(stream) } Message::WithBody(message, body) => { let stream = stream::once(async move { Ok(Frame::Message { message }) }) .chain( body.into_stream() .map_ok(|chunk| Frame::Body { chunk: Some(chunk) }), ) .chain(stream::once(async move { Ok(Frame::Body { chunk: None }) })); Either::Right(stream) } }; Ok(Box::pin(fut)) }) .try_flatten() .forward(sink) } /// Get dist status. async fn get_dist_status(&self) -> Result { Ok(self.dist_client.get_status().await) } /// Get info and stats about the cache. async fn get_info(&self) -> Result { let stats = self.stats.lock().await.clone(); ServerInfo::new(stats, Some(&*self.storage)).await } /// Zero stats about the cache. async fn zero_stats(&self) { *self.stats.lock().await = ServerStats::default(); } /// Handle a compile request from a client. /// /// This will handle a compile request entirely, generating a response with /// the initial information and an optional body which will eventually /// contain the results of the compilation. async fn handle_compile(&self, compile: Compile) -> Result { let exe = compile.exe; let cmd = compile.args; let cwd: PathBuf = compile.cwd.into(); let env_vars = compile.env_vars; let me = self.clone(); let info = self .compiler_info(exe.into(), cwd.clone(), &cmd, &env_vars) .await; Ok(me.check_compiler(info, cmd, cwd, env_vars).await) } /// Look up compiler info from the cache for the compiler `path`. /// If not cached, determine the compiler type and cache the result. pub async fn compiler_info( &self, path: PathBuf, cwd: PathBuf, args: &[OsString], env: &[(OsString, OsString)], ) -> Result>> { trace!("compiler_info"); let me = self.clone(); let me1 = self.clone(); // lookup if compiler proxy exists for the current compiler path let path2 = path.clone(); let path1 = path.clone(); let env = env.to_vec(); let resolved_with_proxy = { let compiler_proxies_borrow = self.compiler_proxies.read().await; // Create an owned future - compiler proxy is not Send so we can't // really await while borrowing the proxy since rustc is too conservative let resolve_proxied_executable = compiler_proxies_borrow .get(&path) .map(|(compiler_proxy, _filetime)| { compiler_proxy.resolve_proxied_executable( self.creator.clone(), cwd.clone(), env.as_slice(), ) }); match resolve_proxied_executable { Some(fut) => fut.await.ok(), None => None, } }; // use the supplied compiler path as fallback, lookup its modification time too let (resolved_compiler_path, mtime) = match resolved_with_proxy { Some(x) => x, // TODO resolve the path right away _ => { // fallback to using the path directly metadata(&path2) .map(|attr| FileTime::from_last_modification_time(&attr)) .ok() .map(move |filetime| (path2, filetime)) .expect("Must contain sane data, otherwise mtime is not avail") } }; // canonicalize the path to follow symlinks // don't canonicalize if the file name differs so it works with clang's multicall let resolved_compiler_path = match resolved_compiler_path.canonicalize() { Ok(path) if matches!(path.file_name(), Some(name) if resolved_compiler_path.file_name() == Some(name)) => { path } _ => resolved_compiler_path, }; let dist_info = match me1.dist_client.get_client().await { Ok(Some(ref client)) => { if let Some(archive) = client.get_custom_toolchain(&resolved_compiler_path) { match metadata(&archive) .map(|attr| FileTime::from_last_modification_time(&attr)) { Ok(mtime) => Some((archive, mtime)), _ => None, } } else { None } } _ => 
None, }; let opt = match me1.compilers.read().await.get(&resolved_compiler_path) { // It's a hit only if the mtime and dist archive data matches. Some(Some(entry)) => { if entry.mtime == mtime && entry.dist_info == dist_info { Some(entry.compiler.box_clone()) } else { None } } _ => None, }; match opt { Some(info) => { trace!("compiler_info cache hit"); Ok(info) } None => { trace!("compiler_info cache miss"); // Check the compiler type and return the result when // finished. This generally involves invoking the compiler, // so do it asynchronously. // the compiler path might be compiler proxy, so it is important to use // `path` (or its clone `path1`) to resolve using that one, not using `resolved_compiler_path` let info = get_compiler_info::( me.creator.clone(), &path1, &cwd, args, env.as_slice(), &me.rt, dist_info.clone().map(|(p, _)| p), ) .await; let (c, proxy) = match info { Ok((c, proxy)) => (c.clone(), proxy.clone()), Err(err) => { trace!("Inserting PLAIN cache map info for {:?}", &path); me.compilers.write().await.insert(path, None); return Err(err); } }; // register the proxy for this compiler, so it will be used directly from now on // and the true/resolved compiler will create table hits in the hash map // based on the resolved path if let Some(proxy) = proxy { trace!( "Inserting new path proxy {:?} @ {:?} -> {:?}", &path, &cwd, resolved_compiler_path ); me.compiler_proxies .write() .await .insert(path, (proxy, mtime)); } // TODO add some safety checks in case a proxy exists, that the initial `path` is not // TODO the same as the resolved compiler binary // cache let map_info = CompilerCacheEntry::new(c.clone(), mtime, dist_info); trace!( "Inserting POSSIBLY PROXIED cache map info for {:?}", &resolved_compiler_path ); me.compilers .write() .await .insert(resolved_compiler_path, Some(map_info)); // drop the proxy information, response is compiler only Ok(c) } } } /// Check that we can handle and cache `cmd` when run with `compiler`. /// If so, run `start_compile_task` to execute it. async fn check_compiler( &self, compiler: Result>>, cmd: Vec, cwd: PathBuf, env_vars: Vec<(OsString, OsString)>, ) -> SccacheResponse { match compiler { Err(e) => { debug!("check_compiler: Unsupported compiler: {}", e.to_string()); self.stats.lock().await.requests_unsupported_compiler += 1; return Message::WithoutBody(Response::Compile( CompileResponse::UnsupportedCompiler(OsString::from(e.to_string())), )); } Ok(c) => { debug!("check_compiler: Supported compiler"); // Now check that we can handle this compiler with // the provided commandline. 
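                // `parse_arguments` has three outcomes: a cacheable compile
                // (start a compile task and stream back the result),
                // CannotCache, or NotCompilation; the latter two fall through
                // to `UnhandledCompile` below so the client runs the compiler
                // itself.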
match c.parse_arguments(&cmd, &cwd, &env_vars) { CompilerArguments::Ok(hasher) => { debug!("parse_arguments: Ok: {:?}", cmd); let body = self .clone() .start_compile_task(c, hasher, cmd, cwd, env_vars) .and_then(|res| async { Ok(Response::CompileFinished(res)) }) .boxed(); return Message::WithBody( Response::Compile(CompileResponse::CompileStarted), body, ); } CompilerArguments::CannotCache(why, extra_info) => { if let Some(extra_info) = extra_info { debug!( "parse_arguments: CannotCache({}, {}): {:?}", why, extra_info, cmd ) } else { debug!("parse_arguments: CannotCache({}): {:?}", why, cmd) } let mut stats = self.stats.lock().await; stats.requests_not_cacheable += 1; *stats.not_cached.entry(why.to_string()).or_insert(0) += 1; } CompilerArguments::NotCompilation => { debug!("parse_arguments: NotCompilation: {:?}", cmd); self.stats.lock().await.requests_not_compile += 1; } } } } let res = CompileResponse::UnhandledCompile; Message::WithoutBody(Response::Compile(res)) } /// Given compiler arguments `arguments`, look up /// a compile result in the cache or execute the compilation and store /// the result in the cache. pub async fn start_compile_task( self, compiler: Box>, hasher: Box>, arguments: Vec, cwd: PathBuf, env_vars: Vec<(OsString, OsString)>, ) -> Result { self.stats.lock().await.requests_executed += 1; let force_recache = env_vars.iter().any(|(k, _v)| k == "SCCACHE_RECACHE"); let force_no_cache = env_vars.iter().any(|(k, _v)| k == "SCCACHE_NO_CACHE"); let cache_control = if force_no_cache { CacheControl::ForceNoCache } else if force_recache { CacheControl::ForceRecache } else { CacheControl::Default }; let out_pretty = hasher.output_pretty().into_owned(); let color_mode = hasher.color_mode(); let (kind, lang) = { // HACK: See note in src/compiler/nvcc.rs if env_vars .iter() .any(|(k, _)| k == "__SCCACHE_THIS_IS_A_CUDA_COMPILATION__") { ( CompilerKind::C(crate::compiler::CCompilerKind::Nvcc), Language::Cuda, ) } else { (compiler.kind(), hasher.language()) } }; let me = self.clone(); self.rt .spawn(async move { let result = match me.dist_client.get_client().await { Ok(client) => { std::panic::AssertUnwindSafe(hasher .get_cached_or_compile( &me, client, me.creator.clone(), me.storage.clone(), arguments, cwd, env_vars, cache_control, me.rt.clone(), ) ) .catch_unwind() .await .map_err(|e| { let panic = e .downcast_ref::<&str>() .map(|s| &**s) .or_else(|| e.downcast_ref::().map(|s| &**s)) .unwrap_or("An unknown panic was caught."); let thread = std::thread::current(); let thread_name = thread.name().unwrap_or("unnamed"); if let Some((file, line, column)) = PANIC_LOCATION.with(|l| l.take()) { anyhow!("thread '{thread_name}' panicked at {file}:{line}:{column}: {panic}") } else { anyhow!("thread '{thread_name}' panicked: {panic}") } }) .and_then(std::convert::identity) } Err(e) => Err(e), }; let mut cache_write = None; let mut res = CompileFinished { color_mode, ..Default::default() }; let mut stats = me.stats.lock().await; match result { Ok((compiled, out)) => { let mut dist_type = DistType::NoDist; match compiled { CompileResult::Error => { debug!("compile result: cache error"); stats.cache_errors.increment(&kind, &lang); } CompileResult::CacheHit(duration) => { debug!("compile result: cache hit"); stats.cache_hits.increment(&kind, &lang); stats.cache_read_hit_duration += duration; } CompileResult::CacheMiss(miss_type, dt, duration, future) => { debug!("[{}]: compile result: cache miss", out_pretty); dist_type = dt; match miss_type { MissType::Normal => {} MissType::ForcedNoCache => {} 
MissType::ForcedRecache => { stats.forced_recaches += 1; } MissType::TimedOut => { stats.cache_timeouts += 1; } MissType::CacheReadError => { stats.cache_errors.increment(&kind, &lang); } } stats.compilations += 1; stats.cache_misses.increment(&kind, &lang); stats.compiler_write_duration += duration; debug!("stats after compile result: {stats:?}"); cache_write = Some(future); } CompileResult::NotCached(dt, duration) => { debug!("[{}]: compile result: not cached", out_pretty); dist_type = dt; stats.compilations += 1; stats.compiler_write_duration += duration; } CompileResult::NotCacheable(dt, duration) => { debug!("[{}]: compile result: not cacheable", out_pretty); dist_type = dt; stats.compilations += 1; stats.compiler_write_duration += duration; stats.non_cacheable_compilations += 1; } CompileResult::CompileFailed(dt, duration) => { debug!("[{}]: compile result: compile failed", out_pretty); dist_type = dt; stats.compilations += 1; stats.compiler_write_duration += duration; stats.compile_fails += 1; } }; match dist_type { DistType::NoDist => {} DistType::Ok(id) => { let server = id.addr().to_string(); let server_count = stats.dist_compiles.entry(server).or_insert(0); *server_count += 1; } DistType::Error => stats.dist_errors += 1, } // Make sure the write guard has been dropped ASAP. drop(stats); let Output { status, stdout, stderr, } = out; trace!("CompileFinished retcode: {}", status); match status.code() { Some(code) => res.retcode = Some(code), None => res.signal = Some(get_signal(status)), }; res.stdout = stdout; res.stderr = stderr; } Err(err) => { match err.downcast::() { Ok(ProcessError(output)) => { debug!("Compilation failed: {:?}", output); stats.compile_fails += 1; // Make sure the write guard has been dropped ASAP. drop(stats); match output.status.code() { Some(code) => res.retcode = Some(code), None => res.signal = Some(get_signal(output.status)), }; res.stdout = output.stdout; res.stderr = output.stderr; } Err(err) => match err.downcast::() { Ok(HttpClientError(msg)) => { // Make sure the write guard has been dropped ASAP. drop(stats); me.dist_client.reset_state().await; let errmsg = format!("[{:?}] http error status: {}", out_pretty, msg); error!("{}", errmsg); res.retcode = Some(1); res.stderr = errmsg.as_bytes().to_vec(); } Err(err) => { stats.cache_errors.increment(&kind, &lang); // Make sure the write guard has been dropped ASAP. drop(stats); use std::fmt::Write; error!("[{:?}] fatal error: {}", out_pretty, err); let mut error = "sccache: encountered fatal error\n".to_string(); let _ = writeln!(error, "sccache: error: {}", err); for e in err.chain() { error!("[{:?}] \t{}", out_pretty, e); let _ = writeln!(error, "sccache: caused by: {}", e); } //TODO: figure out a better way to communicate this? res.retcode = Some(-2); res.stderr = error.into_bytes(); } }, } } }; if let Some(cache_write) = cache_write { match cache_write.await { Err(e) => { debug!("Error executing cache write: {}", e); me.stats.lock().await.cache_write_errors += 1; } //TODO: save cache stats! Ok(info) => { debug!( "[{}]: Cache write finished in {}", info.object_file_pretty, util::fmt_duration_as_secs(&info.duration) ); let mut stats = me.stats.lock().await; stats.cache_writes += 1; stats.cache_write_duration += info.duration; } } } Ok(res) }) .map_err(anyhow::Error::new) .await? 
} } #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct PerLanguageCount { counts: HashMap, adv_counts: HashMap, } impl PerLanguageCount { fn increment(&mut self, kind: &CompilerKind, lang: &Language) { let lang_comp_key = kind.lang_comp_kind(lang); let adv_count = self.adv_counts.entry(lang_comp_key).or_insert(0); *adv_count += 1; let lang_key = kind.lang_kind(lang); let count = self.counts.entry(lang_key).or_insert(0); *count += 1; } pub fn all(&self) -> u64 { self.counts.values().sum() } pub fn get(&self, key: &str) -> Option<&u64> { self.counts.get(key) } pub fn get_adv(&self, key: &str) -> Option<&u64> { self.adv_counts.get(key) } pub fn new() -> PerLanguageCount { Self::default() } } /// Statistics about the server. #[derive(Serialize, Deserialize, Clone, Debug)] pub struct ServerStats { /// The count of client compile requests. pub compile_requests: u64, /// The count of client requests that used an unsupported compiler. pub requests_unsupported_compiler: u64, /// The count of client requests that were not compilation. pub requests_not_compile: u64, /// The count of client requests that were not cacheable. pub requests_not_cacheable: u64, /// The count of client requests that were executed. pub requests_executed: u64, /// The count of errors handling compile requests (per language). pub cache_errors: PerLanguageCount, /// The count of cache hits for handled compile requests (per language). pub cache_hits: PerLanguageCount, /// The count of cache misses for handled compile requests (per language). pub cache_misses: PerLanguageCount, /// The count of cache misses because the cache took too long to respond. pub cache_timeouts: u64, /// The count of errors reading cache entries. pub cache_read_errors: u64, /// The count of compilations which were successful but couldn't be cached. pub non_cacheable_compilations: u64, /// The count of compilations which forcibly ignored the cache. pub forced_recaches: u64, /// The count of errors writing to cache. pub cache_write_errors: u64, /// The number of successful cache writes. pub cache_writes: u64, /// The total time spent writing cache entries. pub cache_write_duration: Duration, /// The total time spent reading cache hits. pub cache_read_hit_duration: Duration, /// The number of compilations performed. pub compilations: u64, /// The total time spent compiling. pub compiler_write_duration: Duration, /// The count of compilation failures. pub compile_fails: u64, /// Counts of reasons why compiles were not cached. pub not_cached: HashMap, /// The count of compilations that were successfully distributed indexed /// by the server that ran those compilations. pub dist_compiles: HashMap, /// The count of compilations that were distributed but failed and had to be re-run locally pub dist_errors: u64, } /// Info and stats about the server. #[derive(Serialize, Deserialize, Clone, Debug)] pub struct ServerInfo { pub stats: ServerStats, pub cache_location: String, pub cache_size: Option, pub max_cache_size: Option, pub use_preprocessor_cache_mode: bool, pub version: String, } /// Status of the dist client. 
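///
/// A minimal consumer sketch (assuming some `info: DistInfo`; `DistInfo`
/// derives `Debug`, and the connected variants only exist with the
/// `dist-client` feature):
///
/// ```ignore
/// match info {
///     DistInfo::Disabled(reason) => println!("dist compilation disabled: {reason}"),
///     #[cfg(feature = "dist-client")]
///     other => println!("dist client state: {other:?}"),
/// }
/// ```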
#[derive(Serialize, Deserialize, Clone, Debug)] pub enum DistInfo { Disabled(String), #[cfg(feature = "dist-client")] NotConnected(Option, String), #[cfg(feature = "dist-client")] SchedulerStatus(Option, dist::SchedulerStatusResult), } impl Default for ServerStats { fn default() -> ServerStats { ServerStats { compile_requests: u64::default(), requests_unsupported_compiler: u64::default(), requests_not_compile: u64::default(), requests_not_cacheable: u64::default(), requests_executed: u64::default(), cache_errors: PerLanguageCount::new(), cache_hits: PerLanguageCount::new(), cache_misses: PerLanguageCount::new(), cache_timeouts: u64::default(), cache_read_errors: u64::default(), non_cacheable_compilations: u64::default(), forced_recaches: u64::default(), cache_write_errors: u64::default(), cache_writes: u64::default(), cache_write_duration: Duration::new(0, 0), cache_read_hit_duration: Duration::new(0, 0), compilations: u64::default(), compiler_write_duration: Duration::new(0, 0), compile_fails: u64::default(), not_cached: HashMap::new(), dist_compiles: HashMap::new(), dist_errors: u64::default(), } } } pub trait ServerStatsWriter { fn write(&mut self, text: &str); } pub struct StdoutServerStatsWriter; impl ServerStatsWriter for StdoutServerStatsWriter { fn write(&mut self, text: &str) { println!("{text}"); } } impl ServerStats { /// Print stats in a human-readable format. /// /// Return the formatted width of each of the (name, value) columns. fn print(&self, writer: &mut T, advanced: bool) -> (usize, usize) { macro_rules! set_stat { ($vec:ident, $var:expr, $name:expr) => {{ // name, value, suffix length $vec.push(($name.to_string(), $var.to_string(), 0)); }}; } macro_rules! set_lang_stat { ($vec:ident, $var:expr, $name:expr) => {{ $vec.push(($name.to_string(), $var.all().to_string(), 0)); let mut sorted_stats: Vec<_> = $var.counts.iter().collect(); sorted_stats.sort_by_key(|v| v.0); for (lang, count) in sorted_stats.iter() { $vec.push((format!("{} ({})", $name, lang), count.to_string(), 0)); } }}; } macro_rules! set_compiler_stat { ($vec:ident, $var:expr, $name:expr) => {{ $vec.push(($name.to_string(), $var.all().to_string(), 0)); let mut sorted_stats: Vec<_> = $var.adv_counts.iter().collect(); sorted_stats.sort_by_key(|v| v.0); for (lang, count) in sorted_stats.iter() { $vec.push((format!("{} ({})", $name, lang), count.to_string(), 0)); } }}; } macro_rules! set_duration_stat { ($vec:ident, $dur:expr, $num:expr, $name:expr) => {{ let s = if $num > 0 { $dur / $num as u32 } else { Default::default() }; // name, value, suffix length $vec.push(($name.to_string(), util::fmt_duration_as_secs(&s), 2)); }}; } let mut stats_vec = vec![]; //TODO: this would be nice to replace with a custom derive implementation. 
set_stat!(stats_vec, self.compile_requests, "Compile requests"); set_stat!( stats_vec, self.requests_executed, "Compile requests executed" ); if advanced { set_compiler_stat!(stats_vec, self.cache_hits, "Cache hits"); set_compiler_stat!(stats_vec, self.cache_misses, "Cache misses"); } else { set_lang_stat!(stats_vec, self.cache_hits, "Cache hits"); set_lang_stat!(stats_vec, self.cache_misses, "Cache misses"); } self.set_percentage_stats(&mut stats_vec, advanced); set_stat!(stats_vec, self.cache_timeouts, "Cache timeouts"); set_stat!(stats_vec, self.cache_read_errors, "Cache read errors"); set_stat!(stats_vec, self.forced_recaches, "Forced recaches"); set_stat!(stats_vec, self.cache_write_errors, "Cache write errors"); if advanced { set_compiler_stat!(stats_vec, self.cache_errors, "Cache errors"); } else { set_lang_stat!(stats_vec, self.cache_errors, "Cache errors"); } set_stat!(stats_vec, self.compilations, "Compilations"); set_stat!(stats_vec, self.compile_fails, "Compilation failures"); set_stat!( stats_vec, self.non_cacheable_compilations, "Non-cacheable compilations" ); set_stat!( stats_vec, self.requests_not_cacheable, "Non-cacheable calls" ); set_stat!( stats_vec, self.requests_not_compile, "Non-compilation calls" ); set_stat!( stats_vec, self.requests_unsupported_compiler, "Unsupported compiler calls" ); set_duration_stat!( stats_vec, self.cache_write_duration, self.cache_writes, "Average cache write" ); set_duration_stat!( stats_vec, self.compiler_write_duration, self.compilations, "Average compiler" ); set_duration_stat!( stats_vec, self.cache_read_hit_duration, self.cache_hits.all(), "Average cache read hit" ); set_stat!( stats_vec, self.dist_errors, "Failed distributed compilations" ); let name_width = stats_vec.iter().map(|(n, _, _)| n.len()).max().unwrap(); let stat_width = stats_vec.iter().map(|(_, s, _)| s.len()).max().unwrap(); for (name, stat, suffix_len) in stats_vec { writer.write(&format!( "{:stat_width$}", name, stat, name_width = name_width, stat_width = stat_width + suffix_len )); } if !self.dist_compiles.is_empty() { writer.write("\nSuccessful distributed compiles"); let mut counts: Vec<_> = self.dist_compiles.iter().collect(); counts.sort_by(|(_, c1), (_, c2)| c1.cmp(c2).reverse()); for (reason, count) in counts { writer.write(&format!( " {:stat_width$}", reason, count, name_width = name_width - 2, stat_width = stat_width, )); } } if !self.not_cached.is_empty() { writer.write("\nNon-cacheable reasons:"); let mut counts: Vec<_> = self.not_cached.iter().collect(); counts.sort_by(|(_, c1), (_, c2)| c1.cmp(c2).reverse()); for (reason, count) in counts { writer.write(&format!( "{:stat_width$}", reason, count, name_width = name_width, stat_width = stat_width, )); } writer.write(""); } (name_width, stat_width) } fn set_percentage_stats(&self, stats_vec: &mut Vec<(String, String, usize)>, advanced: bool) { set_percentage_stat( stats_vec, self.cache_hits.all(), self.cache_misses.all() + self.cache_hits.all(), "Cache hits rate", ); let (stats_hits, stats_misses): (Vec<_>, Vec<_>) = if advanced { ( self.cache_hits.adv_counts.iter().collect(), self.cache_misses.adv_counts.iter().collect(), ) } else { ( self.cache_hits.counts.iter().collect(), self.cache_misses.counts.iter().collect(), ) }; let mut all_languages: HashSet<&String> = HashSet::new(); for (lang, _) in &stats_hits { all_languages.insert(lang); } for (lang, _) in &stats_misses { all_languages.insert(lang); } let mut all_languages: Vec<&String> = all_languages.into_iter().collect(); all_languages.sort(); for lang in 
all_languages { let count_hits = stats_hits .iter() .find(|&&(l, _)| l == lang) .map_or(0, |&(_, &count)| count); let count_misses = stats_misses .iter() .find(|&&(l, _)| l == lang) .map_or(0, |&(_, &count)| count); set_percentage_stat( stats_vec, count_hits, count_hits + count_misses, &format!("Cache hits rate ({})", lang), ); } } } fn set_percentage_stat( vec: &mut Vec<(String, String, usize)>, count_hits: u64, total: u64, name: &str, ) { if total == 0 { vec.push((name.to_string(), "-".to_string(), 0)); } else { let ratio = count_hits as f64 / total as f64; vec.push((name.to_string(), format!("{:.2} %", ratio * 100.0), 2)); } } impl ServerInfo { pub async fn new(stats: ServerStats, storage: Option<&dyn Storage>) -> Result { let cache_location; let use_preprocessor_cache_mode; let cache_size; let max_cache_size; if let Some(storage) = storage { cache_location = storage.location(); use_preprocessor_cache_mode = storage .preprocessor_cache_mode_config() .use_preprocessor_cache_mode; (cache_size, max_cache_size) = futures::try_join!(storage.current_size(), storage.max_size())?; } else { cache_location = String::new(); use_preprocessor_cache_mode = false; cache_size = None; max_cache_size = None; } let version = env!("CARGO_PKG_VERSION").to_string(); Ok(ServerInfo { stats, cache_location, cache_size, max_cache_size, use_preprocessor_cache_mode, version, }) } /// Print info to stdout in a human-readable format. pub fn print(&self, advanced: bool) { let (name_width, stat_width) = self.stats.print(&mut StdoutServerStatsWriter, advanced); println!( "{: (bytes.to_string(), "bytes".to_string()), NumberPrefix::Prefixed(prefix, n) => { (format!("{:.0}", n), format!("{}B", prefix)) } }; println!( "{:stat_width$} {}", name, val, suffix, name_width = name_width, stat_width = stat_width ); } } } } enum Frame { Body { chunk: Option }, Message { message: R }, } struct Body { receiver: mpsc::Receiver>, } impl futures::Stream for Body { type Item = Result; fn poll_next( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> std::task::Poll> { Pin::new(&mut self.receiver).poll_next(cx) } } enum Message { WithBody(R, B), WithoutBody(R), } impl Message { fn into_inner(self) -> R { match self { Message::WithBody(r, _) => r, Message::WithoutBody(r) => r, } } } struct BincodeCodec; impl tokio_serde::Serializer for BincodeCodec where T: serde::Serialize, { type Error = Error; fn serialize(self: Pin<&mut Self>, item: &T) -> std::result::Result { let mut bytes = BytesMut::new(); bincode::serialize_into((&mut bytes).writer(), item)?; Ok(bytes.freeze()) } } impl tokio_serde::Deserializer for BincodeCodec where T: serde::de::DeserializeOwned, { type Error = Error; fn deserialize(self: Pin<&mut Self>, buf: &BytesMut) -> std::result::Result { let ret = bincode::deserialize(buf)?; Ok(ret) } } /// Implementation of `Stream + Sink` that tokio-proto is expecting /// /// This type is composed of a few layers: /// /// * First there's `I`, the I/O object implementing `AsyncRead` and /// `AsyncWrite` /// * Next that's framed using the `length_delimited` module in tokio-io giving /// us a `Sink` and `Stream` of `BytesMut`. /// * Next that sink/stream is wrapped in `ReadBincode` which will cause the /// `Stream` implementation to switch from `BytesMut` to `Request` by parsing /// the bytes bincode. 
/// * Finally that sink/stream is wrapped in `WriteBincode` which will cause the /// `Sink` implementation to switch from `BytesMut` to `Response` meaning that /// all `Response` types pushed in will be converted to `BytesMut` and pushed /// below. struct SccacheTransport { inner: Framed< futures::stream::ErrInto< futures::sink::SinkErrInto< tokio_util::codec::Framed, Bytes, Error, >, Error, >, Request, Response, BincodeCodec, >, } impl Stream for SccacheTransport { type Item = Result>>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner) .poll_next(cx) .map(|r| r.map(|s| s.map(Message::WithoutBody))) } } impl Sink> for SccacheTransport { type Error = Error; fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_ready(cx) } fn start_send(mut self: Pin<&mut Self>, item: Frame) -> Result<()> { match item { Frame::Message { message } => Pin::new(&mut self.inner).start_send(message), Frame::Body { chunk: Some(chunk) } => Pin::new(&mut self.inner).start_send(chunk), Frame::Body { chunk: None } => Ok(()), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_flush(cx) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_close(cx) } } struct ShutdownOrInactive { rx: mpsc::Receiver, timeout: Option>>, timeout_dur: Duration, } impl Future for ShutdownOrInactive { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { loop { match Pin::new(&mut self.rx).poll_next(cx) { Poll::Pending => break, // Shutdown received! Poll::Ready(Some(ServerMessage::Shutdown)) => return Poll::Ready(()), Poll::Ready(Some(ServerMessage::Request)) => { if self.timeout_dur != Duration::new(0, 0) { self.timeout = Some(Box::pin(sleep(self.timeout_dur))); } } // All services have shut down, in theory this isn't possible... Poll::Ready(None) => return Poll::Ready(()), } } match self.timeout { None => Poll::Pending, Some(ref mut timeout) => timeout.as_mut().poll(cx), } } } /// Helper future which tracks the `ActiveInfo` below. This future will resolve /// once all instances of `ActiveInfo` have been dropped. 
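///
/// A minimal sketch, mirroring the `waits_until_zero` test below:
///
/// ```ignore
/// let (wait, active) = WaitUntilZero::new();
/// drop(active); // the last `ActiveInfo` is gone...
/// assert_eq!(wait.now_or_never(), Some(())); // ...so the future resolves
/// ```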
struct WaitUntilZero { info: std::sync::Weak>, } #[derive(Clone)] #[allow(dead_code)] pub struct ActiveInfo { info: Arc>, } struct Info { waker: Option, } impl Drop for Info { fn drop(&mut self) { if let Some(waker) = self.waker.as_ref() { waker.wake_by_ref(); } } } impl WaitUntilZero { #[rustfmt::skip] fn new() -> (WaitUntilZero, ActiveInfo) { let info = Arc::new(std::sync::Mutex::new(Info { waker: None })); (WaitUntilZero { info: Arc::downgrade(&info) }, ActiveInfo { info }) } } impl std::future::Future for WaitUntilZero { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll { match self.info.upgrade() { None => std::task::Poll::Ready(()), Some(arc) => { let mut info = arc.lock().expect("we can't panic when holding lock"); info.waker = Some(cx.waker().clone()); std::task::Poll::Pending } } } } #[test] fn waits_until_zero() { let (wait, _active) = WaitUntilZero::new(); assert_eq!(wait.now_or_never(), None); let (wait, active) = WaitUntilZero::new(); let _active2 = active.clone(); drop(active); assert_eq!(wait.now_or_never(), None); let (wait, _) = WaitUntilZero::new(); assert_eq!(wait.now_or_never(), Some(())); let (wait, active) = WaitUntilZero::new(); let active2 = active.clone(); drop(active); drop(active2); assert_eq!(wait.now_or_never(), Some(())); } #[cfg(test)] mod tests { use super::*; struct StringWriter { buffer: String, } impl StringWriter { fn new() -> StringWriter { StringWriter { buffer: String::new(), } } fn get_output(self) -> String { self.buffer } } impl ServerStatsWriter for StringWriter { fn write(&mut self, text: &str) { self.buffer.push_str(&format!("{}\n", text)); } } #[test] fn test_print_cache_hits_rate_default_server_stats() { let stats = ServerStats::default(); let mut writer = StringWriter::new(); stats.print(&mut writer, false); let output = writer.get_output(); assert!(output.contains("Cache hits rate -")); } #[test] fn test_print_cache_hits_rate_server_stats() { let mut cache_hits_counts = HashMap::new(); cache_hits_counts.insert("Rust".to_string(), 100); cache_hits_counts.insert("C/C++".to_string(), 200); let mut cache_misses_counts = HashMap::new(); cache_misses_counts.insert("Rust".to_string(), 50); cache_misses_counts.insert("Cuda".to_string(), 300); let stats = ServerStats { cache_hits: PerLanguageCount { counts: cache_hits_counts, ..Default::default() }, cache_misses: PerLanguageCount { counts: cache_misses_counts, ..Default::default() }, ..Default::default() }; let mut writer = StringWriter::new(); stats.print(&mut writer, false); let output = writer.get_output(); assert!(output.contains("Cache hits rate 46.15 %")); assert!(output.contains("Cache hits rate (C/C++) 100.00 %")); assert!(output.contains("Cache hits rate (Cuda) 0.00 %")); assert!(output.contains("Cache hits rate (Rust) 66.67 %")); } #[test] fn test_print_cache_hits_rate_advanced_server_stats() { let mut cache_hits_counts = HashMap::new(); cache_hits_counts.insert("rust".to_string(), 50); cache_hits_counts.insert("c/c++ [clang]".to_string(), 30); let mut cache_misses_counts = HashMap::new(); cache_misses_counts.insert("rust".to_string(), 100); cache_misses_counts.insert("cuda".to_string(), 70); let stats = ServerStats { cache_hits: PerLanguageCount { adv_counts: cache_hits_counts, ..Default::default() }, cache_misses: PerLanguageCount { adv_counts: cache_misses_counts, ..Default::default() }, ..Default::default() }; let mut writer = StringWriter::new(); stats.print(&mut writer, true); let output = writer.get_output(); assert!(output.contains("Cache hits 
rate -")); assert!(output.contains("Cache hits rate (c/c++ [clang]) 100.00 %")); assert!(output.contains("Cache hits rate (cuda) 0.00 %")); assert!(output.contains("Cache hits rate (rust) 33.33 %")); } } mozilla-sccache-40c3d6b/src/test/000077500000000000000000000000001475712407500167415ustar00rootroot00000000000000mozilla-sccache-40c3d6b/src/test/mock_storage.rs000066400000000000000000000052601475712407500217670ustar00rootroot00000000000000// Copyright 2017 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::{Cache, CacheWrite, PreprocessorCacheModeConfig, Storage}; use crate::errors::*; use async_trait::async_trait; use futures::channel::mpsc; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex; use tokio::time::sleep; /// A mock `Storage` implementation. pub struct MockStorage { rx: Arc>>>, tx: mpsc::UnboundedSender>, delay: Option, preprocessor_cache_mode: bool, } impl MockStorage { /// Create a new `MockStorage`. if `delay` is `Some`, wait for that amount of time before returning from operations. pub(crate) fn new(delay: Option, preprocessor_cache_mode: bool) -> MockStorage { let (tx, rx) = mpsc::unbounded(); Self { tx, rx: Arc::new(Mutex::new(rx)), delay, preprocessor_cache_mode, } } /// Queue up `res` to be returned as the next result from `Storage::get`. pub(crate) fn next_get(&self, res: Result) { self.tx.unbounded_send(res).unwrap(); } } #[async_trait] impl Storage for MockStorage { async fn get(&self, _key: &str) -> Result { if let Some(delay) = self.delay { sleep(delay).await; } let next = self.rx.lock().await.try_next().unwrap(); next.expect("MockStorage get called but no get results available") } async fn put(&self, _key: &str, _entry: CacheWrite) -> Result { Ok(if let Some(delay) = self.delay { sleep(delay).await; delay } else { Duration::from_secs(0) }) } fn location(&self) -> String { "Mock Storage".to_string() } async fn current_size(&self) -> Result> { Ok(None) } async fn max_size(&self) -> Result> { Ok(None) } fn preprocessor_cache_mode_config(&self) -> PreprocessorCacheModeConfig { PreprocessorCacheModeConfig { use_preprocessor_cache_mode: self.preprocessor_cache_mode, ..Default::default() } } } mozilla-sccache-40c3d6b/src/test/mod.rs000066400000000000000000000012211475712407500200620ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
pub mod mock_storage; #[macro_use] pub mod utils; mod tests; mozilla-sccache-40c3d6b/src/test/tests.rs000066400000000000000000000255261475712407500204630ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::cache::disk::DiskCache; use crate::cache::{CacheMode, PreprocessorCacheModeConfig}; use crate::client::connect_to_server; use crate::commands::{do_compile, request_shutdown, request_stats}; use crate::jobserver::Client; use crate::mock_command::*; use crate::server::{DistClientContainer, SccacheServer, ServerMessage}; use crate::test::utils::*; use fs::File; use fs_err as fs; use futures::channel::oneshot::{self, Sender}; #[cfg(not(target_os = "macos"))] use serial_test::serial; use std::io::{Cursor, Write}; #[cfg(not(target_os = "macos"))] use std::net::TcpListener; use std::path::Path; #[cfg(not(target_os = "macos"))] use std::process::Command; use std::sync::{mpsc, Arc, Mutex}; use std::thread; use std::time::Duration; use std::u64; use tokio::runtime::Runtime; /// Options for running the server in tests. #[derive(Default)] struct ServerOptions { /// The server's idle shutdown timeout. idle_timeout: Option, /// The maximum size of the disk cache. cache_size: Option, } /// Run a server on a background thread, and return a tuple of useful things. /// /// * The port on which the server is listening. /// * A `Sender` which can be used to send messages to the server. /// (Most usefully, ServerMessage::Shutdown.) /// * An `Arc`-and-`Mutex`-wrapped `MockCommandCreator` which the server will /// use for all process creation. /// * The `JoinHandle` for the server thread. fn run_server_thread( cache_dir: &Path, options: T, ) -> ( crate::net::SocketAddr, Sender, Arc>, thread::JoinHandle<()>, ) where T: Into> + Send + 'static, { let options = options.into(); let cache_dir = cache_dir.to_path_buf(); let cache_size = options .as_ref() .and_then(|o| o.cache_size.as_ref()) .copied() .unwrap_or(u64::MAX); // Create a server on a background thread, get some useful bits from it. 
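// The std mpsc channel hands the bound address and the mock command
// creator back to this thread once the server is listening; the oneshot
// channel is what `run` watches for a shutdown request.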
let (tx, rx) = mpsc::channel(); let (shutdown_tx, shutdown_rx) = oneshot::channel(); let handle = thread::spawn(move || { let runtime = Runtime::new().unwrap(); let dist_client = DistClientContainer::new_disabled(); let storage = Arc::new(DiskCache::new( &cache_dir, cache_size, runtime.handle(), PreprocessorCacheModeConfig::default(), CacheMode::ReadWrite, )); let client = Client::new(); let srv = SccacheServer::new(0, runtime, client, dist_client, storage).unwrap(); let mut srv: SccacheServer<_, Arc>> = srv; let addr = srv.local_addr().unwrap(); assert!(matches!(addr, crate::net::SocketAddr::Net(a) if a.port() > 0)); if let Some(options) = options { if let Some(timeout) = options.idle_timeout { srv.set_idle_timeout(Duration::from_millis(timeout)); } } let creator = srv.command_creator().clone(); tx.send((addr, creator)).unwrap(); srv.run(shutdown_rx).unwrap(); }); let (addr, creator) = rx.recv().unwrap(); (addr, shutdown_tx, creator, handle) } #[test] fn test_server_shutdown() { let f = TestFixture::new(); let (addr, _sender, _storage, child) = run_server_thread(f.tempdir.path(), None); // Connect to the server. let conn = connect_to_server(&addr).unwrap(); // Ask it to shut down request_shutdown(conn).unwrap(); // Ensure that it shuts down. child.join().unwrap(); } /// The server will shutdown when requested when the idle timeout is disabled. #[test] fn test_server_shutdown_no_idle() { let f = TestFixture::new(); // Set a ridiculously low idle timeout. let (addr, _sender, _storage, child) = run_server_thread( f.tempdir.path(), ServerOptions { idle_timeout: Some(0), ..Default::default() }, ); let conn = connect_to_server(&addr).unwrap(); request_shutdown(conn).unwrap(); child.join().unwrap(); } #[test] fn test_server_idle_timeout() { let f = TestFixture::new(); // Set a ridiculously low idle timeout. let (_port, _sender, _storage, child) = run_server_thread( f.tempdir.path(), ServerOptions { idle_timeout: Some(1), ..Default::default() }, ); // Don't connect to it. // Ensure that it shuts down. // It would be nice to have an explicit timeout here so we don't hang // if something breaks... child.join().unwrap(); } #[test] fn test_server_stats() { let f = TestFixture::new(); let (addr, sender, _storage, child) = run_server_thread(f.tempdir.path(), None); // Connect to the server. let conn = connect_to_server(&addr).unwrap(); // Ask it for stats. let info = request_stats(conn).unwrap(); assert_eq!(0, info.stats.compile_requests); // Include sccache ver (cli) to validate. assert_eq!(env!("CARGO_PKG_VERSION"), info.version); // Now signal it to shut down. sender.send(ServerMessage::Shutdown).ok().unwrap(); // Ensure that it shuts down. child.join().unwrap(); } #[test] fn test_server_unsupported_compiler() { let f = TestFixture::new(); let (addr, sender, server_creator, child) = run_server_thread(f.tempdir.path(), None); // Connect to the server. let conn = connect_to_server(&addr).unwrap(); { let mut c = server_creator.lock().unwrap(); // fail rust driver check c.next_command_spawns(Ok(MockChild::new(exit_status(1), "hello", "error"))); // The server will check the compiler, so pretend to be an unsupported // compiler. c.next_command_spawns(Ok(MockChild::new(exit_status(0), "hello", "error"))); } // Ask the server to compile something. //TODO: MockCommand should validate these! let exe = &f.bins[0]; let cmdline = vec!["-c".into(), "file.c".into(), "-o".into(), "file.o".into()]; let cwd = f.tempdir.path(); // This creator shouldn't create any processes. It will assert if // it tries to. 
let client_creator = new_creator(); let mut stdout = Cursor::new(Vec::new()); let mut stderr = Cursor::new(Vec::new()); let path = Some(f.paths); let mut runtime = Runtime::new().unwrap(); let res = do_compile( client_creator, &mut runtime, conn, exe, cmdline, cwd, path, vec![], &mut stdout, &mut stderr, ); match res { Ok(_) => panic!("do_compile should have failed!"), Err(e) => assert_eq!("Compiler not supported: \"error\"", e.to_string()), } // Make sure we ran the mock processes. assert_eq!(0, server_creator.lock().unwrap().children.len()); // Shut down the server. sender.send(ServerMessage::Shutdown).ok().unwrap(); // Ensure that it shuts down. child.join().unwrap(); } #[test] fn test_server_compile() { let _ = env_logger::try_init(); let f = TestFixture::new(); let gcc = f.mk_bin("gcc").unwrap(); let (addr, sender, server_creator, child) = run_server_thread(f.tempdir.path(), None); // Connect to the server. const PREPROCESSOR_STDOUT: &[u8] = b"preprocessor stdout"; const PREPROCESSOR_STDERR: &[u8] = b"preprocessor stderr"; const STDOUT: &[u8] = b"some stdout"; const STDERR: &[u8] = b"some stderr"; let conn = connect_to_server(&addr).unwrap(); // Write a dummy input file so the preprocessor cache mode can work std::fs::write(f.tempdir.path().join("file.c"), "whatever").unwrap(); { let mut c = server_creator.lock().unwrap(); // The server will check the compiler. Pretend it's GCC. c.next_command_spawns(Ok(MockChild::new(exit_status(0), "compiler_id=gcc", ""))); // Preprocessor invocation. c.next_command_spawns(Ok(MockChild::new( exit_status(0), PREPROCESSOR_STDOUT, PREPROCESSOR_STDERR, ))); // Compiler invocation. //TODO: wire up a way to get data written to stdin. let obj = f.tempdir.path().join("file.o"); c.next_command_calls(move |_| { // Pretend to compile something. let mut f = File::create(&obj)?; f.write_all(b"file contents")?; Ok(MockChild::new(exit_status(0), STDOUT, STDERR)) }); } // Ask the server to compile something. //TODO: MockCommand should validate these! let exe = &gcc; let cmdline = vec!["-c".into(), "file.c".into(), "-o".into(), "file.o".into()]; let cwd = f.tempdir.path(); // This creator shouldn't create any processes. It will assert if // it tries to. let client_creator = new_creator(); let mut stdout = Cursor::new(Vec::new()); let mut stderr = Cursor::new(Vec::new()); let path = Some(f.paths); let mut runtime = Runtime::new().unwrap(); assert_eq!( 0, do_compile( client_creator, &mut runtime, conn, exe, cmdline, cwd, path, vec![], &mut stdout, &mut stderr ) .unwrap() ); // Make sure we ran the mock processes. assert_eq!(0, server_creator.lock().unwrap().children.len()); assert_eq!(STDOUT, stdout.into_inner().as_slice()); assert_eq!(STDERR, stderr.into_inner().as_slice()); // Shut down the server. sender.send(ServerMessage::Shutdown).ok().unwrap(); // Ensure that it shuts down. child.join().unwrap(); } #[test] #[serial] // test fails intermittently on macos: // https://github.com/mozilla/sccache/issues/234 #[cfg(not(target_os = "macos"))] fn test_server_port_in_use() { // Bind an arbitrary free port. 
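// (Binding to port 0 lets the OS choose the port; the test then points a
// second server at that exact port via SCCACHE_SERVER_PORT, which must fail.)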
let listener = TcpListener::bind("127.0.0.1:0").unwrap(); let sccache = find_sccache_binary(); let output = Command::new(sccache) .arg("--start-server") .env( "SCCACHE_SERVER_PORT", listener.local_addr().unwrap().port().to_string(), ) .env_remove("SCCACHE_SERVER_UDS") .output() .unwrap(); assert!(!output.status.success()); let s = String::from_utf8_lossy(&output.stderr); const MSG: &str = "Server startup failed:"; assert!( s.contains(MSG), "Output did not contain '{}':\n========\n{}\n========", MSG, s ); } mozilla-sccache-40c3d6b/src/test/utils.rs000066400000000000000000000174741475712407500204640ustar00rootroot00000000000000// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::mock_command::*; use fs::File; use fs_err as fs; use std::collections::HashMap; use std::env; use std::ffi::OsString; use std::future::Future; use std::io; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use tempfile::TempDir; use crate::errors::*; use crate::jobserver::Client; /// Return a `Vec` with each listed entry converted to an owned `String`. macro_rules! stringvec { ( $( $x:expr ),* ) => { vec!($( $x.to_owned(), )*) }; } /// Return a `Vec` with each listed entry converted to an owned `OsString`. macro_rules! ovec { ( $( $x:expr ),* ) => { vec!($( ::std::ffi::OsString::from($x), )*) }; } /// Return a `Vec` with each listed entry converted to an owned `PathBuf`. macro_rules! pathvec { ( $( $x:expr ),* ) => { vec!($( ::std::path::PathBuf::from($x), )*) }; } /// Assert that `left != right`. macro_rules! assert_neq { ($left:expr , $right:expr) => {{ match (&($left), &($right)) { (left_val, right_val) => { if !(*left_val != *right_val) { panic!( "assertion failed: `(left != right)` \ (left: `{:?}`, right: `{:?}`)", left_val, right_val ) } } } }}; } /// Assert that `map` contains all of the (`key`, `val`) pairs specified and only those keys. macro_rules! assert_map_contains { ( $map:expr , $( ($key:expr, $val:expr) ),* ) => { let mut nelems = 0; $( nelems += 1; match $map.get(&$key) { Some(&ref v) => assert_eq!($val, *v, "{} key `{:?}` doesn't match expected! (expected `{:?}` != actual `{:?}`)", stringify!($map), $key, $val, v), None => panic!("{} missing key `{:?}`", stringify!($map), $key), } )* assert_eq!(nelems, $map.len(), "{} contains {} elements, expected {}", stringify!($map), $map.len(), nelems); } } pub fn new_creator() -> Arc> { let client = Client::new(); Arc::new(Mutex::new(MockCommandCreator::new(&client))) } pub fn next_command(creator: &Arc>, child: Result) { creator.lock().unwrap().next_command_spawns(child); } pub fn next_command_calls Result + Send + 'static>( creator: &Arc>, call: C, ) { creator.lock().unwrap().next_command_calls(call); } #[cfg(not(target_os = "macos"))] pub fn find_sccache_binary() -> PathBuf { // Older versions of cargo put the test binary next to the sccache binary. // Newer versions put it in the deps/ subdirectory. 
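// e.g. target/debug/sccache vs. target/debug/deps/<this test binary>,
// so we check both the binary's directory and its parent.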
let exe = env::current_exe().unwrap(); let this_dir = exe.parent().unwrap(); let dirs = &[&this_dir, &this_dir.parent().unwrap()]; dirs.iter() .map(|d| d.join("sccache").with_extension(env::consts::EXE_EXTENSION)) .filter_map(|d| fs::metadata(&d).ok().map(|_| d)) .next() .unwrap_or_else(|| { panic!( "Error: sccache binary not found, looked in `{:?}`. Do you need to run `cargo build`?", dirs ) }) } pub struct TestFixture { /// Temp directory. pub tempdir: TempDir, /// $PATH pub paths: OsString, /// Binaries created in $PATH pub bins: Vec, } pub const SUBDIRS: &[&str] = &["a", "b", "c"]; pub const BIN_NAME: &str = "bin"; pub fn create_file(dir: &Path, path: &str, fill_contents: F) -> io::Result where F: FnOnce(File) -> io::Result<()>, { let b = dir.join(path); let parent = b.parent().unwrap(); fs::create_dir_all(parent)?; let f = fs::File::create(&b)?; fill_contents(f)?; b.canonicalize() } pub fn touch(dir: &Path, path: &str) -> io::Result { create_file(dir, path, |_f| Ok(())) } #[cfg(unix)] pub fn mk_bin_contents io::Result<()>>( dir: &Path, path: &str, fill_contents: F, ) -> io::Result { use fs_err::os::unix::fs::OpenOptionsExt; let bin = dir.join(path); let parent = bin.parent().unwrap(); fs::create_dir_all(parent)?; #[allow(clippy::unnecessary_cast)] let f = fs::OpenOptions::new() .write(true) .create(true) .mode(0o666 | (libc::S_IXUSR as u32)) .open(&bin)?; fill_contents(f)?; bin.canonicalize() } #[cfg(unix)] pub fn mk_bin(dir: &Path, path: &str) -> io::Result { mk_bin_contents(dir, path, |_f| Ok(())) } #[cfg(not(unix))] #[allow(dead_code)] pub fn mk_bin_contents io::Result<()>>( dir: &Path, path: &str, contents: F, ) -> io::Result { create_file( dir, Path::new(path) .with_extension(env::consts::EXE_EXTENSION) .to_str() .unwrap(), contents, ) } #[cfg(not(unix))] pub fn mk_bin(dir: &Path, path: &str) -> io::Result { touch( dir, Path::new(path) .with_extension(env::consts::EXE_EXTENSION) .to_str() .unwrap(), ) } impl TestFixture { pub fn new() -> TestFixture { let tempdir = tempfile::Builder::new() .prefix("sccache_test") .tempdir() .unwrap(); let mut builder = std::fs::DirBuilder::new(); builder.recursive(true); let mut paths = vec![]; let mut bins = vec![]; for d in SUBDIRS.iter() { let p = tempdir.path().join(d); builder.create(&p).unwrap(); bins.push(mk_bin(&p, BIN_NAME).unwrap()); paths.push(p); } TestFixture { tempdir, paths: env::join_paths(paths).unwrap(), bins, } } #[allow(dead_code)] pub fn touch(&self, path: &str) -> io::Result { touch(self.tempdir.path(), path) } #[allow(dead_code)] pub fn mk_bin(&self, path: &str) -> io::Result { mk_bin(self.tempdir.path(), path) } } pub fn single_threaded_runtime() -> tokio::runtime::Runtime { tokio::runtime::Builder::new_current_thread() .enable_all() .worker_threads(1) .build() .unwrap() } /// An add on trait, to allow calling `.wait()` for `futures::Future` /// as it was possible for `futures` at `0.1`. /// /// Intended for test only! 
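///
/// A minimal sketch:
///
/// ```ignore
/// let answer = async { 42 }.wait(); // blocks on a fresh single-threaded runtime
/// assert_eq!(answer, 42);
/// ```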
pub(crate) trait Waiter { fn wait(self) -> R; } impl Waiter for T where T: Future, { fn wait(self) -> O { let rt = single_threaded_runtime(); rt.block_on(self) } } #[test] fn test_map_contains_ok() { let mut m = HashMap::new(); m.insert("a", 1); m.insert("b", 2); assert_map_contains!(m, ("a", 1), ("b", 2)); } #[test] #[should_panic] fn test_map_contains_extra_key() { let mut m = HashMap::new(); m.insert("a", 1); m.insert("b", 2); assert_map_contains!(m, ("a", 1)); } #[test] #[should_panic] fn test_map_contains_missing_key() { let mut m = HashMap::new(); m.insert("a", 1); assert_map_contains!(m, ("a", 1), ("b", 2)); } #[test] #[should_panic] fn test_map_contains_wrong_value() { let mut m = HashMap::new(); m.insert("a", 1); m.insert("b", 3); assert_map_contains!(m, ("a", 1), ("b", 2)); } mozilla-sccache-40c3d6b/src/util.rs000066400000000000000000001170231475712407500173110ustar00rootroot00000000000000// Copyright 2017 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::mock_command::{CommandChild, RunCommand}; use blake3::Hasher as blake3_Hasher; use byteorder::{BigEndian, ByteOrder}; use fs::File; use fs_err as fs; use object::{macho, read::archive::ArchiveFile, read::macho::FatArch}; use serde::{Deserialize, Serialize}; use std::cell::Cell; use std::ffi::{OsStr, OsString}; use std::hash::Hasher; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::{self, Stdio}; use std::str; use std::time::Duration; use std::time::{self, SystemTime}; use crate::errors::*; /// The url safe engine for base64. pub const BASE64_URL_SAFE_ENGINE: base64::engine::GeneralPurpose = base64::engine::general_purpose::URL_SAFE_NO_PAD; pub const HASH_BUFFER_SIZE: usize = 128 * 1024; #[derive(Clone)] pub struct Digest { inner: blake3_Hasher, } impl Digest { pub fn new() -> Digest { Digest { inner: blake3_Hasher::new(), } } /// Calculate the BLAKE3 digest of the contents of `path`, running /// the actual hash computation on a background thread in `pool`. pub async fn file(path: T, pool: &tokio::runtime::Handle) -> Result where T: AsRef, { Self::reader(path.as_ref().to_owned(), pool).await } /// Calculate the BLAKE3 digest of the contents read from `reader`. pub fn reader_sync(reader: R) -> Result { Self::reader_sync_with(reader, |_| {}).map(|d| d.finish()) } /// Calculate the BLAKE3 digest of the contents read from `reader`, calling /// `each` before each time the digest is updated. pub fn reader_sync_with(mut reader: R, mut each: F) -> Result { let mut m = Digest::new(); // A buffer of 128KB should give us the best performance. // See https://eklitzke.org/efficient-file-copying-on-linux. let mut buffer = [0; HASH_BUFFER_SIZE]; loop { let count = reader.read(&mut buffer[..])?; if count == 0 { break; } each(&buffer[..count]); m.update(&buffer[..count]); } Ok(m) } /// Calculate the BLAKE3 digest of the contents read from `reader`, while /// also checking for the presence of time macros. /// See [`TimeMacroFinder`] for more details. 
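    ///
    /// A minimal sketch, hashing an in-memory buffer that contains `__DATE__`:
    ///
    /// ```ignore
    /// let (digest, finder) = Digest::reader_sync_time_macros("d: __DATE__".as_bytes()).unwrap();
    /// assert!(finder.found_date());
    /// assert_eq!(digest.len(), 64); // hex-encoded BLAKE3 digest
    /// ```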
    pub fn reader_sync_time_macros<R: Read>(reader: R) -> Result<(String, TimeMacroFinder)> {
        let mut finder = TimeMacroFinder::new();
        Ok((
            Self::reader_sync_with(reader, |visit| finder.find_time_macros(visit))?.finish(),
            finder,
        ))
    }

    /// Calculate the BLAKE3 digest of the contents of `path`, running
    /// the actual hash computation on a background thread in `pool`.
    pub async fn reader(path: PathBuf, pool: &tokio::runtime::Handle) -> Result<String> {
        pool.spawn_blocking(move || {
            let reader = File::open(&path)
                .with_context(|| format!("Failed to open file for hashing: {:?}", path))?;
            Digest::reader_sync(reader)
        })
        .await?
    }

    pub fn update(&mut self, bytes: &[u8]) {
        self.inner.update(bytes);
    }

    pub fn delimiter(&mut self, name: &[u8]) {
        self.update(b"\0SCCACHE\0");
        self.update(name);
        self.update(b"\0");
    }

    pub fn finish(self) -> String {
        hex(self.inner.finalize().as_bytes())
    }
}

impl Default for Digest {
    fn default() -> Self {
        Self::new()
    }
}

/// The longest pattern we're looking for is `__TIMESTAMP__`
const MAX_HAYSTACK_LEN: usize = b"__TIMESTAMP__".len();

#[cfg(test)]
pub const MAX_TIME_MACRO_HAYSTACK_LEN: usize = MAX_HAYSTACK_LEN;

/// Used during the chunked hashing process to check for C preprocessor time
/// macros (namely `__TIMESTAMP__`, `__DATE__`, `__TIME__`) while reusing
/// the same buffer as the hashing function, for efficiency.
///
/// See [`Self::find_time_macros`] for details.
#[derive(Debug, Default)]
pub struct TimeMacroFinder {
    found_date: Cell<bool>,
    found_time: Cell<bool>,
    found_timestamp: Cell<bool>,
    overlap_buffer: [u8; MAX_HAYSTACK_LEN * 2],
    /// Counter of chunks of full size we've been through. Partial reads do
    /// not count and are handled separately.
    full_chunks_counter: usize,
    /// Contents of the previous read if it was smaller than `MAX_HAYSTACK_LEN`,
    /// plus MAX_HAYSTACK_LEN bytes of the previous chunk, to account for
    /// the possibility of partial reads splitting a time macro
    /// across two calls.
    previous_small_read: Vec<u8>,
}

impl TimeMacroFinder {
    /// Called for each chunk of a file during the hashing process
    /// in preprocessor cache mode.
    ///
    /// When buffer reading a file, we get something like this:
    ///
    /// `[xxxx....aaaa][bbbb....cccc][dddd....eeee][ffff...]`
    ///
    /// The brackets represent each buffer chunk. We use the fact that the largest
    /// pattern we're looking for is `__TIMESTAMP__` to avoid copying the entire
    /// file to memory and re-searching the entire buffer for each pattern.
    /// We can check inside each chunk for each pattern, and we use an overlap
    /// buffer to keep the last `b"__TIMESTAMP__".len()` bytes around from the
    /// last chunk, to also catch any pattern overlapping two chunks.
    ///
    /// In the above case, the overflow buffer would look like:
    ///
    /// ```text
    /// Chunk 1
    ///     - aaaa0000
    /// Chunk 2
    ///     - aaaabbbb
    ///     - cccc0000
    /// Chunk 3
    ///     - ccccdddd
    ///     - eeee0000
    /// Chunk 4
    ///     - eeeeffff
    /// [...]
    /// ```
    ///
    /// We have to be careful to zero out the buffer right after each overlap check,
    /// otherwise we risk the (unlikely) case of a pattern being spread between the
    /// start of a chunk and its end.
    /// Finally, we need to account for partial reads: it's possible that a read
    /// smaller than the haystack hides a time macro because it spreads it across
    /// two calls. This makes the example more complicated and isn't necessary
    /// to get the point of the algorithm across.
    /// See unit tests for some concrete examples.
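    ///
    /// A minimal sketch of two partial reads that split `__DATE__` across calls:
    ///
    /// ```ignore
    /// let mut finder = TimeMacroFinder::new();
    /// finder.find_time_macros(b"foo __DA"); // shorter than MAX_HAYSTACK_LEN
    /// finder.find_time_macros(b"TE__ bar"); // concatenated with the previous read
    /// assert!(finder.found_date());
    /// ```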
    pub fn find_time_macros(&mut self, visit: &[u8]) {
        if self.full_chunks_counter == 0 {
            if visit.len() <= MAX_HAYSTACK_LEN {
                // The read is smaller than the largest haystack.
                // We might get called again, if this was an incomplete read.
                if !self.previous_small_read.is_empty() {
                    // In a rare pathological case where all reads are small,
                    // this will grow up to the length of the file.
                    // It is *very* unlikely and of minor performance
                    // importance compared to just getting many small reads.
                    self.previous_small_read.extend(visit);
                } else {
                    visit.clone_into(&mut self.previous_small_read);
                }
                self.find_macros(&self.previous_small_read);
                return;
            }
            // Copy the right side of the visit to the left of the buffer
            let right_half = visit.len() - MAX_HAYSTACK_LEN;
            self.overlap_buffer[..MAX_HAYSTACK_LEN].copy_from_slice(&visit[right_half..]);
        } else {
            if visit.len() < MAX_HAYSTACK_LEN {
                // The read is smaller than the largest haystack.
                // We might get called again, if this was an incomplete read.
                if !self.previous_small_read.is_empty() {
                    self.previous_small_read.extend(visit);
                } else {
                    // Since this isn't the first non-small read (counter != 0)
                    // we need to start from MAX_HAYSTACK_LEN bytes of the previous
                    // read, otherwise we might miss a complete read followed
                    // by a small read.
                    let mut buf = self.overlap_buffer[..MAX_HAYSTACK_LEN].to_owned();
                    buf.extend(visit);
                    self.previous_small_read = buf;
                }
                // zero the right side of the buffer
                self.overlap_buffer[MAX_HAYSTACK_LEN..].copy_from_slice(&[0; MAX_HAYSTACK_LEN]);
                // Copy the visit to the right of the buffer, starting from the middle
                self.overlap_buffer[MAX_HAYSTACK_LEN..MAX_HAYSTACK_LEN + visit.len()]
                    .copy_from_slice(visit);
                // Check both the concatenation with the previous small read
                self.find_macros(&self.previous_small_read);
                // ...and the overlap buffer
                self.find_macros(&self.overlap_buffer);
                return;
            } else {
                // Copy the left side of the visit to the right of the buffer
                let left_half = MAX_HAYSTACK_LEN;
                self.overlap_buffer[left_half..].copy_from_slice(&visit[..left_half]);
                self.find_macros(&self.overlap_buffer);
                // zero the buffer
                self.overlap_buffer = Default::default();
                // Copy the right side of the visit to the left of the buffer
                let right_half = visit.len() - MAX_HAYSTACK_LEN;
                self.overlap_buffer[..MAX_HAYSTACK_LEN].copy_from_slice(&visit[right_half..]);
            }
            self.find_macros(&self.overlap_buffer);
        }
        // Also check the concatenation with the previous small read
        if !self.previous_small_read.is_empty() {
            let mut concatenated = self.previous_small_read.to_owned();
            concatenated.extend(visit);
            self.find_macros(&concatenated);
        }
        self.find_macros(visit);
        self.full_chunks_counter += 1;
        self.previous_small_read.clear();
    }

    fn find_macros(&self, buffer: &[u8]) {
        // TODO
        // This could be made more efficient, either by using a regex for all
        // three patterns, or by doing some SIMD trickery like `ccache` does.
        //
        // `ccache` reads the file twice, so we might actually already be
        // winning in most cases... though they have an inode cache.
        // In any case, let's only improve this if it ends up being slow.
if memchr::memmem::find(buffer, b"__TIMESTAMP__").is_some() { self.found_timestamp.set(true); } if memchr::memmem::find(buffer, b"__TIME__").is_some() { self.found_time.set(true); }; if memchr::memmem::find(buffer, b"__DATE__").is_some() { self.found_date.set(true); }; } pub fn found_time_macros(&self) -> bool { self.found_date() || self.found_time() || self.found_timestamp() } pub fn found_time(&self) -> bool { self.found_time.get() } pub fn found_date(&self) -> bool { self.found_date.get() } pub fn found_timestamp(&self) -> bool { self.found_timestamp.get() } pub fn new() -> Self { Default::default() } } pub fn hex(bytes: &[u8]) -> String { let mut s = String::with_capacity(bytes.len() * 2); for &byte in bytes { s.push(hex(byte & 0xf)); s.push(hex((byte >> 4) & 0xf)); } return s; fn hex(byte: u8) -> char { match byte { 0..=9 => (b'0' + byte) as char, _ => (b'a' + byte - 10) as char, } } } /// Calculate the digest of each file in `files` on background threads in /// `pool`. pub async fn hash_all(files: &[PathBuf], pool: &tokio::runtime::Handle) -> Result> { let start = time::Instant::now(); let count = files.len(); let iter = files.iter().map(move |f| Digest::file(f, pool)); let hashes = futures::future::try_join_all(iter).await?; trace!( "Hashed {} files in {}", count, fmt_duration_as_secs(&start.elapsed()) ); Ok(hashes) } /// Calculate the digest of each static library archive in `files` on background threads in /// `pool`. /// /// The hash is calculated by adding the filename of each archive entry followed /// by its contents, ignoring headers and other file metadata. This primarily /// exists because Apple's `ar` tool inserts timestamps for each file with /// no way to disable this behavior. pub async fn hash_all_archives( files: &[PathBuf], pool: &tokio::runtime::Handle, ) -> Result> { let start = time::Instant::now(); let count = files.len(); let iter = files.iter().map(|path| { let path = path.clone(); pool.spawn_blocking(move || -> Result { let mut m = Digest::new(); let archive_file = File::open(&path) .with_context(|| format!("Failed to open file for hashing: {:?}", path))?; let archive_mmap = unsafe { memmap2::MmapOptions::new().map_copy_read_only(&archive_file)? }; match macho::FatHeader::parse(&*archive_mmap) { Ok(h) if h.magic.get(object::endian::BigEndian) == macho::FAT_MAGIC => { for arch in macho::FatHeader::parse_arch32(&*archive_mmap)? { hash_regular_archive(&mut m, arch.data(&*archive_mmap)?)?; } } Ok(h) if h.magic.get(object::endian::BigEndian) == macho::FAT_MAGIC_64 => { for arch in macho::FatHeader::parse_arch64(&*archive_mmap)? { hash_regular_archive(&mut m, arch.data(&*archive_mmap)?)?; } } // Not a FatHeader at all, regular archive. _ => hash_regular_archive(&mut m, &archive_mmap)?, } Ok(m.finish()) }) }); let mut hashes = futures::future::try_join_all(iter).await?; if let Some(i) = hashes.iter().position(|res| res.is_err()) { return Err(hashes.swap_remove(i).unwrap_err()); } trace!( "Hashed {} files in {}", count, fmt_duration_as_secs(&start.elapsed()) ); Ok(hashes.into_iter().map(|res| res.unwrap()).collect()) } fn hash_regular_archive(m: &mut Digest, data: &[u8]) -> Result<()> { let archive = ArchiveFile::parse(data)?; for entry in archive.members() { let entry = entry?; m.update(entry.name()); m.update(entry.data(data)?); } Ok(()) } /// Format `duration` as seconds with a fractional component. 
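///
/// A minimal sketch:
///
/// ```ignore
/// assert_eq!(fmt_duration_as_secs(&Duration::from_millis(1500)), "1.500 s");
/// ```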
pub fn fmt_duration_as_secs(duration: &Duration) -> String { format!("{}.{:03} s", duration.as_secs(), duration.subsec_millis()) } /// If `input`, write it to `child`'s stdin while also reading `child`'s stdout and stderr, then wait on `child` and return its status and output. /// /// This was lifted from `std::process::Child::wait_with_output` and modified /// to also write to stdin. async fn wait_with_input_output(mut child: T, input: Option>) -> Result where T: CommandChild + 'static, { use tokio::io::{AsyncReadExt, AsyncWriteExt}; let stdin = input.and_then(|i| { child.take_stdin().map(|mut stdin| async move { stdin.write_all(&i).await.context("failed to write stdin") }) }); let stdout = child.take_stdout(); let stdout = async move { match stdout { Some(mut stdout) => { let mut buf = Vec::new(); stdout .read_to_end(&mut buf) .await .context("failed to read stdout")?; Result::Ok(Some(buf)) } None => Ok(None), } }; let stderr = child.take_stderr(); let stderr = async move { match stderr { Some(mut stderr) => { let mut buf = Vec::new(); stderr .read_to_end(&mut buf) .await .context("failed to read stderr")?; Result::Ok(Some(buf)) } None => Ok(None), } }; // Finish writing stdin before waiting, because waiting drops stdin. let status = async move { if let Some(stdin) = stdin { let _ = stdin.await; } child.wait().await.context("failed to wait for child") }; let (status, stdout, stderr) = futures::future::try_join3(status, stdout, stderr).await?; Ok(process::Output { status, stdout: stdout.unwrap_or_default(), stderr: stderr.unwrap_or_default(), }) } /// Run `command`, writing `input` to its stdin if it is `Some` and return the exit status and output. /// /// If the command returns a non-successful exit status, an error of `SccacheError::ProcessError` /// will be returned containing the process output. pub async fn run_input_output(mut command: C, input: Option>) -> Result where C: RunCommand, { let child = command .stdin(if input.is_some() { Stdio::piped() } else { Stdio::inherit() }) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .await?; wait_with_input_output(child, input) .await .and_then(|output| { if output.status.success() { Ok(output) } else { Err(ProcessError(output).into()) } }) } /// Write `data` to `writer` with bincode serialization, prefixed by a `u32` length. pub fn write_length_prefixed_bincode(mut writer: W, data: S) -> Result<()> where W: Write, S: Serialize, { let bytes = bincode::serialize(&data)?; let mut len = [0; 4]; BigEndian::write_u32(&mut len, bytes.len() as u32); writer.write_all(&len)?; writer.write_all(&bytes)?; writer.flush()?; Ok(()) } pub trait OsStrExt { fn starts_with(&self, s: &str) -> bool; fn split_prefix(&self, s: &str) -> Option; } #[cfg(unix)] use std::os::unix::ffi::OsStrExt as _OsStrExt; #[cfg(unix)] impl OsStrExt for OsStr { fn starts_with(&self, s: &str) -> bool { self.as_bytes().starts_with(s.as_bytes()) } fn split_prefix(&self, s: &str) -> Option { let bytes = self.as_bytes(); if bytes.starts_with(s.as_bytes()) { Some(OsStr::from_bytes(&bytes[s.len()..]).to_owned()) } else { None } } } #[cfg(windows)] use std::os::windows::ffi::{OsStrExt as _OsStrExt, OsStringExt}; #[cfg(windows)] impl OsStrExt for OsStr { fn starts_with(&self, s: &str) -> bool { // Attempt to interpret this OsStr as utf-16. This is a pretty "poor // man's" implementation, however, as it only handles a subset of // unicode characters in `s`. Currently that's sufficient, though, as // we're only calling `starts_with` with ascii string literals. 
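// e.g. OsStr::new("-Iinclude").starts_with("-I") == true; codepoints at
// or above 0xd7ff in `s` conservatively yield false.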
        let u16s = self.encode_wide();
        let mut utf8 = s.chars();
        for codepoint in u16s {
            let to_match = match utf8.next() {
                Some(ch) => ch,
                None => return true,
            };
            let to_match = to_match as u32;
            let codepoint = codepoint as u32;

            // UTF-16 encodes codepoints < 0xd7ff as just the raw value as a
            // u16, and that's all we're matching against. If the codepoint in
            // `s` is *over* this value then just assume it's not in `self`.
            //
            // If `to_match` is the same as the `codepoint` coming out of our
            // u16 iterator we keep going, otherwise we've found a mismatch.
            if to_match < 0xd7ff {
                if to_match != codepoint {
                    return false;
                }
            } else {
                return false;
            }
        }

        // If we ran out of characters to match, then the strings should be
        // equal, otherwise we've got more data to match in `s` so we didn't
        // start with `s`
        utf8.next().is_none()
    }

    fn split_prefix(&self, s: &str) -> Option<OsString> {
        // See comments in the above implementation for what's going on here
        let mut u16s = self.encode_wide().peekable();
        let mut utf8 = s.chars();
        while let Some(&codepoint) = u16s.peek() {
            let to_match = match utf8.next() {
                Some(ch) => ch,
                None => {
                    let codepoints = u16s.collect::<Vec<_>>();
                    return Some(OsString::from_wide(&codepoints));
                }
            };
            let to_match = to_match as u32;
            let codepoint = codepoint as u32;
            if to_match < 0xd7ff {
                if to_match != codepoint {
                    return None;
                }
            } else {
                return None;
            }
            u16s.next();
        }
        if utf8.next().is_none() {
            Some(OsString::new())
        } else {
            None
        }
    }
}

#[cfg(unix)]
pub fn encode_path(dst: &mut dyn Write, path: &Path) -> std::io::Result<()> {
    use std::os::unix::prelude::*;
    let bytes = path.as_os_str().as_bytes();
    dst.write_all(bytes)
}

#[cfg(windows)]
pub fn encode_path(dst: &mut dyn Write, path: &Path) -> std::io::Result<()> {
    use std::os::windows::prelude::*;
    let points = path.as_os_str().encode_wide().collect::<Vec<_>>();
    let bytes = wide_char_to_multi_byte(&points)?; // use_default_char_flag
    dst.write_all(&bytes)
}

#[cfg(unix)]
pub fn decode_path(bytes: &[u8]) -> std::io::Result<PathBuf> {
    use std::os::unix::prelude::*;
    Ok(OsStr::from_bytes(bytes).into())
}

#[cfg(windows)]
pub fn decode_path(bytes: &[u8]) -> std::io::Result<PathBuf> {
    use windows_sys::Win32::Globalization::{CP_OEMCP, MB_ERR_INVALID_CHARS};
    let codepage = CP_OEMCP;
    let flags = MB_ERR_INVALID_CHARS;
    Ok(OsString::from_wide(&multi_byte_to_wide_char(codepage, flags, bytes)?).into())
}

#[cfg(windows)]
pub fn wide_char_to_multi_byte(wide_char_str: &[u16]) -> std::io::Result<Vec<u8>> {
    use windows_sys::Win32::Globalization::{WideCharToMultiByte, CP_OEMCP};
    let codepage = CP_OEMCP;
    let flags = 0;
    // Empty string
    if wide_char_str.is_empty() {
        return Ok(Vec::new());
    }
    unsafe {
        // Get length of multibyte string
        let len = WideCharToMultiByte(
            codepage,
            flags,
            wide_char_str.as_ptr(),
            wide_char_str.len() as i32,
            std::ptr::null_mut(),
            0,
            std::ptr::null(),
            std::ptr::null_mut(),
        );

        if len > 0 {
            // Convert from UTF-16 to multibyte
            let mut astr: Vec<u8> = Vec::with_capacity(len as usize);
            let len = WideCharToMultiByte(
                codepage,
                flags,
                wide_char_str.as_ptr(),
                wide_char_str.len() as i32,
                astr.as_mut_ptr() as _,
                len,
                std::ptr::null(),
                std::ptr::null_mut(),
            );

            if len > 0 {
                astr.set_len(len as usize);
                if (len as usize) == astr.len() {
                    return Ok(astr);
                } else {
                    return Ok(astr[0..(len as usize)].to_vec());
                }
            }
        }

        Err(std::io::Error::last_os_error())
    }
}

#[cfg(windows)]
/// Wrapper for MultiByteToWideChar.
///
/// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd319072(v=vs.85).aspx
/// for more details.
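///
/// A minimal usage sketch (assuming pure-ASCII input, which converts
/// unchanged under any codepage; `CP_OEMCP` and `MB_ERR_INVALID_CHARS` come
/// from `windows_sys`, as in `decode_path` above):
///
/// ```ignore
/// use windows_sys::Win32::Globalization::{CP_OEMCP, MB_ERR_INVALID_CHARS};
/// let wide = multi_byte_to_wide_char(CP_OEMCP, MB_ERR_INVALID_CHARS, b"abc")?;
/// assert_eq!(wide, [0x61u16, 0x62, 0x63]);
/// ```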
pub fn multi_byte_to_wide_char(
    codepage: u32,
    flags: u32,
    multi_byte_str: &[u8],
) -> std::io::Result<Vec<u16>> {
    use windows_sys::Win32::Globalization::MultiByteToWideChar;

    if multi_byte_str.is_empty() {
        return Ok(vec![]);
    }
    unsafe {
        // Get length of UTF-16 string
        let len = MultiByteToWideChar(
            codepage,
            flags,
            multi_byte_str.as_ptr(),
            multi_byte_str.len() as i32,
            std::ptr::null_mut(),
            0,
        );
        if len > 0 {
            // Convert to UTF-16
            let mut wstr: Vec<u16> = Vec::with_capacity(len as usize);
            let len = MultiByteToWideChar(
                codepage,
                flags,
                multi_byte_str.as_ptr(),
                multi_byte_str.len() as i32,
                wstr.as_mut_ptr(),
                len,
            );
            wstr.set_len(len as usize);
            if len > 0 {
                return Ok(wstr);
            }
        }
        Err(std::io::Error::last_os_error())
    }
}

/// A Unix timestamp with nanoseconds precision
#[derive(Serialize, Deserialize, Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct Timestamp {
    seconds: i64,
    /// Always in the `0 .. 1_000_000_000` range.
    nanoseconds: u32,
}

const NSEC_PER_SEC: u32 = 1_000_000_000;

impl From<std::time::SystemTime> for Timestamp {
    fn from(system_time: std::time::SystemTime) -> Self {
        // On Unix, `SystemTime` is a wrapper for the `timespec` C struct:
        // https://www.gnu.org/software/libc/manual/html_node/Time-Types.html#index-struct-timespec
        // On Windows, `SystemTime` wraps a 100ns intervals-based struct.
        // We want to effectively access the inner fields, but the Rust standard
        // library does not expose them. The best we can do is:
        let seconds;
        let nanoseconds;
        match system_time.duration_since(std::time::UNIX_EPOCH) {
            Ok(duration) => {
                seconds = duration.as_secs() as i64;
                nanoseconds = duration.subsec_nanos();
            }
            Err(error) => {
                // `system_time` is before `UNIX_EPOCH`.
                // We need to undo this algorithm:
                // https://github.com/rust-lang/rust/blob/6bed1f0bc3cc50c10aab26d5f94b16a00776b8a5/library/std/src/sys/unix/time.rs#L40-L41
                let negative = error.duration();
                let negative_secs = negative.as_secs() as i64;
                let negative_nanos = negative.subsec_nanos();
                if negative_nanos == 0 {
                    seconds = -negative_secs;
                    nanoseconds = 0;
                } else {
                    // For example if `system_time` was 4.3 seconds before
                    // the Unix epoch we get a Duration that represents
                    // `(-4, -0.3)` but we want `(-5, +0.7)`:
                    seconds = -1 - negative_secs;
                    nanoseconds = NSEC_PER_SEC - negative_nanos;
                }
            }
        };
        Self {
            seconds,
            nanoseconds,
        }
    }
}

impl PartialEq<SystemTime> for Timestamp {
    fn eq(&self, other: &SystemTime) -> bool {
        self == &Self::from(*other)
    }
}

impl Timestamp {
    pub fn new(seconds: i64, nanoseconds: u32) -> Self {
        Self {
            seconds,
            nanoseconds,
        }
    }
}

/// Adds a fallback for trying Unix's `ctime` semantics on Windows systems.
pub trait MetadataCtimeExt {
    fn ctime_or_creation(&self) -> std::io::Result<Timestamp>;
}

impl MetadataCtimeExt for std::fs::Metadata {
    #[cfg(unix)]
    fn ctime_or_creation(&self) -> std::io::Result<Timestamp> {
        use std::os::unix::prelude::MetadataExt;
        Ok(Timestamp {
            seconds: self.ctime(),
            nanoseconds: self.ctime_nsec().try_into().unwrap_or(0),
        })
    }
    #[cfg(windows)]
    fn ctime_or_creation(&self) -> std::io::Result<Timestamp> {
        // Windows does not have the actual notion of ctime in the Unix sense.
        // Best effort is creation time (also called ctime in windows libs...)
        self.created().map(Into::into)
    }
}

pub struct HashToDigest<'a> {
    pub digest: &'a mut Digest,
}

impl<'a> Hasher for HashToDigest<'a> {
    fn write(&mut self, bytes: &[u8]) {
        self.digest.update(bytes)
    }

    fn finish(&self) -> u64 {
        panic!("not supposed to be called");
    }
}

/// Pipe `cmd`'s stdio to `/dev/null`, unless a specific env var is set.
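///
/// Two environment variables read in the body below tweak the behavior:
/// `SCCACHE_NO_DAEMON=1` skips daemonizing entirely, and
/// `SCCACHE_ALLOW_CORE_DUMPS=1` raises `RLIMIT_CORE` so a crashing server can
/// leave a core file behind. A sketch of a foreground run:
///
/// ```ignore
/// std::env::set_var("SCCACHE_NO_DAEMON", "1");
/// daemonize()?; // returns without forking
/// ```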
#[cfg(not(windows))] pub fn daemonize() -> Result<()> { use crate::jobserver::discard_inherited_jobserver; use daemonize::Daemonize; use std::env; use std::mem; match env::var("SCCACHE_NO_DAEMON") { Ok(ref val) if val == "1" => {} _ => { Daemonize::new().start().context("failed to daemonize")?; } } unsafe { discard_inherited_jobserver(); } static mut PREV_SIGSEGV: *mut libc::sigaction = 0 as *mut _; static mut PREV_SIGBUS: *mut libc::sigaction = 0 as *mut _; static mut PREV_SIGILL: *mut libc::sigaction = 0 as *mut _; // We don't have a parent process any more once we've reached this point, // which means that no one's probably listening for our exit status. // In order to assist with debugging crashes of the server we configure our // rlimit to allow runtime dumps and we also install a signal handler for // segfaults which at least prints out what just happened. unsafe { match env::var("SCCACHE_ALLOW_CORE_DUMPS") { Ok(ref val) if val == "1" => { let rlim = libc::rlimit { rlim_cur: libc::RLIM_INFINITY, rlim_max: libc::RLIM_INFINITY, }; libc::setrlimit(libc::RLIMIT_CORE, &rlim); } _ => {} } PREV_SIGSEGV = Box::into_raw(Box::new(mem::zeroed::())); PREV_SIGBUS = Box::into_raw(Box::new(mem::zeroed::())); PREV_SIGILL = Box::into_raw(Box::new(mem::zeroed::())); let mut new: libc::sigaction = mem::zeroed(); new.sa_sigaction = handler as usize; new.sa_flags = libc::SA_SIGINFO | libc::SA_RESTART; libc::sigaction(libc::SIGSEGV, &new, &mut *PREV_SIGSEGV); libc::sigaction(libc::SIGBUS, &new, &mut *PREV_SIGBUS); libc::sigaction(libc::SIGILL, &new, &mut *PREV_SIGILL); } return Ok(()); extern "C" fn handler( signum: libc::c_int, _info: *mut libc::siginfo_t, _ptr: *mut libc::c_void, ) { use std::fmt::{Result, Write}; struct Stderr; impl Write for Stderr { fn write_str(&mut self, s: &str) -> Result { unsafe { let bytes = s.as_bytes(); libc::write(libc::STDERR_FILENO, bytes.as_ptr() as *const _, bytes.len()); Ok(()) } } } unsafe { let _ = writeln!(Stderr, "signal {} received", signum); // Configure the old handler and then resume the program. This'll // likely go on to create a runtime dump if one's configured to be // created. match signum { libc::SIGBUS => libc::sigaction(signum, &*PREV_SIGBUS, std::ptr::null_mut()), libc::SIGILL => libc::sigaction(signum, &*PREV_SIGILL, std::ptr::null_mut()), _ => libc::sigaction(signum, &*PREV_SIGSEGV, std::ptr::null_mut()), }; } } } /// This is a no-op on Windows. #[cfg(windows)] pub fn daemonize() -> Result<()> { Ok(()) } /// Disable connection pool to avoid broken connection between runtime /// /// # TODO /// /// We should refactor sccache current model to make sure that we only have /// one tokio runtime and keep reqwest alive inside it. 
/// /// --- /// /// More details could be found at https://github.com/mozilla/sccache/pull/1563 #[cfg(any(feature = "dist-server", feature = "dist-client"))] pub fn new_reqwest_blocking_client() -> reqwest::blocking::Client { reqwest::blocking::Client::builder() .pool_max_idle_per_host(0) .build() .expect("http client must build with success") } fn unhex(b: u8) -> std::io::Result { match b { b'0'..=b'9' => Ok(b - b'0'), b'a'..=b'f' => Ok(b - b'a' + 10), b'A'..=b'F' => Ok(b - b'A' + 10), _ => Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, "invalid hex digit", )), } } /// A reverse version of std::ascii::escape_default pub fn ascii_unescape_default(s: &[u8]) -> std::io::Result> { let mut out = Vec::with_capacity(s.len() + 4); let mut offset = 0; while offset < s.len() { let c = s[offset]; if c == b'\\' { offset += 1; if offset >= s.len() { return Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, "incomplete escape", )); } let c = s[offset]; match c { b'n' => out.push(b'\n'), b'r' => out.push(b'\r'), b't' => out.push(b'\t'), b'\'' => out.push(b'\''), b'"' => out.push(b'"'), b'\\' => out.push(b'\\'), b'x' => { offset += 1; if offset + 1 >= s.len() { return Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, "incomplete hex escape", )); } let v = unhex(s[offset])? << 4 | unhex(s[offset + 1])?; out.push(v); offset += 1; } _ => { return Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, "invalid escape", )); } } } else { out.push(c); } offset += 1; } Ok(out) } #[cfg(test)] mod tests { use super::{OsStrExt, TimeMacroFinder}; use std::ffi::{OsStr, OsString}; #[test] fn simple_starts_with() { let a: &OsStr = "foo".as_ref(); assert!(a.starts_with("")); assert!(a.starts_with("f")); assert!(a.starts_with("fo")); assert!(a.starts_with("foo")); assert!(!a.starts_with("foo2")); assert!(!a.starts_with("b")); assert!(!a.starts_with("b")); let a: &OsStr = "".as_ref(); assert!(!a.starts_with("a")) } #[test] fn simple_strip_prefix() { let a: &OsStr = "foo".as_ref(); assert_eq!(a.split_prefix(""), Some(OsString::from("foo"))); assert_eq!(a.split_prefix("f"), Some(OsString::from("oo"))); assert_eq!(a.split_prefix("fo"), Some(OsString::from("o"))); assert_eq!(a.split_prefix("foo"), Some(OsString::from(""))); assert_eq!(a.split_prefix("foo2"), None); assert_eq!(a.split_prefix("b"), None); } #[test] fn test_time_macro_short_read() { // Normal "read" should succeed let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"__TIME__"); assert!(finder.found_time()); // So should a partial "read" let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"__"); assert!(!finder.found_time()); finder.find_time_macros(b"TIME__"); assert!(finder.found_time()); // So should a partial "read" later down the line let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"Something or other larger than the haystack"); finder.find_time_macros(b"__"); assert!(!finder.found_time()); finder.find_time_macros(b"TIME__"); assert!(finder.found_time()); // Even if the last "read" is large let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"Something or other larger than the haystack"); finder.find_time_macros(b"__"); assert!(!finder.found_time()); finder.find_time_macros(b"TIME__ something or other larger than the haystack"); assert!(finder.found_time()); // Pathological case let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"__"); assert!(!finder.found_time()); finder.find_time_macros(b"TI"); assert!(!finder.found_time()); 
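        // (Still the same pathological case: the eight-byte "__TIME__" token
        // is being fed two bytes per read, so it must be reassembled across
        // four separate calls before the match can fire.)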
finder.find_time_macros(b"ME"); assert!(!finder.found_time()); finder.find_time_macros(b"__"); assert!(finder.found_time()); // Odd-numbered pathological case let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"This is larger than the haystack __"); assert!(!finder.found_time()); finder.find_time_macros(b"TI"); assert!(!finder.found_time()); finder.find_time_macros(b"ME"); assert!(!finder.found_time()); finder.find_time_macros(b"__"); assert!(finder.found_time()); // Sawtooth length pathological case let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"This is larger than the haystack __"); assert!(!finder.found_time()); finder.find_time_macros(b"TI"); assert!(!finder.found_time()); finder.find_time_macros(b"ME__ This is larger than the haystack"); assert!(finder.found_time()); assert!(!finder.found_timestamp()); finder.find_time_macros(b"__"); assert!(!finder.found_timestamp()); finder.find_time_macros(b"TIMESTAMP__ This is larger than the haystack"); assert!(finder.found_timestamp()); // Odd-numbered sawtooth length pathological case let mut finder = TimeMacroFinder::new(); finder.find_time_macros(b"__"); assert!(!finder.found_time()); finder.find_time_macros(b"TIME__ This is larger than the haystack"); assert!(finder.found_time()); assert!(!finder.found_timestamp()); finder.find_time_macros(b"__"); assert!(!finder.found_timestamp()); finder.find_time_macros(b"TIMESTAMP__ This is larger than the haystack"); assert!(finder.found_timestamp()); } #[test] fn test_ascii_unescape_default() { let mut alphabet = r#"\\'"\t\n\r"#.as_bytes().to_vec(); alphabet.push(b'a'); alphabet.push(b'1'); alphabet.push(0); alphabet.push(0xff); let mut input = vec![]; let mut output = vec![]; let mut alphabet_indexes = [0; 3]; let mut tested_cases = 0; // Following loop may test duplicated inputs, but it's not a problem loop { input.clear(); output.clear(); for idx in alphabet_indexes { if idx < alphabet.len() { input.push(alphabet[idx]); } } if input.is_empty() { break; } output.extend(input.as_slice().escape_ascii()); let result = super::ascii_unescape_default(&output).unwrap(); assert_eq!(input, result, "{:?}", output); tested_cases += 1; for idx in &mut alphabet_indexes { *idx += 1; if *idx > alphabet.len() { // Use `>` so we can test various input length. *idx = 0; } else { break; } } } assert_eq!(tested_cases, (alphabet.len() + 1).pow(3) - 1); let empty_result = super::ascii_unescape_default(&[]).unwrap(); assert!(empty_result.is_empty(), "{:?}", empty_result); } } mozilla-sccache-40c3d6b/tests/000077500000000000000000000000001475712407500163355ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/autotools/000077500000000000000000000000001475712407500203665ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/autotools/Makefile.am000066400000000000000000000001041475712407500224150ustar00rootroot00000000000000noinst_LIBRARIES = libmyproject.a libmyproject_a_SOURCES = main.cpp mozilla-sccache-40c3d6b/tests/autotools/configure.ac000066400000000000000000000003051475712407500226520ustar00rootroot00000000000000AC_INIT([myproject], [1.0]) AM_INIT_AUTOMAKE([foreign subdir-objects]) AC_CONFIG_SRCDIR([main.cpp]) AC_CONFIG_HEADERS([config.h]) AC_PROG_RANLIB AC_PROG_CXX AC_CONFIG_FILES([Makefile]) AC_OUTPUT mozilla-sccache-40c3d6b/tests/autotools/main.cpp000066400000000000000000000001331475712407500220130ustar00rootroot00000000000000#include void print_hello() { std::cout << "Hello, World!" 
<< std::endl; } mozilla-sccache-40c3d6b/tests/cache_hit_rate.rs000066400000000000000000000063731475712407500216360ustar00rootroot00000000000000pub mod helpers; use std::process::Command; use anyhow::Result; use assert_cmd::assert::OutputAssertExt; use helpers::{cargo_clean, SccacheTest, CARGO, CRATE_DIR}; use predicates::{boolean::PredicateBooleanExt, str::PredicateStrExt}; use serial_test::serial; #[test] #[serial] fn test_cache_hit_rate() -> Result<()> { let test_info = SccacheTest::new(None)?; Command::new(CARGO.as_os_str()) .args(["build", "--color=never"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8().not())? .try_success()?; test_info .show_text_stats(false)? .try_stdout( predicates::str::is_match(r"Cache hits rate\s+0\.00\s%") .unwrap() .from_utf8(), )? .try_stdout( predicates::str::is_match(r"Cache hits rate \(Rust\)\s+0\.00\s%") .unwrap() .from_utf8(), )? .try_success()?; // Clean it so we can build it again. cargo_clean(&test_info)?; Command::new(CARGO.as_os_str()) .args(["run", "--color=always"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8())? .try_success()?; test_info .show_text_stats(false)? .try_stdout( predicates::str::is_match(r"Cache hits rate\s+50\.00\s%") .unwrap() .from_utf8(), )? .try_stdout( predicates::str::is_match(r"Cache hits rate \(Rust\)\s+50\.00\s%") .unwrap() .from_utf8(), )? .try_success()?; Ok(()) } #[test] #[serial] fn test_adv_cache_hit_rate() -> Result<()> { let test_info = SccacheTest::new(None)?; Command::new(CARGO.as_os_str()) .args(["build", "--color=never"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8().not())? .try_success()?; test_info .show_text_stats(true)? .try_stdout( predicates::str::is_match(r"Cache hits rate\s+0\.00\s%") .unwrap() .from_utf8(), )? .try_stdout( predicates::str::is_match(r"Cache hits rate \(rust\)\s+0\.00\s%") .unwrap() .from_utf8(), )? .try_success()?; cargo_clean(&test_info)?; Command::new(CARGO.as_os_str()) .args(["run", "--color=always"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8())? .try_success()?; test_info .show_text_stats(true)? .try_stdout( predicates::str::is_match(r"Cache hits rate\s+50\.00\s%") .unwrap() .from_utf8(), )? .try_stdout( predicates::str::is_match(r"Cache hits rate \(rust\)\s+50\.00\s%") .unwrap() .from_utf8(), )? .try_success()?; Ok(()) } mozilla-sccache-40c3d6b/tests/cmake-hip/000077500000000000000000000000001475712407500201735ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/cmake-hip/CMakeLists.txt000066400000000000000000000003001475712407500227240ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.10) project(myproject LANGUAGES CXX HIP) add_library(vectoradd_hip vectoradd_hip.cpp) set_source_files_properties(vectoradd_hip.cpp PROPERTIES LANGUAGE HIP) mozilla-sccache-40c3d6b/tests/cmake-hip/vectoradd_hip.cpp000066400000000000000000000076571475712407500235310ustar00rootroot00000000000000/* Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include #include #include #include #include #include "hip/hip_runtime.h" #define HIP_ASSERT(x) (assert((x)==hipSuccess)) #define WIDTH 1024 #define HEIGHT 1024 #define NUM (WIDTH*HEIGHT) #define THREADS_PER_BLOCK_X 16 #define THREADS_PER_BLOCK_Y 16 #define THREADS_PER_BLOCK_Z 1 __global__ void vectoradd_float(float* __restrict__ a, const float* __restrict__ b, const float* __restrict__ c, int width, int height) { int x = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x; int y = hipBlockDim_y * hipBlockIdx_y + hipThreadIdx_y; int i = y * width + x; if ( i < (width * height)) { a[i] = b[i] + c[i]; } } #if 0 __kernel__ void vectoradd_float(float* a, const float* b, const float* c, int width, int height) { int x = blockDimX * blockIdx.x + threadIdx.x; int y = blockDimY * blockIdy.y + threadIdx.y; int i = y * width + x; if ( i < (width * height)) { a[i] = b[i] + c[i]; } } #endif using namespace std; int main() { float* hostA; float* hostB; float* hostC; float* deviceA; float* deviceB; float* deviceC; hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); cout << " System minor " << devProp.minor << endl; cout << " System major " << devProp.major << endl; cout << " agent prop name " << devProp.name << endl; cout << "hip Device prop succeeded " << endl ; int i; int errors; hostA = (float*)malloc(NUM * sizeof(float)); hostB = (float*)malloc(NUM * sizeof(float)); hostC = (float*)malloc(NUM * sizeof(float)); // initialize the input data for (i = 0; i < NUM; i++) { hostB[i] = (float)i; hostC[i] = (float)i*100.0f; } HIP_ASSERT(hipMalloc((void**)&deviceA, NUM * sizeof(float))); HIP_ASSERT(hipMalloc((void**)&deviceB, NUM * sizeof(float))); HIP_ASSERT(hipMalloc((void**)&deviceC, NUM * sizeof(float))); HIP_ASSERT(hipMemcpy(deviceB, hostB, NUM*sizeof(float), hipMemcpyHostToDevice)); HIP_ASSERT(hipMemcpy(deviceC, hostC, NUM*sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(vectoradd_float, dim3(WIDTH/THREADS_PER_BLOCK_X, HEIGHT/THREADS_PER_BLOCK_Y), dim3(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y), 0, 0, deviceA ,deviceB ,deviceC ,WIDTH ,HEIGHT); HIP_ASSERT(hipMemcpy(hostA, deviceA, NUM*sizeof(float), hipMemcpyDeviceToHost)); // verify the results errors = 0; for (i = 0; i < NUM; i++) { if (hostA[i] != (hostB[i] + hostC[i])) { errors++; } } if (errors!=0) { printf("FAILED: %d errors\n",errors); } else { printf ("PASSED!\n"); } HIP_ASSERT(hipFree(deviceA)); HIP_ASSERT(hipFree(deviceB)); HIP_ASSERT(hipFree(deviceC)); free(hostA); free(hostB); free(hostC); //hipResetDefaultAccelerator(); 
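  // The process exit code doubles as the miscompare count, so a zero status
  // signals PASSED to the test harness.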
return errors; } mozilla-sccache-40c3d6b/tests/cmake/000077500000000000000000000000001475712407500174155ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/cmake/CMakeLists.txt000066400000000000000000000001571475712407500221600ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.10) project(myproject LANGUAGES CXX) add_library(myproject OBJECT main.cpp) mozilla-sccache-40c3d6b/tests/cmake/main.cpp000066400000000000000000000001331475712407500210420ustar00rootroot00000000000000#include void print_hello() { std::cout << "Hello, World!" << std::endl; } mozilla-sccache-40c3d6b/tests/dist.rs000066400000000000000000000172641475712407500176600ustar00rootroot00000000000000#![cfg(all(feature = "dist-client", feature = "dist-server"))] extern crate assert_cmd; #[macro_use] extern crate log; extern crate sccache; extern crate serde_json; use crate::harness::{ get_stats, sccache_command, start_local_daemon, stop_local_daemon, write_json_cfg, write_source, }; use assert_cmd::prelude::*; use sccache::config::HTTPUrl; use sccache::dist::{ AssignJobResult, CompileCommand, InputsReader, JobId, JobState, RunJobResult, ServerIncoming, ServerOutgoing, SubmitToolchainResult, Toolchain, ToolchainReader, }; use std::ffi::OsStr; use std::path::Path; use sccache::errors::*; mod harness; fn basic_compile(tmpdir: &Path, sccache_cfg_path: &Path, sccache_cached_cfg_path: &Path) { let envs: Vec<(_, &OsStr)> = vec![ ("RUST_BACKTRACE", "1".as_ref()), ("SCCACHE_LOG", "debug".as_ref()), ("SCCACHE_CONF", sccache_cfg_path.as_ref()), ("SCCACHE_CACHED_CONF", sccache_cached_cfg_path.as_ref()), ]; let source_file = "x.c"; let obj_file = "x.o"; write_source(tmpdir, source_file, "#if !defined(SCCACHE_TEST_DEFINE)\n#error SCCACHE_TEST_DEFINE is not defined\n#endif\nint x() { return 5; }"); sccache_command() .args([ std::env::var("CC") .unwrap_or_else(|_| "gcc".to_string()) .as_str(), "-c", "-DSCCACHE_TEST_DEFINE", ]) .arg(tmpdir.join(source_file)) .arg("-o") .arg(tmpdir.join(obj_file)) .envs(envs) .assert() .success(); } pub fn dist_test_sccache_client_cfg( tmpdir: &Path, scheduler_url: HTTPUrl, ) -> sccache::config::FileConfig { let mut sccache_cfg = harness::sccache_client_cfg(tmpdir, false); sccache_cfg.cache.disk.as_mut().unwrap().size = 0; sccache_cfg.dist.scheduler_url = Some(scheduler_url); sccache_cfg } #[test] #[cfg_attr(not(feature = "dist-tests"), ignore)] fn test_dist_basic() { let tmpdir = tempfile::Builder::new() .prefix("sccache_dist_test") .tempdir() .unwrap(); let tmpdir = tmpdir.path(); let sccache_dist = harness::sccache_dist_path(); let mut system = harness::DistSystem::new(&sccache_dist, tmpdir); system.add_scheduler(); system.add_server(); let sccache_cfg = dist_test_sccache_client_cfg(tmpdir, system.scheduler_url()); let sccache_cfg_path = tmpdir.join("sccache-cfg.json"); write_json_cfg(tmpdir, "sccache-cfg.json", &sccache_cfg); let sccache_cached_cfg_path = tmpdir.join("sccache-cached-cfg"); stop_local_daemon(); start_local_daemon(&sccache_cfg_path, &sccache_cached_cfg_path); basic_compile(tmpdir, &sccache_cfg_path, &sccache_cached_cfg_path); get_stats(|info| { assert_eq!(1, info.stats.dist_compiles.values().sum::()); assert_eq!(0, info.stats.dist_errors); assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); }); } #[test] #[cfg_attr(not(feature = "dist-tests"), ignore)] fn test_dist_restartedserver() { let tmpdir = tempfile::Builder::new() .prefix("sccache_dist_test") 
.tempdir() .unwrap(); let tmpdir = tmpdir.path(); let sccache_dist = harness::sccache_dist_path(); let mut system = harness::DistSystem::new(&sccache_dist, tmpdir); system.add_scheduler(); let server_handle = system.add_server(); let sccache_cfg = dist_test_sccache_client_cfg(tmpdir, system.scheduler_url()); let sccache_cfg_path = tmpdir.join("sccache-cfg.json"); write_json_cfg(tmpdir, "sccache-cfg.json", &sccache_cfg); let sccache_cached_cfg_path = tmpdir.join("sccache-cached-cfg"); stop_local_daemon(); start_local_daemon(&sccache_cfg_path, &sccache_cached_cfg_path); basic_compile(tmpdir, &sccache_cfg_path, &sccache_cached_cfg_path); system.restart_server(&server_handle); basic_compile(tmpdir, &sccache_cfg_path, &sccache_cached_cfg_path); get_stats(|info| { assert_eq!(2, info.stats.dist_compiles.values().sum::()); assert_eq!(0, info.stats.dist_errors); assert_eq!(2, info.stats.compile_requests); assert_eq!(2, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); }); } #[test] #[cfg_attr(not(feature = "dist-tests"), ignore)] fn test_dist_nobuilder() { let tmpdir = tempfile::Builder::new() .prefix("sccache_dist_test") .tempdir() .unwrap(); let tmpdir = tmpdir.path(); let sccache_dist = harness::sccache_dist_path(); let mut system = harness::DistSystem::new(&sccache_dist, tmpdir); system.add_scheduler(); let sccache_cfg = dist_test_sccache_client_cfg(tmpdir, system.scheduler_url()); let sccache_cfg_path = tmpdir.join("sccache-cfg.json"); write_json_cfg(tmpdir, "sccache-cfg.json", &sccache_cfg); let sccache_cached_cfg_path = tmpdir.join("sccache-cached-cfg"); stop_local_daemon(); start_local_daemon(&sccache_cfg_path, &sccache_cached_cfg_path); basic_compile(tmpdir, &sccache_cfg_path, &sccache_cached_cfg_path); get_stats(|info| { assert_eq!(0, info.stats.dist_compiles.values().sum::()); assert_eq!(1, info.stats.dist_errors); assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); }); } struct FailingServer; impl ServerIncoming for FailingServer { fn handle_assign_job(&self, _job_id: JobId, _tc: Toolchain) -> Result { let need_toolchain = false; let state = JobState::Ready; Ok(AssignJobResult { need_toolchain, state, }) } fn handle_submit_toolchain( &self, _requester: &dyn ServerOutgoing, _job_id: JobId, _tc_rdr: ToolchainReader, ) -> Result { panic!("should not have submitted toolchain") } fn handle_run_job( &self, requester: &dyn ServerOutgoing, job_id: JobId, _command: CompileCommand, _outputs: Vec, _inputs_rdr: InputsReader, ) -> Result { requester .do_update_job_state(job_id, JobState::Started) .context("Updating job state failed")?; bail!("internal build failure") } } #[test] #[cfg_attr(not(feature = "dist-tests"), ignore)] fn test_dist_failingserver() { let tmpdir = tempfile::Builder::new() .prefix("sccache_dist_test") .tempdir() .unwrap(); let tmpdir = tmpdir.path(); let sccache_dist = harness::sccache_dist_path(); let mut system = harness::DistSystem::new(&sccache_dist, tmpdir); system.add_scheduler(); system.add_custom_server(FailingServer); let sccache_cfg = dist_test_sccache_client_cfg(tmpdir, system.scheduler_url()); let sccache_cfg_path = tmpdir.join("sccache-cfg.json"); write_json_cfg(tmpdir, "sccache-cfg.json", &sccache_cfg); let sccache_cached_cfg_path = tmpdir.join("sccache-cached-cfg"); stop_local_daemon(); start_local_daemon(&sccache_cfg_path, &sccache_cached_cfg_path); 
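    // The FailingServer above reports a build failure for every job, so the
    // client is expected to fall back to compiling locally: the compile still
    // succeeds, but the stats record one dist error and zero dist compiles.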
basic_compile(tmpdir, &sccache_cfg_path, &sccache_cached_cfg_path); get_stats(|info| { assert_eq!(0, info.stats.dist_compiles.values().sum::()); assert_eq!(1, info.stats.dist_errors); assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); }); } mozilla-sccache-40c3d6b/tests/harness/000077500000000000000000000000001475712407500200005ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/harness/Dockerfile.sccache-dist000066400000000000000000000001541475712407500243230ustar00rootroot00000000000000FROM ubuntu:latest RUN apt-get update && \ apt-get install -y libcap2 bubblewrap && \ apt-get clean mozilla-sccache-40c3d6b/tests/harness/mod.rs000066400000000000000000000535621475712407500211400ustar00rootroot00000000000000use fs_err as fs; #[cfg(any(feature = "dist-client", feature = "dist-server"))] use sccache::config::HTTPUrl; use sccache::dist::{self, SchedulerStatusResult, ServerId}; use sccache::server::ServerInfo; use std::env; use std::io::Write; use std::net::{self, IpAddr, SocketAddr}; use std::path::{Path, PathBuf}; use std::process::{Command, Output, Stdio}; use std::str::{self, FromStr}; use std::thread; use std::time::{Duration, Instant}; use assert_cmd::prelude::*; #[cfg(feature = "dist-server")] use nix::{ sys::{ signal::Signal, wait::{WaitPidFlag, WaitStatus}, }, unistd::{ForkResult, Pid}, }; use predicates::prelude::*; use serde::Serialize; use uuid::Uuid; const CONTAINER_NAME_PREFIX: &str = "sccache_dist_test"; const DIST_IMAGE: &str = "sccache_dist_test_image"; const DIST_DOCKERFILE: &str = include_str!("Dockerfile.sccache-dist"); const DIST_IMAGE_BWRAP_PATH: &str = "/usr/bin/bwrap"; const MAX_STARTUP_WAIT: Duration = Duration::from_secs(5); const DIST_SERVER_TOKEN: &str = "THIS IS THE TEST TOKEN"; const CONFIGS_CONTAINER_PATH: &str = "/sccache-bits"; const BUILD_DIR_CONTAINER_PATH: &str = "/sccache-bits/build-dir"; const SCHEDULER_PORT: u16 = 10500; const SERVER_PORT: u16 = 12345; // arbitrary const TC_CACHE_SIZE: u64 = 1024 * 1024 * 1024; // 1 gig pub fn start_local_daemon(cfg_path: &Path, cached_cfg_path: &Path) { // Don't run this with run() because on Windows `wait_with_output` // will hang because the internal server process is not detached. if !sccache_command() .arg("--start-server") // Uncomment following lines to debug locally. 
// .env("SCCACHE_LOG", "sccache=trace") // .env("RUST_LOG_STYLE", "never") // .env( // "SCCACHE_ERROR_LOG", // env::temp_dir().join("sccache_local_daemon.txt"), // ) .env("SCCACHE_CONF", cfg_path) .env("SCCACHE_CACHED_CONF", cached_cfg_path) .status() .unwrap() .success() { panic!("Failed to start local daemon"); } } pub fn stop_local_daemon() -> bool { trace!("sccache --stop-server"); sccache_command() .arg("--stop-server") .stdout(Stdio::null()) .stderr(Stdio::null()) .status() .map_or(false, |status| status.success()) } pub fn get_stats(f: F) { sccache_command() .args(["--show-stats", "--stats-format=json"]) .assert() .success() .stdout(predicate::function(move |output: &[u8]| { let s = str::from_utf8(output).expect("Output not UTF-8"); let stats = serde_json::from_str(s).expect("Failed to parse JSON stats"); eprintln!("get server stats: {stats:?}"); f(stats); true })); } #[allow(unused)] pub fn zero_stats() { trace!("sccache --zero-stats"); drop( sccache_command() .arg("--zero-stats") .stdout(Stdio::null()) .stderr(Stdio::null()) .status(), ); } pub fn write_json_cfg(path: &Path, filename: &str, contents: &T) { let p = path.join(filename); let mut f = fs::File::create(p).unwrap(); f.write_all(&serde_json::to_vec(contents).unwrap()).unwrap(); } pub fn write_source(path: &Path, filename: &str, contents: &str) { let p = path.join(filename); let mut f = fs::File::create(p).unwrap(); f.write_all(contents.as_bytes()).unwrap(); } // Prune any environment variables that could adversely affect test execution. pub fn sccache_command() -> Command { use sccache::util::OsStrExt; let mut cmd = Command::new(assert_cmd::cargo::cargo_bin("sccache")); for (var, _) in env::vars_os() { if var.starts_with("SCCACHE_") { cmd.env_remove(var); } } cmd } #[cfg(feature = "dist-server")] pub fn sccache_dist_path() -> PathBuf { assert_cmd::cargo::cargo_bin("sccache-dist") } pub fn sccache_client_cfg( tmpdir: &Path, preprocessor_cache_mode: bool, ) -> sccache::config::FileConfig { let cache_relpath = "client-cache"; let dist_cache_relpath = "client-dist-cache"; fs::create_dir(tmpdir.join(cache_relpath)).unwrap(); fs::create_dir(tmpdir.join(dist_cache_relpath)).unwrap(); let disk_cache = sccache::config::DiskCacheConfig { dir: tmpdir.join(cache_relpath), preprocessor_cache_mode: sccache::config::PreprocessorCacheModeConfig { use_preprocessor_cache_mode: preprocessor_cache_mode, ..Default::default() }, ..Default::default() }; sccache::config::FileConfig { cache: sccache::config::CacheConfigs { azure: None, disk: Some(disk_cache), gcs: None, gha: None, memcached: None, redis: None, s3: None, webdav: None, oss: None, }, dist: sccache::config::DistConfig { auth: Default::default(), // dangerously_insecure scheduler_url: None, cache_dir: tmpdir.join(dist_cache_relpath), toolchains: vec![], toolchain_cache_size: TC_CACHE_SIZE, rewrite_includes_only: false, // TODO }, server_startup_timeout_ms: None, } } #[cfg(feature = "dist-server")] fn sccache_scheduler_cfg() -> sccache::config::scheduler::Config { sccache::config::scheduler::Config { public_addr: SocketAddr::from(([0, 0, 0, 0], SCHEDULER_PORT)), client_auth: sccache::config::scheduler::ClientAuth::Insecure, server_auth: sccache::config::scheduler::ServerAuth::Token { token: DIST_SERVER_TOKEN.to_owned(), }, } } #[cfg(feature = "dist-server")] fn sccache_server_cfg( tmpdir: &Path, scheduler_url: HTTPUrl, server_ip: IpAddr, ) -> sccache::config::server::Config { let relpath = "server-cache"; fs::create_dir(tmpdir.join(relpath)).unwrap(); sccache::config::server::Config { 
builder: sccache::config::server::BuilderType::Overlay { build_dir: BUILD_DIR_CONTAINER_PATH.into(), bwrap_path: DIST_IMAGE_BWRAP_PATH.into(), }, cache_dir: Path::new(CONFIGS_CONTAINER_PATH).join(relpath), public_addr: SocketAddr::new(server_ip, SERVER_PORT), bind_address: Some(SocketAddr::from(([0, 0, 0, 0], SERVER_PORT))), scheduler_url, scheduler_auth: sccache::config::server::SchedulerAuth::Token { token: DIST_SERVER_TOKEN.to_owned(), }, toolchain_cache_size: TC_CACHE_SIZE, } } // TODO: this is copied from the sccache-dist binary - it's not clear where would be a better place to put the // code so that it can be included here #[cfg(feature = "dist-server")] fn create_server_token(server_id: ServerId, auth_token: &str) -> String { format!("{} {}", server_id.addr(), auth_token) } #[cfg(feature = "dist-server")] pub enum ServerHandle { Container { cid: String, url: HTTPUrl }, Process { pid: Pid, url: HTTPUrl }, } #[cfg(feature = "dist-server")] pub struct DistSystem { sccache_dist: PathBuf, tmpdir: PathBuf, scheduler_name: Option, server_names: Vec, server_pids: Vec, } #[cfg(feature = "dist-server")] impl DistSystem { pub fn new(sccache_dist: &Path, tmpdir: &Path) -> Self { // Make sure the docker image is available, building it if necessary let mut child = Command::new("docker") .args(["build", "-q", "-t", DIST_IMAGE, "-"]) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .unwrap(); child .stdin .as_mut() .unwrap() .write_all(DIST_DOCKERFILE.as_bytes()) .unwrap(); let output = child.wait_with_output().unwrap(); check_output(&output); let tmpdir = tmpdir.join("distsystem"); fs::create_dir(&tmpdir).unwrap(); Self { sccache_dist: sccache_dist.to_owned(), tmpdir, scheduler_name: None, server_names: vec![], server_pids: vec![], } } pub fn add_scheduler(&mut self) { let scheduler_cfg_relpath = "scheduler-cfg.json"; let scheduler_cfg_path = self.tmpdir.join(scheduler_cfg_relpath); let scheduler_cfg_container_path = Path::new(CONFIGS_CONTAINER_PATH).join(scheduler_cfg_relpath); let scheduler_cfg = sccache_scheduler_cfg(); fs::File::create(scheduler_cfg_path) .unwrap() .write_all(&serde_json::to_vec(&scheduler_cfg).unwrap()) .unwrap(); // Create the scheduler let scheduler_name = make_container_name("scheduler"); let output = Command::new("docker") .args([ "run", "--name", &scheduler_name, "-e", "SCCACHE_NO_DAEMON=1", "-e", "SCCACHE_LOG=debug", "-e", "RUST_BACKTRACE=1", "--network", "host", "-v", &format!("{}:/sccache-dist", self.sccache_dist.to_str().unwrap()), "-v", &format!( "{}:{}", self.tmpdir.to_str().unwrap(), CONFIGS_CONTAINER_PATH ), "-d", DIST_IMAGE, "bash", "-c", &format!( r#" set -o errexit && exec /sccache-dist scheduler --config {cfg} "#, cfg = scheduler_cfg_container_path.to_str().unwrap() ), ]) .output() .unwrap(); self.scheduler_name = Some(scheduler_name); check_output(&output); let scheduler_url = self.scheduler_url(); wait_for_http(scheduler_url, Duration::from_millis(100), MAX_STARTUP_WAIT); wait_for( || { let status = self.scheduler_status(); if matches!( status, SchedulerStatusResult { num_servers: 0, num_cpus: _, in_progress: 0 } ) { Ok(()) } else { Err(format!("{:?}", status)) } }, Duration::from_millis(100), MAX_STARTUP_WAIT, ); } pub fn add_server(&mut self) -> ServerHandle { let server_cfg_relpath = format!("server-cfg-{}.json", self.server_names.len()); let server_cfg_path = self.tmpdir.join(&server_cfg_relpath); let server_cfg_container_path = Path::new(CONFIGS_CONTAINER_PATH).join(server_cfg_relpath); let server_name = 
make_container_name("server"); let output = Command::new("docker") .args([ "run", // Important for the bubblewrap builder "--privileged", "--name", &server_name, "-e", "SCCACHE_LOG=debug", "-e", "RUST_BACKTRACE=1", "--network", "host", "-v", &format!("{}:/sccache-dist", self.sccache_dist.to_str().unwrap()), "-v", &format!( "{}:{}", self.tmpdir.to_str().unwrap(), CONFIGS_CONTAINER_PATH ), "-d", DIST_IMAGE, "bash", "-c", &format!( r#" set -o errexit && while [ ! -f {cfg}.ready ]; do sleep 0.1; done && exec /sccache-dist server --config {cfg} "#, cfg = server_cfg_container_path.to_str().unwrap() ), ]) .output() .unwrap(); self.server_names.push(server_name.clone()); check_output(&output); let server_ip = IpAddr::from_str("127.0.0.1").unwrap(); let server_cfg = sccache_server_cfg(&self.tmpdir, self.scheduler_url(), server_ip); fs::File::create(&server_cfg_path) .unwrap() .write_all(&serde_json::to_vec(&server_cfg).unwrap()) .unwrap(); fs::File::create(format!("{}.ready", server_cfg_path.to_str().unwrap())).unwrap(); let url = HTTPUrl::from_url( reqwest::Url::parse(&format!("https://{}:{}", server_ip, SERVER_PORT)).unwrap(), ); let handle = ServerHandle::Container { cid: server_name, url, }; self.wait_server_ready(&handle); handle } pub fn add_custom_server( &mut self, handler: S, ) -> ServerHandle { let server_addr = { let ip = IpAddr::from_str("127.0.0.1").unwrap(); let listener = net::TcpListener::bind(SocketAddr::from((ip, 0))).unwrap(); listener.local_addr().unwrap() }; let token = create_server_token(ServerId::new(server_addr), DIST_SERVER_TOKEN); let server = dist::http::Server::new( server_addr, Some(SocketAddr::from(([0, 0, 0, 0], server_addr.port()))), self.scheduler_url().to_url(), token, handler, ) .unwrap(); let pid = match unsafe { nix::unistd::fork() }.unwrap() { ForkResult::Parent { child } => { self.server_pids.push(child); child } ForkResult::Child => { env::set_var("SCCACHE_LOG", "sccache=trace"); env_logger::try_init().unwrap(); server.start().unwrap(); unreachable!(); } }; let url = HTTPUrl::from_url(reqwest::Url::parse(&format!("https://{}", server_addr)).unwrap()); let handle = ServerHandle::Process { pid, url }; self.wait_server_ready(&handle); handle } pub fn restart_server(&mut self, handle: &ServerHandle) { match handle { ServerHandle::Container { cid, url: _ } => { let output = Command::new("docker") .args(["restart", cid]) .output() .unwrap(); check_output(&output); } ServerHandle::Process { pid: _, url: _ } => { // TODO: pretty easy, just no need yet panic!("restart not yet implemented for pids") } } self.wait_server_ready(handle) } pub fn wait_server_ready(&mut self, handle: &ServerHandle) { let url = match handle { ServerHandle::Container { cid: _, url } | ServerHandle::Process { pid: _, url } => { url.clone() } }; wait_for_http(url, Duration::from_millis(100), MAX_STARTUP_WAIT); wait_for( || { let status = self.scheduler_status(); if matches!( status, SchedulerStatusResult { num_servers: 1, num_cpus: _, in_progress: 0 } ) { Ok(()) } else { Err(format!("{:?}", status)) } }, Duration::from_millis(100), MAX_STARTUP_WAIT, ); } pub fn scheduler_url(&self) -> HTTPUrl { let url = format!("http://127.0.0.1:{}", SCHEDULER_PORT); HTTPUrl::from_url(reqwest::Url::parse(&url).unwrap()) } fn scheduler_status(&self) -> SchedulerStatusResult { let res = reqwest::blocking::get(dist::http::urls::scheduler_status( &self.scheduler_url().to_url(), )) .unwrap(); assert!(res.status().is_success()); bincode::deserialize_from(res).unwrap() } } // If you want containers to hang around 
(e.g. for debugging), comment out the "rm -f" lines #[cfg(feature = "dist-server")] impl Drop for DistSystem { fn drop(&mut self) { let mut did_err = false; // Panicking halfway through drop would either abort (if it's a double panic) or leave us with // resources that aren't yet cleaned up. Instead, do as much as possible then decide what to do // at the end - panic (if not already doing so) or let the panic continue macro_rules! droperr { ($e:expr) => { match $e { Ok(()) => (), Err(e) => { did_err = true; eprintln!("Error with {}: {}", stringify!($e), e) } } }; } let mut logs = vec![]; let mut outputs = vec![]; let mut exits = vec![]; if let Some(scheduler_name) = self.scheduler_name.as_ref() { droperr!(Command::new("docker") .args(["logs", scheduler_name]) .output() .map(|o| logs.push((scheduler_name, o)))); droperr!(Command::new("docker") .args(["kill", scheduler_name]) .output() .map(|o| outputs.push((scheduler_name, o)))); droperr!(Command::new("docker") .args(["rm", "-f", scheduler_name]) .output() .map(|o| outputs.push((scheduler_name, o)))); } for server_name in self.server_names.iter() { droperr!(Command::new("docker") .args(["logs", server_name]) .output() .map(|o| logs.push((server_name, o)))); droperr!(Command::new("docker") .args(["kill", server_name]) .output() .map(|o| outputs.push((server_name, o)))); droperr!(Command::new("docker") .args(["rm", "-f", server_name]) .output() .map(|o| outputs.push((server_name, o)))); } for &pid in self.server_pids.iter() { droperr!(nix::sys::signal::kill(pid, Signal::SIGINT)); thread::sleep(Duration::from_millis(100)); let mut killagain = true; // Default to trying to kill again, e.g. if there was an error waiting on the pid droperr!( nix::sys::wait::waitpid(pid, Some(WaitPidFlag::WNOHANG)).map(|ws| { if ws != WaitStatus::StillAlive { killagain = false; exits.push(ws) } }) ); if killagain { eprintln!("SIGINT didn't kill process, trying SIGKILL"); droperr!(nix::sys::signal::kill(pid, Signal::SIGKILL)); droperr!(nix::sys::wait::waitpid(pid, Some(WaitPidFlag::WNOHANG)) .map_err(|e| e.to_string()) .and_then(|ws| if ws == WaitStatus::StillAlive { Err("process alive after sigkill".to_owned()) } else { exits.push(ws); Ok(()) })); } } for ( container, Output { status, stdout, stderr, }, ) in logs { println!( "LOGS == ({}) ==\n> {} <:\n## STDOUT\n{}\n\n## STDERR\n{}\n====", status, container, String::from_utf8_lossy(&stdout), String::from_utf8_lossy(&stderr) ); } for ( container, Output { status, stdout, stderr, }, ) in outputs { println!( "OUTPUTS == ({}) ==\n> {} <:\n## STDOUT\n{}\n\n## STDERR\n{}\n====", status, container, String::from_utf8_lossy(&stdout), String::from_utf8_lossy(&stderr) ); } for exit in exits { println!("EXIT: {:?}", exit) } if did_err && !thread::panicking() { panic!("Encountered failures during dist system teardown") } } } fn make_container_name(tag: &str) -> String { format!( "{}_{}_{}", CONTAINER_NAME_PREFIX, tag, Uuid::new_v4().hyphenated() ) } fn check_output(output: &Output) { if !output.status.success() { println!("{}\n\n[BEGIN STDOUT]\n===========\n{}\n===========\n[FIN STDOUT]\n\n[BEGIN STDERR]\n===========\n{}\n===========\n[FIN STDERR]\n\n", output.status, String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); panic!() } } #[cfg(feature = "dist-server")] fn wait_for_http(url: HTTPUrl, interval: Duration, max_wait: Duration) { // TODO: after upgrading to reqwest >= 0.9, use 'danger_accept_invalid_certs' and stick with that rather than tcp wait_for( || { let url = url.to_url(); let url = 
url.socket_addrs(|| None).unwrap(); match net::TcpStream::connect(url.as_slice()) { Ok(_) => Ok(()), Err(e) => Err(e.to_string()), } }, interval, max_wait, ) } fn wait_for Result<(), String>>(f: F, interval: Duration, max_wait: Duration) { let start = Instant::now(); let mut lasterr; loop { match f() { Ok(()) => return, Err(e) => lasterr = e, } if start.elapsed() > max_wait { break; } thread::sleep(interval) } panic!("wait timed out, last error result: {}", lasterr) } mozilla-sccache-40c3d6b/tests/helpers/000077500000000000000000000000001475712407500177775ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/helpers/mod.rs000066400000000000000000000110751475712407500211300ustar00rootroot00000000000000use anyhow::{Context, Result}; use assert_cmd::assert::OutputAssertExt; use chrono::Local; use fs_err as fs; use log::trace; use once_cell::sync::Lazy; use std::convert::Infallible; use std::ffi::OsString; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; pub static CRATE_DIR: Lazy = Lazy::new(|| Path::new(file!()).parent().unwrap().join("../test-crate")); pub static CARGO: Lazy = Lazy::new(|| std::env::var_os("CARGO").unwrap()); pub static SCCACHE_BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin("sccache")); /// Ensures the logger is only initialized once. Panics if initialization fails. static LOGGER: Lazy> = Lazy::new(|| { env_logger::Builder::new() .format(|f, record| { writeln!( f, "{} [{}] - {}", Local::now().format("%Y-%m-%dT%H:%M:%S%.3f"), record.level(), record.args() ) }) .parse_env("RUST_LOG") .init(); Ok(()) }); /// Used as a test setup fixture. The drop implementation cleans up after a _successful_ test. /// We catch the panic to ensure that the drop runs and the TempDir is cleaned up. pub struct SccacheTest<'a> { /// Tempdir used for Sccache cache and cargo output. It is kept in the struct only to have the /// destructor run when SccacheTest goes out of scope, but is never used otherwise. #[allow(dead_code)] pub tempdir: tempfile::TempDir, pub env: Vec<(&'a str, std::ffi::OsString)>, } impl SccacheTest<'_> { pub fn new(additional_envs: Option<&[(&'static str, std::ffi::OsString)]>) -> Result { assert!(LOGGER.is_ok()); // Create a temp directory to use for the disk cache. let tempdir = tempfile::Builder::new() .prefix("sccache_test_rust_cargo") .tempdir() .context("Failed to create tempdir")?; let cache_dir = tempdir.path().join("cache"); fs::create_dir(&cache_dir)?; let cargo_dir = tempdir.path().join("cargo"); fs::create_dir(&cargo_dir)?; // Ensure there's no existing sccache server running. stop_sccache()?; trace!("sccache --start-server"); Command::new(SCCACHE_BIN.as_os_str()) .arg("--start-server") .env("SCCACHE_DIR", &cache_dir) .assert() .try_success() .context("Failed to start sccache server")?; let mut env = vec![ ("CARGO_TARGET_DIR", cargo_dir.as_os_str().to_owned()), ("RUSTC_WRAPPER", SCCACHE_BIN.as_os_str().to_owned()), // Explicitly disable incremental compilation because sccache is unable to cache it at // the time of writing. ("CARGO_INCREMENTAL", OsString::from("0")), ("TEST_ENV_VAR", OsString::from("1")), ]; if let Some(vec) = additional_envs { env.extend_from_slice(vec); } Ok(SccacheTest { tempdir, env: env.to_owned(), }) } /// Show the statistics for sccache. This will be called at the end of a test and making this /// an associated function will ensure that the struct lives until the end of the test. 
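    ///
    /// Sketch of typical use at the end of a test body:
    ///
    /// ```ignore
    /// let test_info = SccacheTest::new(None)?;
    /// // ... drive a build through sccache ...
    /// test_info.show_stats()?;
    /// ```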
pub fn show_stats(&self) -> assert_cmd::assert::AssertResult { trace!("sccache --show-stats"); Command::new(SCCACHE_BIN.as_os_str()) .args(["--show-stats", "--stats-format=json"]) .assert() .try_success() } pub fn show_text_stats(&self, advanced: bool) -> assert_cmd::assert::AssertResult { let cmd = if advanced { "--show-adv-stats" } else { "--show-stats" }; trace!("sccache {cmd}"); Command::new(SCCACHE_BIN.as_os_str()) .args([cmd, "--stats-format=text"]) .assert() .try_success() } } impl Drop for SccacheTest<'_> { fn drop(&mut self) { stop_sccache().expect("Stopping Sccache server failed"); } } pub fn stop_sccache() -> Result<()> { trace!("sccache --stop-server"); Command::new(SCCACHE_BIN.as_os_str()) .arg("--stop-server") .stdout(Stdio::null()) .stderr(Stdio::null()) .status() .context("Failed to stop sccache server")?; Ok(()) } pub fn cargo_clean(test_info: &SccacheTest) -> Result<()> { Command::new(CARGO.as_os_str()) .args(["clean"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_success()?; Ok(()) } mozilla-sccache-40c3d6b/tests/htpasswd000066400000000000000000000000161475712407500201120ustar00rootroot00000000000000bar:{PLAIN}bazmozilla-sccache-40c3d6b/tests/msvc-preprocessing/000077500000000000000000000000001475712407500221665ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/msvc-preprocessing/args.rsp000066400000000000000000000000361475712407500236470ustar00rootroot00000000000000foo.cpp -Fofoo.o -W4 -WX -Wallmozilla-sccache-40c3d6b/tests/msvc-preprocessing/foo.cpp000066400000000000000000000014011475712407500234510ustar00rootroot00000000000000// This tests sccache's ability to handle a known bug in cl.exe // More information: https://github.com/mozilla/sccache/issues/1725 __pragma(warning(push)) __pragma(warning(disable: 4668)) // cl.exe during compilation will correctly ignore 4668 here (undefined define) // during preprocessing, it will not ignore 4668 // sccache must explicitly ignore this warning when preprocessing (to figure out the cache key) #if UNDEFINED_MACRO_TRIGGERING_C4668 #error "This error should be unreachable" #endif __pragma(warning(pop)) // Minimal reproducible example for errors from user code // More information: https://github.com/mozilla/sccache/issues/2250 #pragma warning(disable : 4002) #define F(x, y) int main() { F(2, , , , , , 3, , , , , , ) // C4002 return 0; } mozilla-sccache-40c3d6b/tests/msvc/000077500000000000000000000000001475712407500173055ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/msvc/args.rsp000066400000000000000000000000571475712407500207710ustar00rootroot00000000000000foo.cpp -Fofoo.o /sourceDependencies foo.o.jsonmozilla-sccache-40c3d6b/tests/msvc/foo.cpp000066400000000000000000000001211475712407500205660ustar00rootroot00000000000000#include int main() { std::cout << "Hello World!\n"; return 0; } mozilla-sccache-40c3d6b/tests/nginx_http_cache.conf000066400000000000000000000011751475712407500225150ustar00rootroot00000000000000error_log /tmp/error.log; pid /tmp/nginx.pid; load_module /usr/lib/nginx/modules/ngx_http_dav_ext_module.so; events { worker_connections 1024; } http { server { listen 127.0.0.1:8080; server_name localhost; access_log /tmp/access.log; root /tmp/static; location / { client_body_temp_path /tmp; log_not_found off; dav_methods PUT DELETE MKCOL; dav_ext_methods PROPFIND OPTIONS LOCK UNLOCK; create_full_put_path on; client_max_body_size 1024M; auth_basic "Authentication required"; auth_basic_user_file /tmp/htpasswd; } } } 
mozilla-sccache-40c3d6b/tests/oauth.rs000077500000000000000000000231631475712407500200330ustar00rootroot00000000000000#![deny(rust_2018_idioms)] #![cfg(feature = "dist-client")] use fs_err as fs; use std::io::{self, Read, Write}; use std::net::TcpStream; use std::path::Path; use std::process::{Command, Output, Stdio}; use std::thread; use std::time::{Duration, Instant}; use thirtyfour_sync::prelude::*; const LOCAL_AUTH_BASE_URL: &str = "http://localhost:12731/"; const USERNAME_SELECTOR: &str = ".auth0-lock-input-email .auth0-lock-input"; const PASSWORD_SELECTOR: &str = ".auth0-lock-input-password .auth0-lock-input"; const LOGIN_SELECTOR: &str = ".auth0-lock-submit"; const BROWSER_RETRY_WAIT: Duration = Duration::from_secs(1); const BROWSER_MAX_WAIT: Duration = Duration::from_secs(10); // The configuration below is for the sccache-test tenant under aidanhs' auth0 account. There // is one user, one api and two applications. There is a rule ensuring that oauth access is // never granted to the built-in auth0 tenant management API (though the worst that could happen // is tests start failing because someone deliberately messes up the configuration). const TEST_USERNAME: &str = "test@example.com"; const TEST_PASSWORD: &str = "test1234"; fn generate_code_grant_pkce_auth_config() -> sccache::config::DistAuth { sccache::config::DistAuth::Oauth2CodeGrantPKCE { client_id: "Xmbl6zRW1o1tJ5LQOz0p65NwY47aMO7A".to_owned(), auth_url: "https://sccache-test.auth0.com/authorize?audience=https://sccache-dist-test-api/" .to_owned(), token_url: "https://sccache-test.auth0.com/oauth/token".to_owned(), } } fn generate_implicit_auth_config() -> sccache::config::DistAuth { sccache::config::DistAuth::Oauth2Implicit { client_id: "TTborSAyjBnSi1W11201ZzNu9gSg63bq".to_owned(), auth_url: "https://sccache-test.auth0.com/authorize?audience=https://sccache-dist-test-api/" .to_owned(), } } fn config_with_dist_auth( tmpdir: &Path, auth_config: sccache::config::DistAuth, ) -> sccache::config::FileConfig { sccache::config::FileConfig { cache: Default::default(), dist: sccache::config::DistConfig { auth: auth_config, scheduler_url: None, cache_dir: tmpdir.join("unused-cache"), toolchains: vec![], toolchain_cache_size: 0, rewrite_includes_only: true, }, server_startup_timeout_ms: None, } } fn sccache_command() -> Command { Command::new(assert_cmd::cargo::cargo_bin("sccache")) } fn retry Option, T>(interval: Duration, until: Duration, mut f: F) -> Option { let start = Instant::now(); while start.elapsed() < until { if let Some(res) = f() { return Some(res); } else { thread::sleep(interval) } } None } trait DriverExt { fn wait_for_element(&self, selector: &str) -> Result<(), ()>; fn wait_on_url bool>(&self, condition: F) -> Result<(), ()>; } impl DriverExt for WebDriver { fn wait_for_element(&self, selector: &str) -> Result<(), ()> { retry(BROWSER_RETRY_WAIT, BROWSER_MAX_WAIT, || { self.find_element(By::Css(selector)).ok() }) .map(|_| ()) .ok_or(()) } fn wait_on_url bool>(&self, condition: F) -> Result<(), ()> { let start = Instant::now(); while start.elapsed() < BROWSER_MAX_WAIT { match self.current_url() { Ok(ref url) if condition(url) => return Ok(()), Ok(_) | Err(_) => thread::sleep(BROWSER_RETRY_WAIT), } } Err(()) } } // With reference to https://github.com/mozilla-iam/cis_tests/blob/ef7740b/pages/auth0.py fn auth0_login(driver: &WebDriver, email: &str, password: &str) { driver.wait_for_element(USERNAME_SELECTOR).unwrap(); thread::sleep(Duration::from_secs(1)); // Give the element time to get ready driver 
.find_element(By::Css(USERNAME_SELECTOR)) .unwrap() .send_keys(email) .unwrap(); driver .find_element(By::Css(PASSWORD_SELECTOR)) .unwrap() .send_keys(password) .unwrap(); driver .find_element(By::Css(LOGIN_SELECTOR)) .unwrap() .click() .unwrap(); } struct SeleniumContainer { cid: String, } fn check_output(output: &Output) { if !output.status.success() { println!( "===========\n{}\n==========\n\n\n\n=========\n{}\n===============\n\n\n", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr) ); panic!() } } impl SeleniumContainer { fn new() -> Self { // https://github.com/SeleniumHQ/docker-selenium#running-the-images let cid = { // It's important to use net=host so that selenium can see pages hosted on localhost let args = &[ "run", "--rm", "-d", "-v", "/dev/shm:/dev/shm", "--net", "host", "-e", "SE_OPTS=-debug", "selenium/standalone-chrome-debug:3.14.0", ]; let output = Command::new("docker").args(args).output().unwrap(); check_output(&output); let stdout = String::from_utf8(output.stdout).unwrap(); stdout.trim().to_owned() }; SeleniumContainer { cid } } } impl Drop for SeleniumContainer { fn drop(&mut self) { let Output { stdout, stderr, .. } = Command::new("docker") .args(["logs", &self.cid]) .output() .unwrap(); let output = Command::new("docker") .args(["kill", &self.cid]) .output() .unwrap(); println!( "====\n> selenium container <:\n## STDOUT\n{}\n\n## STDERR\n{}\n====", String::from_utf8_lossy(&stdout), String::from_utf8_lossy(&stderr) ); check_output(&output) } } #[test] #[cfg_attr( not(all(target_os = "linux", target_arch = "x86_64", feature = "dist-tests")), ignore )] fn test_auth() { // Make sure the client auth port isn't in use, as sccache will gracefully fall back let client_auth_port = sccache::dist::client_auth::VALID_PORTS[0]; assert_eq!( TcpStream::connect(("localhost", client_auth_port)) .unwrap_err() .kind(), io::ErrorKind::ConnectionRefused ); // NOTE: if you want to debug selenium, you can comment out the three lines below and just use a local // selenium instance (download the standalone server and the chrome driver, running the former and putting the // latter on the PATH). Alternatively, because we use the '-debug' image you can use vnc with the password 'secret'. 
assert_eq!( TcpStream::connect(("localhost", 4444)).unwrap_err().kind(), io::ErrorKind::ConnectionRefused ); let _selenium = SeleniumContainer::new(); thread::sleep(Duration::from_secs(3)); // Code grant PKCE println!("Testing code grant pkce auth"); test_auth_with_config(generate_code_grant_pkce_auth_config()); // Implicit println!("Testing implicit auth"); test_auth_with_config(generate_implicit_auth_config()); } fn test_auth_with_config(dist_auth: sccache::config::DistAuth) { let conf_dir = tempfile::Builder::new() .prefix("sccache-test-conf") .tempdir() .unwrap(); let sccache_config = config_with_dist_auth(conf_dir.path(), dist_auth); let sccache_config_path = conf_dir.path().join("sccache-config.json"); fs::File::create(&sccache_config_path) .unwrap() .write_all(&serde_json::to_vec(&sccache_config).unwrap()) .unwrap(); let sccache_cached_config_path = conf_dir.path().join("sccache-cached-config"); let envs = vec![ ("SCCACHE_LOG", "sccache=trace".into()), ("SCCACHE_CONF", sccache_config_path.into_os_string()), ( "SCCACHE_CACHED_CONF", sccache_cached_config_path.clone().into_os_string(), ), ]; println!("Starting sccache --dist-auth"); let mut sccache_process = sccache_command() .arg("--dist-auth") .envs(envs) .stdin(Stdio::null()) .spawn() .unwrap(); thread::sleep(Duration::from_secs(1)); // let the http server start up println!("Beginning in-browser auth"); login(); let status = retry(Duration::from_secs(1), Duration::from_secs(10), || { sccache_process.try_wait().unwrap() }); match status { Some(s) => assert!(s.success()), None => { sccache_process.kill().unwrap(); panic!("Waited too long for process to exit") } } println!("Validating cached config"); let mut cached_config_string = String::new(); fs::File::open(sccache_cached_config_path) .unwrap() .read_to_string(&mut cached_config_string) .unwrap(); let cached_config: sccache::config::CachedFileConfig = toml::from_str(&cached_config_string).unwrap(); assert_eq!(cached_config.dist.auth_tokens.len(), 1); } fn login() { let caps = DesiredCapabilities::chrome(); let driver = WebDriver::new("http://localhost:4444/wd/hub", &caps).unwrap(); println!("Started browser session"); driver.get(LOCAL_AUTH_BASE_URL).unwrap(); driver .wait_on_url(|url| url != LOCAL_AUTH_BASE_URL) .unwrap(); auth0_login(&driver, TEST_USERNAME, TEST_PASSWORD); driver .wait_on_url(|url| url.starts_with(LOCAL_AUTH_BASE_URL)) .unwrap(); // Let any final JS complete thread::sleep(Duration::from_secs(1)); let _ = driver.quit(); } mozilla-sccache-40c3d6b/tests/sccache_args.rs000066400000000000000000000041271475712407500213140ustar00rootroot00000000000000//! Tests for sccache args. //! //! Any copyright is dedicated to the Public Domain. //! 
http://creativecommons.org/publicdomain/zero/1.0/ pub mod helpers; use anyhow::Result; use assert_cmd::prelude::*; use helpers::{stop_sccache, SCCACHE_BIN}; use predicates::prelude::*; use serial_test::serial; use std::process::Command; #[macro_use] extern crate log; #[test] #[serial] #[cfg(feature = "gcs")] fn test_gcp_arg_check() -> Result<()> { trace!("sccache with log"); stop_sccache()?; let mut cmd = Command::new(SCCACHE_BIN.as_os_str()); cmd.arg("--start-server") .env("SCCACHE_LOG", "debug") .env("SCCACHE_GCS_KEY_PATH", "foo.json"); cmd.assert().failure().stderr(predicate::str::contains( "If setting GCS credentials, SCCACHE_GCS_BUCKET", )); stop_sccache()?; let mut cmd = Command::new(SCCACHE_BIN.as_os_str()); cmd.arg("--start-server") .env("SCCACHE_LOG", "debug") .env("SCCACHE_GCS_OAUTH_URL", "http://127.0.0.1"); cmd.assert().failure().stderr(predicate::str::contains( "If setting GCS credentials, SCCACHE_GCS_BUCKET", )); stop_sccache()?; let mut cmd = Command::new(SCCACHE_BIN.as_os_str()); cmd.arg("--start-server") .env("SCCACHE_LOG", "debug") .env("SCCACHE_GCS_BUCKET", "b") .env("SCCACHE_GCS_CREDENTIALS_URL", "not_valid_url//127.0.0.1") .env("SCCACHE_GCS_KEY_PATH", "foo.json"); // This is just a warning cmd.assert() .failure() .stderr(predicate::str::contains("gcs credential url is invalid")); Ok(()) } #[test] #[serial] #[cfg(feature = "s3")] fn test_s3_invalid_args() -> Result<()> { stop_sccache()?; let mut cmd = Command::new(SCCACHE_BIN.as_os_str()); cmd.arg("--start-server") .env("SCCACHE_LOG", "debug") .env("SCCACHE_BUCKET", "test") .env("SCCACHE_REGION", "us-east-1") .env("AWS_ACCESS_KEY_ID", "invalid_ak") .env("AWS_SECRET_ACCESS_KEY", "invalid_sk"); cmd.assert() .failure() .stderr(predicate::str::contains("cache storage failed to read")); Ok(()) } mozilla-sccache-40c3d6b/tests/sccache_cargo.rs000066400000000000000000000301621475712407500214510ustar00rootroot00000000000000//! System tests for compiling Rust code with cargo. //! //! Any copyright is dedicated to the Public Domain. //! http://creativecommons.org/publicdomain/zero/1.0/ pub mod helpers; use anyhow::{Context, Result}; use helpers::{cargo_clean, stop_sccache, CARGO, CRATE_DIR}; use assert_cmd::prelude::*; use fs_err as fs; use helpers::{SccacheTest, SCCACHE_BIN}; use predicates::prelude::*; use serial_test::serial; use std::path::Path; use std::process::Command; #[macro_use] extern crate log; #[test] #[serial] fn test_rust_cargo_check() -> Result<()> { test_rust_cargo_cmd("check", SccacheTest::new(None)?) } #[test] #[serial] fn test_rust_cargo_check_readonly() -> Result<()> { test_rust_cargo_cmd_readonly("check", SccacheTest::new(None)?) } #[test] #[serial] fn test_rust_cargo_build() -> Result<()> { test_rust_cargo_cmd("build", SccacheTest::new(None)?) } #[test] #[serial] fn test_rust_cargo_build_readonly() -> Result<()> { test_rust_cargo_cmd_readonly("build", SccacheTest::new(None)?) 
} #[test] #[serial] #[cfg(unix)] fn test_run_log_no_perm() -> Result<()> { trace!("sccache with log"); stop_sccache()?; let mut cmd = Command::new(SCCACHE_BIN.as_os_str()); cmd.arg("gcc") .env("SCCACHE_ERROR_LOG", "/no-perm.log") // Should not work .env("SCCACHE_LOG", "debug"); cmd.assert().failure().stderr(predicate::str::contains( "Cannot open/write log file '/no-perm.log'", )); Ok(()) } #[test] #[serial] fn test_run_log() -> Result<()> { trace!("sccache with log"); stop_sccache()?; let tempdir = tempfile::Builder::new() .prefix("sccache_test_rust_cargo") .tempdir() .context("Failed to create tempdir")?; let tmppath = tempdir.path().join("perm.log"); let mut cmd = Command::new(SCCACHE_BIN.as_os_str()); cmd.arg("--start-server") .env("SCCACHE_ERROR_LOG", &tmppath) // Should not work .env("SCCACHE_LOG", "debug"); cmd.assert().success(); stop_sccache()?; assert!(Path::new(&tmppath).is_file()); Ok(()) } /// This test checks that changing an environment variable reference by env! is detected by /// sccache, causes a rebuild and is correctly printed to stdout. #[test] #[serial] fn test_rust_cargo_run_with_env_dep_parsing() -> Result<()> { test_rust_cargo_env_dep(SccacheTest::new(None)?) } #[cfg(feature = "unstable")] #[test] #[serial] fn test_rust_cargo_check_nightly() -> Result<()> { use std::ffi::OsString; test_rust_cargo_cmd( "check", SccacheTest::new(Some(&[( "RUSTFLAGS", OsString::from("-Cprofile-generate=."), )]))?, ) } #[cfg(feature = "unstable")] #[test] #[serial] fn test_rust_cargo_check_nightly_readonly() -> Result<()> { use std::ffi::OsString; test_rust_cargo_cmd_readonly( "check", SccacheTest::new(Some(&[( "RUSTFLAGS", OsString::from("-Cprofile-generate=."), )]))?, ) } #[cfg(feature = "unstable")] #[test] #[serial] fn test_rust_cargo_build_nightly() -> Result<()> { use std::ffi::OsString; test_rust_cargo_cmd( "build", SccacheTest::new(Some(&[( "RUSTFLAGS", OsString::from("-Cprofile-generate=."), )]))?, ) } #[cfg(feature = "unstable")] #[test] #[serial] fn test_rust_cargo_build_nightly_readonly() -> Result<()> { use std::ffi::OsString; test_rust_cargo_cmd_readonly( "build", SccacheTest::new(Some(&[( "RUSTFLAGS", OsString::from("-Cprofile-generate=."), )]))?, ) } /// Test that building a simple Rust crate with cargo using sccache results in a cache hit /// when built a second time and a cache miss, when the environment variable referenced via /// env! is changed. fn test_rust_cargo_cmd(cmd: &str, test_info: SccacheTest) -> Result<()> { // `cargo clean` first, just to be sure there's no leftover build objects. cargo_clean(&test_info)?; // Now build the crate with cargo. Command::new(CARGO.as_os_str()) .args([cmd, "--color=never"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8().not())? .try_success()?; // Clean it so we can build it again. cargo_clean(&test_info)?; Command::new(CARGO.as_os_str()) .args([cmd, "--color=always"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8())? .try_success()?; test_info .show_stats()? .try_stdout( predicates::str::contains( r#""cache_hits":{"counts":{"Rust":2},"adv_counts":{"rust":2}}"#, ) .from_utf8(), )? 
        .try_success()?;
    Ok(())
}

fn restart_sccache(
    test_info: &SccacheTest,
    additional_envs: Option<Vec<(String, String)>>,
) -> Result<()> {
    let cache_dir = test_info.tempdir.path().join("cache");
    stop_sccache()?;

    trace!("sccache --start-server");
    let mut cmd = Command::new(SCCACHE_BIN.as_os_str());
    cmd.arg("--start-server");
    cmd.env("SCCACHE_DIR", &cache_dir);
    if let Some(additional_envs) = additional_envs {
        cmd.envs(additional_envs);
    }
    cmd.assert()
        .try_success()
        .context("Failed to start sccache server")?;
    Ok(())
}

/// Test that building a simple Rust crate with cargo using sccache results in the following behaviors (for three different runs):
/// - In read-only mode, a cache miss.
/// - In read-write mode, a cache miss.
/// - In read-only mode, a cache hit.
///
/// The environment variable for read/write mode is added by this function.
fn test_rust_cargo_cmd_readonly(cmd: &str, test_info: SccacheTest) -> Result<()> {
    // `cargo clean` first, just to be sure there's no leftover build objects.
    cargo_clean(&test_info)?;
    // The cache must be put into read-only mode, and that can only be configured
    // when the server starts up, so we need to restart it.
    restart_sccache(
        &test_info,
        Some(vec![("SCCACHE_LOCAL_RW_MODE".into(), "READ_ONLY".into())]),
    )?;

    // Now build the crate with cargo.
    Command::new(CARGO.as_os_str())
        .args([cmd, "--color=never"])
        .envs(test_info.env.iter().cloned())
        .current_dir(CRATE_DIR.as_os_str())
        .assert()
        .try_stderr(predicates::str::contains("\x1b[").from_utf8().not())?
        .try_success()?;

    // Stats reset on server restart, so this needs to be run for each build.
    test_info
        .show_stats()?
        .try_stdout(
            predicates::str::contains(r#""cache_hits":{"counts":{},"adv_counts":{}}"#).from_utf8(),
        )?
        .try_stdout(
            predicates::str::contains(
                r#""cache_misses":{"counts":{"Rust":2},"adv_counts":{"rust":2}}"#,
            )
            .from_utf8(),
        )?
        .try_success()?;

    cargo_clean(&test_info)?;
    restart_sccache(
        &test_info,
        Some(vec![("SCCACHE_LOCAL_RW_MODE".into(), "READ_WRITE".into())]),
    )?;

    Command::new(CARGO.as_os_str())
        .args([cmd, "--color=always"])
        .envs(test_info.env.iter().cloned())
        .current_dir(CRATE_DIR.as_os_str())
        .assert()
        .try_stderr(predicates::str::contains("\x1b[").from_utf8())?
        .try_success()?;

    test_info
        .show_stats()?
        .try_stdout(
            predicates::str::contains(r#""cache_hits":{"counts":{},"adv_counts":{}}"#).from_utf8(),
        )?
        .try_stdout(
            predicates::str::contains(
                r#""cache_misses":{"counts":{"Rust":2},"adv_counts":{"rust":2}}"#,
            )
            .from_utf8(),
        )?
        .try_success()?;

    cargo_clean(&test_info)?;
    restart_sccache(
        &test_info,
        Some(vec![("SCCACHE_LOCAL_RW_MODE".into(), "READ_ONLY".into())]),
    )?;

    Command::new(CARGO.as_os_str())
        .args([cmd, "--color=always"])
        .envs(test_info.env.iter().cloned())
        .current_dir(CRATE_DIR.as_os_str())
        .assert()
        .try_stderr(predicates::str::contains("\x1b[").from_utf8())?
        .try_success()?;

    test_info
        .show_stats()?
        .try_stdout(
            predicates::str::contains(
                r#""cache_hits":{"counts":{"Rust":2},"adv_counts":{"rust":2}}"#,
            )
            .from_utf8(),
        )?
        .try_stdout(
            predicates::str::contains(r#""cache_misses":{"counts":{},"adv_counts":{}}"#)
                .from_utf8(),
        )?
        .try_success()?;

    Ok(())
}

fn test_rust_cargo_env_dep(test_info: SccacheTest) -> Result<()> {
    cargo_clean(&test_info)?;
    // Now build the crate with cargo.
    Command::new(CARGO.as_os_str())
        .args(["run", "--color=never"])
        .envs(test_info.env.iter().cloned())
        .current_dir(CRATE_DIR.as_os_str())
        .assert()
        .try_stderr(predicates::str::contains("\x1b[").from_utf8().not())?
        .try_stdout(predicates::str::contains("Env var: 1"))?
.try_success()?; // Clean it so we can build it again. cargo_clean(&test_info)?; Command::new(CARGO.as_os_str()) .args(["run", "--color=always"]) .envs(test_info.env.iter().cloned()) .env("TEST_ENV_VAR", "OTHER_VALUE") .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8())? .try_stdout(predicates::str::contains("Env var: OTHER_VALUE"))? .try_success()?; // Now get the stats and ensure that we had one cache hit for the second build. // The test crate has one dependency (itoa) so there are two separate compilations, but only // itoa should be cached (due to the changed environment variable). test_info .show_stats()? .try_stdout(predicates::str::contains(r#""cache_hits":{"counts":{"Rust":1}"#).from_utf8())? .try_success()?; drop(test_info); Ok(()) } /// Test that building a simple Rust crate with cargo using sccache in read-only mode with an empty cache results in /// a cache miss that is produced by the readonly storage wrapper (and does not attempt to write to the underlying cache). #[test] #[serial] fn test_rust_cargo_cmd_readonly_preemtive_block() -> Result<()> { let test_info = SccacheTest::new(None)?; // `cargo clean` first, just to be sure there's no leftover build objects. cargo_clean(&test_info)?; let sccache_log = test_info.tempdir.path().join("sccache.log"); stop_sccache()?; restart_sccache( &test_info, Some(vec![ ("SCCACHE_LOCAL_RW_MODE".into(), "READ_ONLY".into()), ("SCCACHE_LOG".into(), "trace".into()), ( "SCCACHE_ERROR_LOG".into(), sccache_log.to_str().unwrap().into(), ), ]), )?; // Now build the crate with cargo. // Assert that our cache miss is due to the readonly storage wrapper, not due to the underlying disk cache. Command::new(CARGO.as_os_str()) .args(["build", "--color=never"]) .envs(test_info.env.iter().cloned()) .current_dir(CRATE_DIR.as_os_str()) .assert() .try_stderr(predicates::str::contains("\x1b[").from_utf8().not())? .try_success()?; let log_contents = fs::read_to_string(sccache_log)?; assert!(predicates::str::contains("server has setup with ReadOnly").eval(log_contents.as_str())); assert!(predicates::str::contains( "Error executing cache write: Cannot write to read-only storage" ) .eval(log_contents.as_str())); assert!(predicates::str::contains("DiskCache::finish_put") .not() .eval(log_contents.as_str())); // Stats reset on server restart, so this needs to be run for each build. test_info .show_stats()? .try_stdout( predicates::str::contains(r#""cache_hits":{"counts":{},"adv_counts":{}}"#).from_utf8(), )? .try_stdout( predicates::str::contains( r#""cache_misses":{"counts":{"Rust":2},"adv_counts":{"rust":2}}"#, ) .from_utf8(), )? 
.try_success()?; Ok(()) } mozilla-sccache-40c3d6b/tests/sccache_rustc.rs000066400000000000000000000076061475712407500215250ustar00rootroot00000000000000#![cfg(unix)] use assert_cmd::Command; use tempfile::tempdir; use std::{ env::{consts::DLL_SUFFIX, var_os}, ffi::OsString, fs::{self, create_dir, create_dir_all, remove_file, set_permissions, File}, io::Write, os::unix::{ fs::symlink, prelude::{OsStrExt, PermissionsExt}, }, path::{Path, PathBuf}, }; struct StopServer; impl Drop for StopServer { fn drop(&mut self) { let _ = Command::cargo_bin("sccache") .unwrap() .arg("--stop-server") .ok(); } } // (temp dir) // ├── rust // symlinks to rust1 on the first run and rust2 on the second // ├── rust1/ // │ ├── bin // │ │ └── rustc // │ ├── lib // │ │ └── driver.so -> ../driver.so // │ └── driver.so // ├── rust2/ // │ ├── bin // │ │ └── rustc // │ ├── lib // │ │ └── driver.so -> ../driver.so // │ └── driver.so // ├── sccache/ // ├── counter // increases by 1 for every compilation that is not cached // ├── RUST_FILE // compile output copied from counter, same content means it was cached // └── RUST_FILE.rs #[test] fn test_symlinks() { let root = tempdir().unwrap(); let root = root.path(); fs::write(root.join("counter"), b"0").unwrap(); fs::write(root.join("RUST_FILE.rs"), []).unwrap(); create_mock_rustc(root.join("rust1")); create_mock_rustc(root.join("rust2")); let rust = root.join("rust"); let bin = rust.join("bin"); let out_file = root.join("RUST_FILE"); symlink(root.join("rust1"), &rust).unwrap(); drop(StopServer); let _stop_server = StopServer; run_sccache(root, &bin); let output1 = fs::read(&out_file).unwrap(); remove_file(&rust).unwrap(); symlink(root.join("rust2"), &rust).unwrap(); run_sccache(root, &bin); let output2 = fs::read(out_file).unwrap(); assert_ne!(output1, output2); } fn create_mock_rustc(dir: PathBuf) { let bin = dir.join("bin"); create_dir_all(&bin).unwrap(); let dll_name = format!("driver{DLL_SUFFIX}"); let dll = dir.join(&dll_name); fs::write(&dll, dir.as_os_str().as_bytes()).unwrap(); let lib = dir.join("lib"); create_dir(&lib).unwrap(); symlink(dll, lib.join(&dll_name)).unwrap(); let rustc = bin.join("rustc"); write!( File::create(&rustc).unwrap(), r#"#!/usr/bin/env sh set -e build=0 while [ "$#" -gt 0 ]; do case "$1" in -vV) echo rustc 1.0.0 exec echo "host: unknown" ;; +stable) exit 1 ;; --print=sysroot) exec echo {} ;; --print) shift if [ "$1" = file-names ]; then exec echo RUST_FILE.rs fi ;; --emit) shift if [ "$1" = dep-info ]; then echo "deps.d: RUST_FILE.rs" > "$3" exec echo "RUST_FILE.rs:" "$3" fi ;; RUST_FILE.rs) build=1 ;; esac shift done if [ "$build" -eq 1 ]; then echo $(($(cat counter) + 1)) > counter cp counter RUST_FILE fi "#, dir.display(), ) .unwrap(); let mut perm = rustc.metadata().unwrap().permissions(); perm.set_mode(0o755); set_permissions(&rustc, perm).unwrap(); } fn run_sccache(root: &Path, path: &Path) { let mut paths: OsString = path.into(); paths.push(":"); paths.push(var_os("PATH").unwrap()); Command::cargo_bin("sccache") .unwrap() .current_dir(root) .env("PATH", paths) .env("SCCACHE_DIR", root.join("sccache")) .arg("rustc") .arg("RUST_FILE.rs") .arg("--crate-name=sccache_rustc_tests") .arg("--crate-type=lib") .arg("--emit=link") .arg("--out-dir") .arg(root) .unwrap(); } mozilla-sccache-40c3d6b/tests/system.rs000066400000000000000000002077641475712407500202470ustar00rootroot00000000000000// System tests for compiling C code. 
//
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![deny(rust_2018_idioms)]
#![allow(dead_code, unused_imports)]

#[macro_use]
extern crate log;

use crate::harness::{
    get_stats, sccache_client_cfg, sccache_command, start_local_daemon, stop_local_daemon,
    write_json_cfg, write_source, zero_stats,
};
use assert_cmd::prelude::*;
use fs::File;
use fs_err as fs;
use log::Level::Trace;
use predicates::prelude::*;
use regex::Regex;
use serial_test::serial;
use std::collections::HashMap;
use std::env;
use std::ffi::{OsStr, OsString};
use std::fmt::{self, format};
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Output, Stdio};
use std::str;
use std::time::{Duration, SystemTime};
use test_case::test_case;
use which::{which, which_in};

mod harness;

#[derive(Clone)]
struct Compiler {
    pub name: &'static str,
    pub exe: OsString,
    pub env_vars: Vec<(OsString, OsString)>,
}

// Test GCC + clang on non-OS X platforms.
#[cfg(all(unix, not(target_os = "macos")))]
const COMPILERS: &[&str] = &["gcc", "clang", "clang++", "nvc", "nvc++"];

// OS X ships a `gcc` that's just a clang wrapper, so only test clang there.
#[cfg(target_os = "macos")]
const COMPILERS: &[&str] = &["clang", "clang++"];

const CUDA_COMPILERS: &[&str] = &["nvcc", "clang++"];

fn adv_key_kind(lang: &str, compiler: &str) -> String {
    let language = lang.to_owned();
    match compiler {
        "clang" | "clang++" => language + " [clang]",
        "gcc" | "g++" => language + " [gcc]",
        "cl.exe" => language + " [msvc]",
        "nvc" | "nvc++" => language + " [nvhpc]",
        "nvcc" => match lang {
            "cudafe++" => "cuda [cudafe++]".to_owned(),
            "ptx" => language + " [cicc]",
            "cubin" => language + " [ptxas]",
            _ => language + " [nvcc]",
        },
        _ => {
            trace!("Unknown compiler type: {}", compiler);
            language + "unknown"
        }
    }
}

//TODO: could test gcc when targeting mingw.

macro_rules! vec_from {
    ( $t:ty, $( $x:expr ),* ) => {
        vec!($( Into::<$t>::into(&$x), )*)
    };
}

// TODO: This will fail if gcc/clang is actually a ccache wrapper, as it is the
// default case on Fedora, e.g.
fn compile_cmdline<T: AsRef<OsStr>>(
    compiler: &str,
    exe: T,
    input: &str,
    output: &str,
    mut extra_args: Vec<OsString>,
) -> Vec<OsString> {
    let mut arg = match compiler {
        "gcc" | "clang" | "clang++" | "nvc" | "nvc++" | "nvcc" => {
            vec_from!(OsString, exe.as_ref(), "-c", input, "-o", output)
        }
        "cl.exe" => vec_from!(OsString, exe, "-c", input, format!("-Fo{}", output)),
        _ => panic!("Unsupported compiler: {}", compiler),
    };
    if !extra_args.is_empty() {
        arg.append(&mut extra_args)
    }
    arg
}
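// As a quick illustration of the shape this helper produces (a sketch added
// for exposition, not one of the suite's original tests), the gcc-style arm
// expands to a plain `-c`/`-o` invocation:
fn _example_compile_cmdline_gcc_shape() {
    let args = compile_cmdline("gcc", "gcc", "test.c", "test.o", Vec::new());
    assert_eq!(
        args,
        vec_from!(OsString, "gcc", "-c", "test.c", "-o", "test.o")
    );
}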
// TODO: This will fail if gcc/clang is actually a ccache wrapper, as it is the
// default case on Fedora, e.g.
fn compile_cuda_cmdline<T: AsRef<OsStr>>(
    compiler: &str,
    exe: T,
    compile_flag: &str,
    input: &str,
    output: &str,
    mut extra_args: Vec<OsString>,
) -> Vec<OsString> {
    let mut arg = match compiler {
        "nvcc" => vec_from!(OsString, exe.as_ref(), compile_flag, input, "-o", output),
        "clang++" => {
            vec_from!(
                OsString,
                exe,
                compile_flag,
                input,
                "--cuda-gpu-arch=sm_70",
                format!(
                    "--cuda-path={}",
                    env::var_os("CUDA_PATH")
                        .or(env::var_os("CUDA_HOME"))
                        .unwrap_or("/usr/local/cuda".into())
                        .to_string_lossy()
                ),
                "--no-cuda-version-check",
                // workaround for clang-cuda on windows-2019 (https://github.com/microsoft/STL/issues/2359)
                "-D_ALLOW_COMPILER_AND_STL_VERSION_MISMATCH",
                "-o",
                output
            )
        }
        _ => panic!("Unsupported compiler: {}", compiler),
    };
    if !extra_args.is_empty() {
        arg.append(&mut extra_args)
    }
    arg
}

// TODO: This will fail if gcc/clang is actually a ccache wrapper, as it is the
// default case on Fedora, e.g.
//
// archs is a list of GPU architectures to compile for.
fn compile_hip_cmdline<T: AsRef<OsStr>>(
    compiler: &str,
    exe: T,
    input: &str,
    output: &str,
    archs: &Vec<String>,
    mut extra_args: Vec<OsString>,
) -> Vec<OsString> {
    let mut arg = match compiler {
        "clang" => {
            vec_from!(OsString, exe, "-x", "hip", "-c", input, "-o", output)
        }
        _ => panic!("Unsupported compiler: \"{}\"", compiler),
    };
    for arch in archs {
        arg.push(format!("--offload-arch={}", arch).into());
    }
    if !extra_args.is_empty() {
        arg.append(&mut extra_args)
    }
    arg
}

const INPUT: &str = "test.c";
const INPUT_CLANG_MULTICALL: &str = "test_clang_multicall.c";
const INPUT_WITH_WHITESPACE: &str = "test_whitespace.c";
const INPUT_WITH_WHITESPACE_ALT: &str = "test_whitespace_alt.c";
const INPUT_ERR: &str = "test_err.c";
const INPUT_MACRO_EXPANSION: &str = "test_macro_expansion.c";
const INPUT_WITH_DEFINE: &str = "test_with_define.c";
const INPUT_FOR_CUDA_A: &str = "test_a.cu";
const INPUT_FOR_CUDA_B: &str = "test_b.cu";
const INPUT_FOR_CUDA_C: &str = "test_c.cu";
const INPUT_FOR_HIP_A: &str = "test_a.hip";
const INPUT_FOR_HIP_B: &str = "test_b.hip";
const INPUT_FOR_HIP_C: &str = "test_c.hip";
const OUTPUT: &str = "test.o";

// Copy the source files into the tempdir so we can compile with relative paths, since the commandline winds up in the hash key.
fn copy_to_tempdir(inputs: &[&str], tempdir: &Path) {
    for f in inputs {
        let original_source_file = Path::new(file!()).parent().unwrap().join(f);
        let source_file = tempdir.join(f);
        trace!("fs::copy({:?}, {:?})", original_source_file, source_file);
        fs::copy(&original_source_file, &source_file).unwrap();
        // Preprocessor cache will not cache files that are too recent.
        // Certain OS/FS combinations have a slow resolution (up to 2s for NFS),
        // leading to flaky tests.
        // We set the times for the new file to 10 seconds ago, to be safe.
        let new_time =
            filetime::FileTime::from_system_time(SystemTime::now() - Duration::from_secs(10));
        filetime::set_file_times(source_file, new_time, new_time).unwrap();
    }
}

fn test_basic_compile(compiler: Compiler, tempdir: &Path) {
    let Compiler {
        name,
        exe,
        env_vars,
    } = compiler;
    println!("test_basic_compile: {}", name);
    // Compile a source file.
copy_to_tempdir(&[INPUT, INPUT_ERR], tempdir); let out_file = tempdir.join(OUTPUT); trace!("compile"); sccache_command() .args(compile_cmdline(name, &exe, INPUT, OUTPUT, Vec::new())) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_misses.get("C/C++").unwrap()); let adv_key = adv_key_kind("c", compiler.name); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_key).unwrap()); }); trace!("compile"); fs::remove_file(&out_file).unwrap(); sccache_command() .args(compile_cmdline(name, &exe, INPUT, OUTPUT, Vec::new())) .current_dir(tempdir) .envs(env_vars) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(2, info.stats.compile_requests); assert_eq!(2, info.stats.requests_executed); assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("C/C++").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("C/C++").unwrap()); let adv_key = adv_key_kind("c", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_key).unwrap()); }); } fn test_noncacheable_stats(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; println!("test_noncacheable_stats: {}", name); copy_to_tempdir(&[INPUT], tempdir); trace!("compile"); sccache_command() .arg(&exe) .arg("-E") .arg(INPUT) .current_dir(tempdir) .envs(env_vars) .assert() .success(); trace!("request stats"); get_stats(|info| { assert_eq!(1, info.stats.compile_requests); assert_eq!(0, info.stats.requests_executed); assert_eq!(1, info.stats.not_cached.len()); assert_eq!(Some(&1), info.stats.not_cached.get("-E")); }); } fn test_msvc_deps(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; // Check that -deps works. 
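    // (The flag under test is cl.exe's /sourceDependencies, which writes the
    // include graph as a JSON file next to the object file; the assertions
    // below open and parse that file.)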
trace!("compile with /sourceDependencies"); let mut args = compile_cmdline(name, exe, INPUT, OUTPUT, Vec::new()); args.push("/sourceDependenciestest.o.json".into()); sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars) .assert() .success(); // Check the contents let f = File::open(tempdir.join("test.o.json")).expect("Failed to open dep file"); // MSVC deps files are JSON, which we can validate properties of, but will be // subtly different on different systems (Windows SDK version, for example) let deps: serde_json::Value = serde_json::from_reader(f).expect("Failed to read dep file"); let source = deps["Data"]["Source"].as_str().expect("No source found"); let source = Path::new(source).file_name().expect("No source file name"); assert_eq!(source, INPUT); let includes = deps["Data"]["Includes"] .as_array() .expect("No includes found"); assert_ne!(includes.len(), 0); } fn test_msvc_responsefile(compiler: Compiler, tempdir: &Path) { let Compiler { name: _, exe, env_vars, } = compiler; let out_file = tempdir.join(OUTPUT); let cmd_file_name = "test_msvc.rsp"; { let mut file = File::create(tempdir.join(cmd_file_name)).unwrap(); let content = format!("-c {INPUT} -Fo{OUTPUT}"); file.write_all(content.as_bytes()).unwrap(); } let args = vec_from!(OsString, exe, &format!("@{cmd_file_name}")); sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); fs::remove_file(&out_file).unwrap(); } fn test_gcc_mp_werror(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; trace!("test -MP with -Werror"); let mut args = compile_cmdline(name, exe, INPUT_ERR, OUTPUT, Vec::new()); args.extend(vec_from!( OsString, "-MD", "-MP", "-MF", "foo.pp", "-Werror" )); // This should fail, but the error should be from the #error! sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars) .assert() .failure() .stderr( predicates::str::contains("to generate dependencies you must specify either -M or -MM") .from_utf8() .not(), ); } fn test_gcc_fprofile_generate_source_changes(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; trace!("test -fprofile-generate with different source inputs"); zero_stats(); const SRC: &str = "source.c"; write_source( tempdir, SRC, "/*line 1*/ #ifndef UNDEFINED /*unused line 1*/ #endif int main(int argc, char** argv) { return 0; } ", ); let mut args = compile_cmdline(name, exe, SRC, OUTPUT, Vec::new()); args.extend(vec_from!(OsString, "-fprofile-generate")); trace!("compile source.c (1)"); sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); get_stats(|info| { assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_misses.get("C/C++").unwrap()); }); // Compile the same source again to ensure we can get a cache hit. trace!("compile source.c (2)"); sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); get_stats(|info| { assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("C/C++").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("C/C++").unwrap()); }); // Now write out a slightly different source file that will preprocess to the same thing, // modulo line numbers. This should not be a cache hit because line numbers are important // with -fprofile-generate. 
write_source( tempdir, SRC, "/*line 1*/ #ifndef UNDEFINED /*unused line 1*/ /*unused line 2*/ #endif int main(int argc, char** argv) { return 0; } ", ); trace!("compile source.c (3)"); sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars) .assert() .success(); get_stats(|info| { assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("C/C++").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("C/C++").unwrap()); }); } /* test case like this: echo "int test(){}" > test.cc mkdir o1 o2 sccache g++ -c -g -gsplit-dwarf test.cc -o test1.o sccache g++ -c -g -gsplit-dwarf test.cc -o test1.o --- > cache hit sccache g++ -c -g -gsplit-dwarf test.cc -o test2.o --- > cache miss strings test2.o |grep test2.dwo */ fn test_split_dwarf_object_generate_output_dir_changes(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; trace!("test -g -gsplit-dwarf with different output"); zero_stats(); const SRC: &str = "source.c"; write_source(tempdir, SRC, "int test(){}"); let mut args = compile_cmdline(name, exe.clone(), SRC, "test1.o", Vec::new()); args.extend(vec_from!(OsString, "-g")); args.extend(vec_from!(OsString, "-gsplit-dwarf")); trace!("compile source.c (1)"); sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); get_stats(|info| { assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_misses.get("C/C++").unwrap()); }); // Compile the same source again to ensure we can get a cache hit. trace!("compile source.c (2)"); sccache_command() .args(&args) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); get_stats(|info| { assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("C/C++").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("C/C++").unwrap()); }); // Compile the same source again with different output // to ensure we can force generate new object file. let mut args2 = compile_cmdline(name, exe, SRC, "test2.o", Vec::new()); args2.extend(vec_from!(OsString, "-g")); args2.extend(vec_from!(OsString, "-gsplit-dwarf")); trace!("compile source.c (2)"); sccache_command() .args(&args2) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); get_stats(|info| { assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("C/C++").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("C/C++").unwrap()); }); } fn test_gcc_clang_no_warnings_from_macro_expansion(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; println!("test_gcc_clang_no_warnings_from_macro_expansion: {}", name); // Compile a source file. copy_to_tempdir(&[INPUT_MACRO_EXPANSION], tempdir); trace!("compile"); sccache_command() .args( [ &compile_cmdline(name, exe, INPUT_MACRO_EXPANSION, OUTPUT, Vec::new())[..], &vec_from!(OsString, "-Wunreachable-code")[..], ] .concat(), ) .current_dir(tempdir) .envs(env_vars) .assert() .success() .stderr(predicates::str::contains("warning:").from_utf8().not()); } fn test_compile_with_define(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; println!("test_compile_with_define: {}", name); // Compile a source file. 
    copy_to_tempdir(&[INPUT_WITH_DEFINE], tempdir);

    trace!("compile");
    sccache_command()
        .args(
            [
                &compile_cmdline(name, exe, INPUT_WITH_DEFINE, OUTPUT, Vec::new())[..],
                &vec_from!(OsString, "-DSCCACHE_TEST_DEFINE")[..],
            ]
            .concat(),
        )
        .current_dir(tempdir)
        .envs(env_vars)
        .assert()
        .success()
        .stderr(predicates::str::contains("warning:").from_utf8().not());
}

fn test_gcc_clang_depfile(compiler: Compiler, tempdir: &Path) {
    let Compiler {
        name,
        exe,
        env_vars,
    } = compiler;
    println!("test_gcc_clang_depfile: {}", name);
    copy_to_tempdir(&[INPUT], tempdir);
    fs::copy(tempdir.join(INPUT), tempdir.join("same-content.c")).unwrap();

    trace!("compile");
    sccache_command()
        .args(compile_cmdline(
            name,
            exe.clone(),
            INPUT,
            OUTPUT,
            Vec::new(),
        ))
        .args(vec_from!(OsString, "-MD", "-MF", "first.d"))
        .current_dir(tempdir)
        .envs(env_vars.clone())
        .assert()
        .success();

    sccache_command()
        .args(compile_cmdline(
            name,
            exe,
            "same-content.c",
            "same-content.o",
            Vec::new(),
        ))
        .args(vec_from!(OsString, "-MD", "-MF", "second.d"))
        .current_dir(tempdir)
        .envs(env_vars)
        .assert()
        .success();

    let mut first = String::new();
    let mut second = String::new();
    File::open(tempdir.join("first.d"))
        .unwrap()
        .read_to_string(&mut first)
        .unwrap();
    File::open(tempdir.join("second.d"))
        .unwrap()
        .read_to_string(&mut second)
        .unwrap();
    assert_ne!(first, second);
}

fn run_sccache_command_tests(compiler: Compiler, tempdir: &Path, preprocessor_cache_mode: bool) {
    if compiler.name != "clang++" {
        test_basic_compile(compiler.clone(), tempdir);
    }
    test_compile_with_define(compiler.clone(), tempdir);
    if compiler.name == "cl.exe" {
        test_msvc_deps(compiler.clone(), tempdir);
        test_msvc_responsefile(compiler.clone(), tempdir);
    }
    if compiler.name == "gcc" {
        test_gcc_mp_werror(compiler.clone(), tempdir);
        test_gcc_fprofile_generate_source_changes(compiler.clone(), tempdir);
    }
    if compiler.name == "clang" || compiler.name == "gcc" {
        test_gcc_clang_no_warnings_from_macro_expansion(compiler.clone(), tempdir);
        test_split_dwarf_object_generate_output_dir_changes(compiler.clone(), tempdir);
        test_gcc_clang_depfile(compiler.clone(), tempdir);
    }
    if compiler.name == "clang++" {
        test_clang_multicall(compiler.clone(), tempdir);
    }

    // If we are testing with clang-14 or later, we expect the -fminimize-whitespace flag to be used.
    if compiler.name == "clang" || compiler.name == "clang++" {
        let version_cmd = Command::new(compiler.exe.clone())
            .arg("--version")
            .output()
            .expect("Failure when getting compiler version");
        assert!(version_cmd.status.success());

        let version_output = match str::from_utf8(&version_cmd.stdout) {
            Ok(v) => v,
            Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
        };

        // Regex to match "Apple LLVM clang version" or "Apple clang version"
        let re = Regex::new(r"(?P<apple>Apple)?.*clang version (?P<major>\d+)").unwrap();
        let (major, is_appleclang) = match re.captures(version_output) {
            Some(c) => (
                c.name("major").unwrap().as_str().parse::<usize>().unwrap(),
                c.name("apple").is_some(),
            ),
            None => panic!(
                "Version info not found in --version output: {}",
                version_output
            ),
        };
        test_clang_cache_whitespace_normalization(
            compiler,
            tempdir,
            !is_appleclang && major >= 14,
            preprocessor_cache_mode,
        );
    } else {
        test_clang_cache_whitespace_normalization(
            compiler,
            tempdir,
            false,
            preprocessor_cache_mode,
        );
    }
}

fn test_nvcc_cuda_compiles(compiler: &Compiler, tempdir: &Path) {
    let Compiler {
        name,
        exe,
        env_vars,
    } = compiler;
    println!("test_nvcc_cuda_compiles: {}", name);
    // Compile multiple source files.
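    // (An nvcc invocation fans out into separately cacheable sub-compilations,
    // cudafe++ for host code, cicc for PTX and ptxas for CUBIN, which is why a
    // single `-c` request below advances `requests_executed` by five and the
    // CUDA, PTX and CUBIN stats move independently in the assertions that follow.)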
copy_to_tempdir(&[INPUT_FOR_CUDA_A, INPUT_FOR_CUDA_B], tempdir); let out_file = tempdir.join(OUTPUT); trace!("compile A"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", // relative path for input INPUT_FOR_CUDA_A, // relative path for output out_file.file_name().unwrap().to_string_lossy().as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); fs::remove_file(&out_file).unwrap(); trace!("compile A request stats"); get_stats(|info| { assert_eq!(1, info.stats.compile_requests); assert_eq!(5, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(4, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &1, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&1, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &1, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); trace!("compile A"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", // relative path for input INPUT_FOR_CUDA_A, // absolute path for output out_file.to_string_lossy().as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); fs::remove_file(&out_file).unwrap(); trace!("compile A request stats"); get_stats(|info| { assert_eq!(2, info.stats.compile_requests); assert_eq!(10, info.stats.requests_executed); assert_eq!(4, info.stats.cache_hits.all()); assert_eq!(4, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("PTX").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUBIN").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &1, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&1, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cudafe_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cubin_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &1, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&1, 
info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); // By compiling another input source we verify that the pre-processor // phase is correctly running and outputting text trace!("compile B"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", // absolute path for input &tempdir.join(INPUT_FOR_CUDA_B).to_string_lossy(), // absolute path for output out_file.to_string_lossy().as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); fs::remove_file(&out_file).unwrap(); trace!("compile B request stats"); get_stats(|info| { assert_eq!(3, info.stats.compile_requests); assert_eq!(15, info.stats.requests_executed); assert_eq!(5, info.stats.cache_hits.all()); assert_eq!(7, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("PTX").unwrap()); assert_eq!(&2, info.stats.cache_hits.get("CUBIN").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &2, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&2, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cudafe_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&2, info.stats.cache_hits.get_adv(&adv_cubin_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &2, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); trace!("compile ptx"); let out_file = tempdir.join("test.ptx"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-ptx", INPUT_FOR_CUDA_A, // relative path for output out_file.file_name().unwrap().to_string_lossy().as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); fs::remove_file(&out_file).unwrap(); trace!("compile ptx request stats"); get_stats(|info| { assert_eq!(4, info.stats.compile_requests); assert_eq!(17, info.stats.requests_executed); assert_eq!(5, info.stats.cache_hits.all()); assert_eq!(8, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("PTX").unwrap()); assert_eq!(&2, info.stats.cache_hits.get("CUBIN").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &2, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&3, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", 
compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cudafe_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&2, info.stats.cache_hits.get_adv(&adv_cubin_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &2, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&3, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); trace!("compile cubin"); let out_file = tempdir.join("test.cubin"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-cubin", INPUT_FOR_CUDA_A, // absolute path for output out_file.to_string_lossy().as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); fs::remove_file(&out_file).unwrap(); trace!("compile cubin request stats"); get_stats(|info| { assert_eq!(5, info.stats.compile_requests); assert_eq!(20, info.stats.requests_executed); assert_eq!(6, info.stats.cache_hits.all()); assert_eq!(9, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("PTX").unwrap()); assert_eq!(&3, info.stats.cache_hits.get("CUBIN").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &2, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&4, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cudafe_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&3, info.stats.cache_hits.get_adv(&adv_cubin_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &2, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&4, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); // Test to ensure #2299 doesn't regress (https://github.com/mozilla/sccache/issues/2299) let test_2299_src_name = "test_2299.cu"; let test_2299_out_file = tempdir.join("test_2299.cu.o"); // Two versions of the source with different contents inside the #ifndef __CUDA_ARCH__ let test_2299_cu_src_1 = " #ifndef __CUDA_ARCH__ static const auto x = 5; #endif int main(int argc, char** argv) { return 0; } "; let test_2299_cu_src_2 = " #ifndef __CUDA_ARCH__ static const auto x = \"5\"; #endif int main(int argc, char** argv) { return 0; } "; write_source(tempdir, test_2299_src_name, test_2299_cu_src_1); trace!("compile test_2299.cu (1)"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", // relative path for input test_2299_src_name, // relative path for output test_2299_out_file .file_name() 
.unwrap() .to_string_lossy() .as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&test_2299_out_file) .map(|m| m.len() > 0) .unwrap()); fs::remove_file(&test_2299_out_file).unwrap(); trace!("compile test_2299.cu request stats (1)"); get_stats(|info| { assert_eq!(6, info.stats.compile_requests); assert_eq!(25, info.stats.requests_executed); assert_eq!(6, info.stats.cache_hits.all()); assert_eq!(13, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("PTX").unwrap()); assert_eq!(&3, info.stats.cache_hits.get("CUBIN").unwrap()); assert_eq!(&3, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &3, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&5, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cudafe_key).unwrap()); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&3, info.stats.cache_hits.get_adv(&adv_cubin_key).unwrap()); assert_eq!(&3, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &3, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&5, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); write_source(tempdir, test_2299_src_name, test_2299_cu_src_2); trace!("compile test_2299.cu (2)"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", // relative path for input test_2299_src_name, // relative path for output test_2299_out_file .file_name() .unwrap() .to_string_lossy() .as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&test_2299_out_file) .map(|m| m.len() > 0) .unwrap()); fs::remove_file(&test_2299_out_file).unwrap(); trace!("compile test_2299.cu request stats (2)"); get_stats(|info| { assert_eq!(7, info.stats.compile_requests); assert_eq!(30, info.stats.requests_executed); assert_eq!(8, info.stats.cache_hits.all()); assert_eq!(15, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&2, info.stats.cache_hits.get("PTX").unwrap()); assert_eq!(&4, info.stats.cache_hits.get("CUBIN").unwrap()); assert_eq!(&4, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &4, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&5, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&1, 
info.stats.cache_hits.get_adv(&adv_cudafe_key).unwrap()); assert_eq!(&2, info.stats.cache_hits.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&4, info.stats.cache_hits.get_adv(&adv_cubin_key).unwrap()); assert_eq!(&4, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &4, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&5, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); // Recompile the original version again to ensure only cache hits write_source(tempdir, test_2299_src_name, test_2299_cu_src_1); trace!("compile test_2299.cu (3)"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", // relative path for input test_2299_src_name, // relative path for output test_2299_out_file .file_name() .unwrap() .to_string_lossy() .as_ref(), Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&test_2299_out_file) .map(|m| m.len() > 0) .unwrap()); fs::remove_file(&test_2299_out_file).unwrap(); trace!("compile test_2299.cu request stats (3)"); get_stats(|info| { assert_eq!(8, info.stats.compile_requests); assert_eq!(35, info.stats.requests_executed); assert_eq!(12, info.stats.cache_hits.all()); assert_eq!(15, info.stats.cache_misses.all()); assert_eq!(&2, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&2, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&3, info.stats.cache_hits.get("PTX").unwrap()); assert_eq!(&5, info.stats.cache_hits.get("CUBIN").unwrap()); assert_eq!(&4, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &4, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&5, info.stats.cache_misses.get("PTX").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); let adv_cudafe_key = adv_key_kind("cudafe++", compiler.name); let adv_ptx_key = adv_key_kind("ptx", compiler.name); let adv_cubin_key = adv_key_kind("cubin", compiler.name); assert_eq!(&2, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&2, info.stats.cache_hits.get_adv(&adv_cudafe_key).unwrap()); assert_eq!(&3, info.stats.cache_hits.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&5, info.stats.cache_hits.get_adv(&adv_cubin_key).unwrap()); assert_eq!(&4, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); assert_eq!( &4, info.stats.cache_misses.get_adv(&adv_cudafe_key).unwrap() ); assert_eq!(&5, info.stats.cache_misses.get_adv(&adv_ptx_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_cubin_key).unwrap()); }); } fn test_nvcc_proper_lang_stat_tracking(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; zero_stats(); println!("test_nvcc_proper_lang_stat_tracking: {}", name); // Compile multiple source files. 
copy_to_tempdir(&[INPUT_FOR_CUDA_C, INPUT], tempdir); let out_file = tempdir.join(OUTPUT); trace!("compile CUDA A"); sccache_command() .args(compile_cmdline( name, &exe, INPUT_FOR_CUDA_C, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("compile CUDA A"); sccache_command() .args(compile_cmdline( name, &exe, INPUT_FOR_CUDA_C, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("compile C++ A"); sccache_command() .args(compile_cmdline(name, &exe, INPUT, OUTPUT, Vec::new())) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("compile C++ A"); sccache_command() .args(compile_cmdline(name, &exe, INPUT, OUTPUT, Vec::new())) .current_dir(tempdir) .envs(env_vars) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("request stats"); get_stats(|info| { assert_eq!(4, info.stats.compile_requests); assert_eq!(14, info.stats.requests_executed); assert_eq!(6, info.stats.cache_hits.all()); assert_eq!(4, info.stats.cache_misses.all()); assert!(info.stats.cache_hits.get("C/C++").is_none()); assert_eq!(&2, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA (Device code)").unwrap()); assert_eq!(&2, info.stats.cache_hits.get("CUBIN").unwrap()); assert!(info.stats.cache_misses.get("C/C++").is_none()); assert_eq!(&2, info.stats.cache_misses.get("CUDA").unwrap()); assert_eq!( &1, info.stats.cache_misses.get("CUDA (Device code)").unwrap() ); assert_eq!(&1, info.stats.cache_misses.get("PTX").unwrap()); }); } fn run_sccache_nvcc_cuda_command_tests(compiler: Compiler, tempdir: &Path) { test_nvcc_cuda_compiles(&compiler, tempdir); test_nvcc_proper_lang_stat_tracking(compiler, tempdir); } fn test_clang_cuda_compiles(compiler: &Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; println!("test_clang_cuda_compiles: {}", name); // Compile multiple source files. 
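    // (Unlike nvcc, clang++ compiles CUDA in a single pass, so every request
    // below maps 1:1 onto `requests_executed` and only the "CUDA" stat moves.)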
copy_to_tempdir(&[INPUT_FOR_CUDA_A, INPUT_FOR_CUDA_B], tempdir); let out_file = tempdir.join(OUTPUT); trace!("compile A"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", INPUT_FOR_CUDA_A, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_misses.get("CUDA").unwrap()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); }); trace!("compile A"); fs::remove_file(&out_file).unwrap(); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", INPUT_FOR_CUDA_A, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(2, info.stats.compile_requests); assert_eq!(2, info.stats.requests_executed); assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUDA").unwrap()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); }); // By compiling another input source we verify that the pre-processor // phase is correctly running and outputting text trace!("compile B"); sccache_command() .args(compile_cuda_cmdline( name, exe, "-c", INPUT_FOR_CUDA_B, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(3, info.stats.compile_requests); assert_eq!(3, info.stats.requests_executed); assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("CUDA").unwrap()); let adv_cuda_key = adv_key_kind("cuda", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_cuda_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_cuda_key).unwrap()); }); } fn test_clang_proper_lang_stat_tracking(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; zero_stats(); println!("test_clang_proper_lang_stat_tracking: {}", name); // Compile multiple source files. 
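// Note on the counters checked below: for clang, requests_executed tracks
// compile_requests 1:1 because each compile is a single invocation (4 and 4
// in this test). For nvcc (see test_nvcc_proper_lang_stat_tracking above),
// every top-level compile fans out into extra host (cudafe++), PTX and CUBIN
// sub-requests, which is why requests_executed (14) outruns
// compile_requests (4) there.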
copy_to_tempdir(&[INPUT_FOR_CUDA_C, INPUT], tempdir); let out_file = tempdir.join(OUTPUT); trace!("compile CUDA A"); sccache_command() .args(compile_cuda_cmdline( name, &exe, "-c", INPUT_FOR_CUDA_C, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("compile CUDA A"); sccache_command() .args(compile_cuda_cmdline( name, &exe, "-c", INPUT_FOR_CUDA_C, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("compile C++ A"); sccache_command() .args(compile_cmdline(name, &exe, INPUT, OUTPUT, Vec::new())) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("compile C++ A"); sccache_command() .args(compile_cmdline(name, &exe, INPUT, OUTPUT, Vec::new())) .current_dir(tempdir) .envs(env_vars) .assert() .success(); fs::remove_file(&out_file).unwrap(); trace!("request stats"); get_stats(|info| { assert_eq!(4, info.stats.compile_requests); assert_eq!(4, info.stats.requests_executed); assert_eq!(2, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("C/C++").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("C/C++").unwrap()); assert_eq!(&1, info.stats.cache_hits.get("CUDA").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("CUDA").unwrap()); }); } fn run_sccache_clang_cuda_command_tests(compiler: Compiler, tempdir: &Path) { test_clang_cuda_compiles(&compiler, tempdir); test_clang_proper_lang_stat_tracking(compiler, tempdir); } fn test_hip_compiles(compiler: &Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; println!("test_hip_compiles: {}", name); // Compile multiple source files. 
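// The HIP compiles below are built by `compile_hip_cmdline` with a single
// `gfx900` offload target. The modeled invocation is roughly of this shape
// (an assumption about the flag spelling, not the exact argument list):
//
//     clang -x hip --offload-arch=gfx900 -c test_a.hip -o test.o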
copy_to_tempdir(&[INPUT_FOR_HIP_A, INPUT_FOR_HIP_B], tempdir); let target_arch = vec!["gfx900".to_string()]; let out_file = tempdir.join(OUTPUT); trace!("compile A"); sccache_command() .args(compile_hip_cmdline( name, exe, INPUT_FOR_HIP_A, OUTPUT, &target_arch, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_misses.get("HIP").unwrap()); let adv_hip_key = adv_key_kind("hip", compiler.name); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_hip_key).unwrap()); }); trace!("compile A"); fs::remove_file(&out_file).unwrap(); sccache_command() .args(compile_hip_cmdline( name, exe, INPUT_FOR_HIP_A, OUTPUT, &target_arch, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(2, info.stats.compile_requests); assert_eq!(2, info.stats.requests_executed); assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("HIP").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("HIP").unwrap()); let adv_hip_key = adv_key_kind("hip", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_hip_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_hip_key).unwrap()); }); // By compiling another input source we verify that the pre-processor // phase is correctly running and outputting text trace!("compile B"); sccache_command() .args(compile_hip_cmdline( name, exe, INPUT_FOR_HIP_B, OUTPUT, &target_arch, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(3, info.stats.compile_requests); assert_eq!(3, info.stats.requests_executed); assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("HIP").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("HIP").unwrap()); let adv_hip_key = adv_key_kind("hip", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_hip_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_hip_key).unwrap()); }); } fn test_hip_compiles_multi_targets(compiler: &Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; println!("test_hip_compiles_multi_targets: {}", name); // Compile multiple source files. 
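// With several offload targets, device code for every architecture is built
// inside one compiler invocation, so gfx900+gfx1030 still produces a single
// cacheable "HIP" entry per source file: the stats below count one miss for
// the first compile and one hit for the identical recompile. Roughly
// (illustrative shape only):
//
//     clang -x hip --offload-arch=gfx900 --offload-arch=gfx1030 -c test_a.hip -o test.o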
copy_to_tempdir(&[INPUT_FOR_HIP_A, INPUT_FOR_HIP_B], tempdir); let target_arches: Vec<String> = vec!["gfx900".to_string(), "gfx1030".to_string()]; let out_file = tempdir.join(OUTPUT); trace!("compile A with gfx900 and gfx1030"); sccache_command() .args(compile_hip_cmdline( name, exe, INPUT_FOR_HIP_A, OUTPUT, &target_arches, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_misses.get("HIP").unwrap()); let adv_hip_key = adv_key_kind("hip", compiler.name); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_hip_key).unwrap()); }); trace!("compile A with gfx900 and gfx1030 again"); fs::remove_file(&out_file).unwrap(); sccache_command() .args(compile_hip_cmdline( name, exe, INPUT_FOR_HIP_A, OUTPUT, &target_arches, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(2, info.stats.compile_requests); assert_eq!(2, info.stats.requests_executed); assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("HIP").unwrap()); assert_eq!(&1, info.stats.cache_misses.get("HIP").unwrap()); let adv_hip_key = adv_key_kind("hip", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_hip_key).unwrap()); assert_eq!(&1, info.stats.cache_misses.get_adv(&adv_hip_key).unwrap()); }); // By compiling another input source we verify that the pre-processor // phase is correctly running and outputting text trace!("compile B with gfx900 and gfx1030"); sccache_command() .args(compile_hip_cmdline( name, exe, INPUT_FOR_HIP_B, OUTPUT, &target_arches, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); assert!(fs::metadata(&out_file).map(|m| m.len() > 0).unwrap()); trace!("request stats"); get_stats(|info| { assert_eq!(3, info.stats.compile_requests); assert_eq!(3, info.stats.requests_executed); assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); assert_eq!(&1, info.stats.cache_hits.get("HIP").unwrap()); assert_eq!(&2, info.stats.cache_misses.get("HIP").unwrap()); let adv_hip_key = adv_key_kind("hip", compiler.name); assert_eq!(&1, info.stats.cache_hits.get_adv(&adv_hip_key).unwrap()); assert_eq!(&2, info.stats.cache_misses.get_adv(&adv_hip_key).unwrap()); }); } fn run_sccache_hip_command_tests(compiler: Compiler, tempdir: &Path) { zero_stats(); test_hip_compiles(&compiler, tempdir); zero_stats(); test_hip_compiles_multi_targets(&compiler, tempdir); // test_proper_lang_stat_tracking(compiler, tempdir); } fn test_clang_multicall(compiler: Compiler, tempdir: &Path) { let Compiler { name, exe, env_vars, } = compiler; println!("test_clang_multicall: {}", name); // Compile a source file.
copy_to_tempdir(&[INPUT_CLANG_MULTICALL], tempdir); println!("compile clang_multicall"); sccache_command() .args(compile_cmdline( name, exe, INPUT_CLANG_MULTICALL, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars) .assert() .success(); } fn test_clang_cache_whitespace_normalization( compiler: Compiler, tempdir: &Path, hit: bool, preprocessor_cache_mode: bool, ) { let Compiler { name, exe, env_vars, } = compiler; println!("test_clang_cache_whitespace_normalization: {}", name); debug!("expecting hit: {}", hit); // Compile a source file. copy_to_tempdir(&[INPUT_WITH_WHITESPACE, INPUT_WITH_WHITESPACE_ALT], tempdir); zero_stats(); debug!("compile whitespace"); sccache_command() .args(compile_cmdline( name, &exe, INPUT_WITH_WHITESPACE, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars.clone()) .assert() .success(); debug!("request stats"); get_stats(|info| { assert_eq!(1, info.stats.compile_requests); assert_eq!(1, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); }); debug!("compile whitespace_alt"); sccache_command() .args(compile_cmdline( name, &exe, INPUT_WITH_WHITESPACE_ALT, OUTPUT, Vec::new(), )) .current_dir(tempdir) .envs(env_vars) .assert() .success(); debug!("request stats (expecting cache hit)"); if hit { get_stats(move |info| { assert_eq!(2, info.stats.compile_requests); assert_eq!(2, info.stats.requests_executed); if preprocessor_cache_mode { // Preprocessor cache mode hashes the input file, so whitespace // normalization does not work. assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); } else { assert_eq!(1, info.stats.cache_hits.all()); assert_eq!(1, info.stats.cache_misses.all()); } }); } else { get_stats(|info| { assert_eq!(2, info.stats.compile_requests); assert_eq!(2, info.stats.requests_executed); assert_eq!(0, info.stats.cache_hits.all()); assert_eq!(2, info.stats.cache_misses.all()); }); } } #[cfg(unix)] fn find_compilers() -> Vec<Compiler> { let cwd = env::current_dir().unwrap(); COMPILERS .iter() .filter_map(|c| { which_in(c, env::var_os("PATH"), &cwd) .ok() .map(|full_path| Compiler { name: c, exe: full_path.into(), env_vars: vec![], }) }) .collect::<Vec<_>>() } #[cfg(target_env = "msvc")] fn find_compilers() -> Vec<Compiler> { let tool = cc::Build::new() .opt_level(1) .host("x86_64-pc-windows-msvc") .target("x86_64-pc-windows-msvc") .debug(false) .get_compiler(); vec![Compiler { name: "cl.exe", exe: tool.path().as_os_str().to_os_string(), env_vars: tool.env().to_vec(), }] } fn find_cuda_compilers() -> Vec<Compiler> { let cwd = env::current_dir().unwrap(); // CUDA compilers like clang don't come with all of the components for compilation. // To consider a machine to have any cuda compilers we rely on the existence of `nvcc`. let compilers = match which("nvcc") { Ok(_) => CUDA_COMPILERS .iter() .filter_map(|c| { which_in(c, env::var_os("PATH"), &cwd) .ok() .map(|full_path| Compiler { name: c, exe: full_path.into(), env_vars: vec![], }) }) .collect::<Vec<_>>(), Err(_) => { eprintln!( "unable to find `nvcc` in PATH={:?}", env::var_os("PATH").unwrap_or_default() ); vec![] } }; compilers } // We detect the HIP clang compiler in two ways: // 1. If the env var HIP_CLANG_PATH is set, try $HIP_CLANG_PATH/clang. This is the same behavior as // hipcc, but is rarely known, so we have another option. // 2. If the env var ROCM_PATH is set, try $ROCM_PATH/llvm/bin/clang. This is the location in // AMD's official debian packages. // 3. Otherwise, just bail.
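// For example (illustrative invocation; `/opt/rocm` is the conventional prefix
// of AMD's packages and may differ locally), the HIP tests can be pointed at a
// ROCm install like so:
//
//     ROCM_PATH=/opt/rocm cargo test --test system
//
// where `--test system` is an assumption about this integration-test target's
// name; adjust it to the actual test binary.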
fn find_hip_compiler() -> Option<Compiler> { let env_vars: Vec<(OsString, OsString)> = env::vars_os().collect(); if let Ok(hip_clang_path) = env::var("HIP_CLANG_PATH") { let clang_path = Path::new(&hip_clang_path).join("clang"); if let Ok(true) = clang_path.try_exists() { return Some(Compiler { name: "clang", exe: clang_path.into_os_string(), env_vars, }); } } if let Ok(rocm_path) = env::var("ROCM_PATH") { let clang_path = Path::new(&rocm_path).join("llvm").join("bin").join("clang"); if let Ok(true) = clang_path.try_exists() { return Some(Compiler { name: "hip", exe: clang_path.into_os_string(), env_vars, }); } } None } // TODO: This runs multiple test cases, for multiple compilers. It should be // split up to run them individually. In the current form, it is hard to see // which sub test cases are executed, and if one fails, the remaining tests // are not run. #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] #[serial] #[cfg(any(unix, target_env = "msvc"))] fn test_sccache_command(preprocessor_cache_mode: bool) { let _ = env_logger::try_init(); let tempdir = tempfile::Builder::new() .prefix("sccache_system_test") .tempdir() .unwrap(); let compilers = find_compilers(); if compilers.is_empty() { warn!("No compilers found, skipping test"); } else { // Ensure there's no existing sccache server running. stop_local_daemon(); // Create the configurations let sccache_cfg = sccache_client_cfg(tempdir.path(), preprocessor_cache_mode); write_json_cfg(tempdir.path(), "sccache-cfg.json", &sccache_cfg); let sccache_cached_cfg_path = tempdir.path().join("sccache-cached-cfg"); // Start a server. trace!("start server"); start_local_daemon( &tempdir.path().join("sccache-cfg.json"), &sccache_cached_cfg_path, ); for compiler in compilers { run_sccache_command_tests(compiler, tempdir.path(), preprocessor_cache_mode); zero_stats(); } stop_local_daemon(); } } #[test] #[serial] fn test_stats_no_server() { // Ensure there's no existing sccache server running. stop_local_daemon(); get_stats(|_| {}); assert!( !stop_local_daemon(), "Server shouldn't be running after --show-stats" ); } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] #[serial] #[cfg(any(unix, target_env = "msvc"))] fn test_cuda_sccache_command(preprocessor_cache_mode: bool) { let _ = env_logger::try_init(); let tempdir = tempfile::Builder::new() .prefix("sccache_system_test") .tempdir() .unwrap(); let compilers = find_cuda_compilers(); println!( "CUDA compilers: {:?}", compilers .iter() .map(|c| c.exe.to_string_lossy()) .collect::<Vec<_>>() ); if compilers.is_empty() { warn!("No compilers found, skipping test"); } else { // Ensure there's no existing sccache server running. stop_local_daemon(); // Create the configurations let sccache_cfg = sccache_client_cfg(tempdir.path(), preprocessor_cache_mode); write_json_cfg(tempdir.path(), "sccache-cfg.json", &sccache_cfg); let sccache_cached_cfg_path = tempdir.path().join("sccache-cached-cfg"); // Start a server.
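// Every system test in this file follows the same daemon lifecycle, built from
// the helpers used above; all tests are marked #[serial] because they share
// one local server. The pattern, in outline:
//
//     stop_local_daemon();                               // kill any stale server
//     let cfg = sccache_client_cfg(dir, preprocessor_cache_mode);
//     write_json_cfg(dir, "sccache-cfg.json", &cfg);
//     start_local_daemon(&dir.join("sccache-cfg.json"), &cached_cfg_path);
//     // ... run compiles and assert on stats ...
//     stop_local_daemon();                               // always stop at the end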
trace!("start server"); start_local_daemon( &tempdir.path().join("sccache-cfg.json"), &sccache_cached_cfg_path, ); for compiler in compilers { match compiler.name { "nvcc" => run_sccache_nvcc_cuda_command_tests(compiler, tempdir.path()), "clang++" => run_sccache_clang_cuda_command_tests(compiler, tempdir.path()), _ => {} } zero_stats(); } stop_local_daemon(); } } #[test_case(true ; "with preprocessor cache")] #[test_case(false ; "without preprocessor cache")] #[serial] #[cfg(any(unix, target_env = "msvc"))] fn test_hip_sccache_command(preprocessor_cache_mode: bool) { let _ = env_logger::try_init(); let tempdir = tempfile::Builder::new() .prefix("sccache_system_test") .tempdir() .unwrap(); if let Some(compiler) = find_hip_compiler() { stop_local_daemon(); // Create the configurations let sccache_cfg = sccache_client_cfg(tempdir.path(), preprocessor_cache_mode); write_json_cfg(tempdir.path(), "sccache-cfg.json", &sccache_cfg); let sccache_cached_cfg_path = tempdir.path().join("sccache-cached-cfg"); // Start a server. trace!("start server"); start_local_daemon( &tempdir.path().join("sccache-cfg.json"), &sccache_cached_cfg_path, ); run_sccache_hip_command_tests(compiler, tempdir.path()); zero_stats(); stop_local_daemon(); } } mozilla-sccache-40c3d6b/tests/test-crate/000077500000000000000000000000001475712407500204105ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/test-crate/Cargo.lock000066400000000000000000000005621475712407500223200ustar00rootroot00000000000000# This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "itoa" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c" [[package]] name = "test-crate" version = "0.1.0" dependencies = [ "itoa", ] mozilla-sccache-40c3d6b/tests/test-crate/Cargo.toml000066400000000000000000000004461475712407500223440ustar00rootroot00000000000000[package] authors = ["Ted Mielczarek "] name = "test-crate" version = "0.1.0" [dependencies] # Arbitrary crate dependency that doesn't pull in any transitive dependencies. 
itoa = "0.3.4" [lib] name = "mylib" path = "src/lib.rs" [[bin]] name = "mybin" path = "src/bin.rs" mozilla-sccache-40c3d6b/tests/test-crate/src/000077500000000000000000000000001475712407500211775ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/test-crate/src/bin.rs000066400000000000000000000000761475712407500223200ustar00rootroot00000000000000extern crate mylib; fn main() { mylib::env_dep_test(); } mozilla-sccache-40c3d6b/tests/test-crate/src/lib.rs000066400000000000000000000002751475712407500223170ustar00rootroot00000000000000fn unused() {} pub fn env_dep_test() { println!("Env var: {}", env!("TEST_ENV_VAR")); } #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } } mozilla-sccache-40c3d6b/tests/test.c000066400000000000000000000000761475712407500174630ustar00rootroot00000000000000#include void foo() { printf("hello world\n"); } mozilla-sccache-40c3d6b/tests/test.c.gcc-13.2.0-preproc000066400000000000000000000430571475712407500224130ustar00rootroot00000000000000# 0 "tests/test.c" # 0 "" # 0 "" # 1 "/usr/include/stdc-predef.h" 1 3 4 # 0 "" 2 # 1 "tests/test.c" # 1 "/usr/include/stdio.h" 1 3 4 # 27 "/usr/include/stdio.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 1 3 4 # 33 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 3 4 # 1 "/usr/include/features.h" 1 3 4 # 393 "/usr/include/features.h" 3 4 # 1 "/usr/include/features-time64.h" 1 3 4 # 20 "/usr/include/features-time64.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" 1 3 4 # 21 "/usr/include/features-time64.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/timesize.h" 1 3 4 # 19 "/usr/include/x86_64-linux-gnu/bits/timesize.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" 1 3 4 # 20 "/usr/include/x86_64-linux-gnu/bits/timesize.h" 2 3 4 # 22 "/usr/include/features-time64.h" 2 3 4 # 394 "/usr/include/features.h" 2 3 4 # 490 "/usr/include/features.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 1 3 4 # 561 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" 1 3 4 # 562 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/long-double.h" 1 3 4 # 563 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 2 3 4 # 491 "/usr/include/features.h" 2 3 4 # 514 "/usr/include/features.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" 1 3 4 # 10 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" 1 3 4 # 11 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" 2 3 4 # 515 "/usr/include/features.h" 2 3 4 # 34 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 2 3 4 # 28 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/lib/gcc/x86_64-linux-gnu/13/include/stddef.h" 1 3 4 # 214 "/usr/lib/gcc/x86_64-linux-gnu/13/include/stddef.h" 3 4 # 214 "/usr/lib/gcc/x86_64-linux-gnu/13/include/stddef.h" 3 4 typedef long unsigned int size_t; # 34 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/lib/gcc/x86_64-linux-gnu/13/include/stdarg.h" 1 3 4 # 40 "/usr/lib/gcc/x86_64-linux-gnu/13/include/stdarg.h" 3 4 typedef __builtin_va_list __gnuc_va_list; # 37 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" 1 3 4 # 27 "/usr/include/x86_64-linux-gnu/bits/types.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" 1 3 4 # 28 "/usr/include/x86_64-linux-gnu/bits/types.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/timesize.h" 1 3 4 # 19 "/usr/include/x86_64-linux-gnu/bits/timesize.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" 1 3 4 # 20 
"/usr/include/x86_64-linux-gnu/bits/timesize.h" 2 3 4 # 29 "/usr/include/x86_64-linux-gnu/bits/types.h" 2 3 4 typedef unsigned char __u_char; typedef unsigned short int __u_short; typedef unsigned int __u_int; typedef unsigned long int __u_long; typedef signed char __int8_t; typedef unsigned char __uint8_t; typedef signed short int __int16_t; typedef unsigned short int __uint16_t; typedef signed int __int32_t; typedef unsigned int __uint32_t; typedef signed long int __int64_t; typedef unsigned long int __uint64_t; typedef __int8_t __int_least8_t; typedef __uint8_t __uint_least8_t; typedef __int16_t __int_least16_t; typedef __uint16_t __uint_least16_t; typedef __int32_t __int_least32_t; typedef __uint32_t __uint_least32_t; typedef __int64_t __int_least64_t; typedef __uint64_t __uint_least64_t; typedef long int __quad_t; typedef unsigned long int __u_quad_t; typedef long int __intmax_t; typedef unsigned long int __uintmax_t; # 141 "/usr/include/x86_64-linux-gnu/bits/types.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" 1 3 4 # 142 "/usr/include/x86_64-linux-gnu/bits/types.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/time64.h" 1 3 4 # 143 "/usr/include/x86_64-linux-gnu/bits/types.h" 2 3 4 typedef unsigned long int __dev_t; typedef unsigned int __uid_t; typedef unsigned int __gid_t; typedef unsigned long int __ino_t; typedef unsigned long int __ino64_t; typedef unsigned int __mode_t; typedef unsigned long int __nlink_t; typedef long int __off_t; typedef long int __off64_t; typedef int __pid_t; typedef struct { int __val[2]; } __fsid_t; typedef long int __clock_t; typedef unsigned long int __rlim_t; typedef unsigned long int __rlim64_t; typedef unsigned int __id_t; typedef long int __time_t; typedef unsigned int __useconds_t; typedef long int __suseconds_t; typedef long int __suseconds64_t; typedef int __daddr_t; typedef int __key_t; typedef int __clockid_t; typedef void * __timer_t; typedef long int __blksize_t; typedef long int __blkcnt_t; typedef long int __blkcnt64_t; typedef unsigned long int __fsblkcnt_t; typedef unsigned long int __fsblkcnt64_t; typedef unsigned long int __fsfilcnt_t; typedef unsigned long int __fsfilcnt64_t; typedef long int __fsword_t; typedef long int __ssize_t; typedef long int __syscall_slong_t; typedef unsigned long int __syscall_ulong_t; typedef __off64_t __loff_t; typedef char *__caddr_t; typedef long int __intptr_t; typedef unsigned int __socklen_t; typedef int __sig_atomic_t; # 39 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h" 1 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h" 1 3 4 # 13 "/usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h" 3 4 typedef struct { int __count; union { unsigned int __wch; char __wchb[4]; } __value; } __mbstate_t; # 6 "/usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h" 2 3 4 typedef struct _G_fpos_t { __off_t __pos; __mbstate_t __state; } __fpos_t; # 40 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h" 1 3 4 # 10 "/usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h" 3 4 typedef struct _G_fpos64_t { __off64_t __pos; __mbstate_t __state; } __fpos64_t; # 41 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/__FILE.h" 1 3 4 struct _IO_FILE; typedef struct _IO_FILE __FILE; # 42 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/FILE.h" 1 3 4 struct _IO_FILE; typedef struct _IO_FILE FILE; # 43 "/usr/include/stdio.h" 2 3 4 # 1 
"/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h" 1 3 4 # 35 "/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h" 3 4 struct _IO_FILE; struct _IO_marker; struct _IO_codecvt; struct _IO_wide_data; typedef void _IO_lock_t; struct _IO_FILE { int _flags; char *_IO_read_ptr; char *_IO_read_end; char *_IO_read_base; char *_IO_write_base; char *_IO_write_ptr; char *_IO_write_end; char *_IO_buf_base; char *_IO_buf_end; char *_IO_save_base; char *_IO_backup_base; char *_IO_save_end; struct _IO_marker *_markers; struct _IO_FILE *_chain; int _fileno; int _flags2; __off_t _old_offset; unsigned short _cur_column; signed char _vtable_offset; char _shortbuf[1]; _IO_lock_t *_lock; __off64_t _offset; struct _IO_codecvt *_codecvt; struct _IO_wide_data *_wide_data; struct _IO_FILE *_freeres_list; void *_freeres_buf; size_t __pad5; int _mode; char _unused2[15 * sizeof (int) - 4 * sizeof (void *) - sizeof (size_t)]; }; # 44 "/usr/include/stdio.h" 2 3 4 # 52 "/usr/include/stdio.h" 3 4 typedef __gnuc_va_list va_list; # 63 "/usr/include/stdio.h" 3 4 typedef __off_t off_t; # 77 "/usr/include/stdio.h" 3 4 typedef __ssize_t ssize_t; typedef __fpos_t fpos_t; # 133 "/usr/include/stdio.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" 1 3 4 # 134 "/usr/include/stdio.h" 2 3 4 # 143 "/usr/include/stdio.h" 3 4 extern FILE *stdin; extern FILE *stdout; extern FILE *stderr; extern int remove (const char *__filename) __attribute__ ((__nothrow__ , __leaf__)); extern int rename (const char *__old, const char *__new) __attribute__ ((__nothrow__ , __leaf__)); extern int renameat (int __oldfd, const char *__old, int __newfd, const char *__new) __attribute__ ((__nothrow__ , __leaf__)); # 178 "/usr/include/stdio.h" 3 4 extern int fclose (FILE *__stream); # 188 "/usr/include/stdio.h" 3 4 extern FILE *tmpfile (void) __attribute__ ((__malloc__)) __attribute__ ((__malloc__ (fclose, 1))) ; # 205 "/usr/include/stdio.h" 3 4 extern char *tmpnam (char[20]) __attribute__ ((__nothrow__ , __leaf__)) ; extern char *tmpnam_r (char __s[20]) __attribute__ ((__nothrow__ , __leaf__)) ; # 222 "/usr/include/stdio.h" 3 4 extern char *tempnam (const char *__dir, const char *__pfx) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) __attribute__ ((__malloc__ (__builtin_free, 1))); extern int fflush (FILE *__stream); # 239 "/usr/include/stdio.h" 3 4 extern int fflush_unlocked (FILE *__stream); # 258 "/usr/include/stdio.h" 3 4 extern FILE *fopen (const char *__restrict __filename, const char *__restrict __modes) __attribute__ ((__malloc__)) __attribute__ ((__malloc__ (fclose, 1))) ; extern FILE *freopen (const char *__restrict __filename, const char *__restrict __modes, FILE *__restrict __stream) ; # 293 "/usr/include/stdio.h" 3 4 extern FILE *fdopen (int __fd, const char *__modes) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) __attribute__ ((__malloc__ (fclose, 1))) ; # 308 "/usr/include/stdio.h" 3 4 extern FILE *fmemopen (void *__s, size_t __len, const char *__modes) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) __attribute__ ((__malloc__ (fclose, 1))) ; extern FILE *open_memstream (char **__bufloc, size_t *__sizeloc) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) __attribute__ ((__malloc__ (fclose, 1))) ; # 328 "/usr/include/stdio.h" 3 4 extern void setbuf (FILE *__restrict __stream, char *__restrict __buf) __attribute__ ((__nothrow__ , __leaf__)); extern int setvbuf (FILE *__restrict __stream, char *__restrict __buf, int __modes, size_t __n) 
__attribute__ ((__nothrow__ , __leaf__)); extern void setbuffer (FILE *__restrict __stream, char *__restrict __buf, size_t __size) __attribute__ ((__nothrow__ , __leaf__)); extern void setlinebuf (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)); extern int fprintf (FILE *__restrict __stream, const char *__restrict __format, ...); extern int printf (const char *__restrict __format, ...); extern int sprintf (char *__restrict __s, const char *__restrict __format, ...) __attribute__ ((__nothrow__)); extern int vfprintf (FILE *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg); extern int vprintf (const char *__restrict __format, __gnuc_va_list __arg); extern int vsprintf (char *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__nothrow__)); extern int snprintf (char *__restrict __s, size_t __maxlen, const char *__restrict __format, ...) __attribute__ ((__nothrow__)) __attribute__ ((__format__ (__printf__, 3, 4))); extern int vsnprintf (char *__restrict __s, size_t __maxlen, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__nothrow__)) __attribute__ ((__format__ (__printf__, 3, 0))); # 403 "/usr/include/stdio.h" 3 4 extern int vdprintf (int __fd, const char *__restrict __fmt, __gnuc_va_list __arg) __attribute__ ((__format__ (__printf__, 2, 0))); extern int dprintf (int __fd, const char *__restrict __fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3))); extern int fscanf (FILE *__restrict __stream, const char *__restrict __format, ...) ; extern int scanf (const char *__restrict __format, ...) ; extern int sscanf (const char *__restrict __s, const char *__restrict __format, ...) __attribute__ ((__nothrow__ , __leaf__)); # 1 "/usr/include/x86_64-linux-gnu/bits/floatn.h" 1 3 4 # 120 "/usr/include/x86_64-linux-gnu/bits/floatn.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 1 3 4 # 24 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/long-double.h" 1 3 4 # 25 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 2 3 4 # 121 "/usr/include/x86_64-linux-gnu/bits/floatn.h" 2 3 4 # 431 "/usr/include/stdio.h" 2 3 4 extern int fscanf (FILE *__restrict __stream, const char *__restrict __format, ...) __asm__ ("" "__isoc99_fscanf") ; extern int scanf (const char *__restrict __format, ...) __asm__ ("" "__isoc99_scanf") ; extern int sscanf (const char *__restrict __s, const char *__restrict __format, ...) 
__asm__ ("" "__isoc99_sscanf") __attribute__ ((__nothrow__ , __leaf__)) ; # 459 "/usr/include/stdio.h" 3 4 extern int vfscanf (FILE *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__format__ (__scanf__, 2, 0))) ; extern int vscanf (const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__format__ (__scanf__, 1, 0))) ; extern int vsscanf (const char *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__format__ (__scanf__, 2, 0))); extern int vfscanf (FILE *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vfscanf") __attribute__ ((__format__ (__scanf__, 2, 0))) ; extern int vscanf (const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vscanf") __attribute__ ((__format__ (__scanf__, 1, 0))) ; extern int vsscanf (const char *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vsscanf") __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__format__ (__scanf__, 2, 0))); # 513 "/usr/include/stdio.h" 3 4 extern int fgetc (FILE *__stream); extern int getc (FILE *__stream); extern int getchar (void); extern int getc_unlocked (FILE *__stream); extern int getchar_unlocked (void); # 538 "/usr/include/stdio.h" 3 4 extern int fgetc_unlocked (FILE *__stream); # 549 "/usr/include/stdio.h" 3 4 extern int fputc (int __c, FILE *__stream); extern int putc (int __c, FILE *__stream); extern int putchar (int __c); # 565 "/usr/include/stdio.h" 3 4 extern int fputc_unlocked (int __c, FILE *__stream); extern int putc_unlocked (int __c, FILE *__stream); extern int putchar_unlocked (int __c); extern int getw (FILE *__stream); extern int putw (int __w, FILE *__stream); extern char *fgets (char *__restrict __s, int __n, FILE *__restrict __stream) __attribute__ ((__access__ (__write_only__, 1, 2))); # 632 "/usr/include/stdio.h" 3 4 extern __ssize_t __getdelim (char **__restrict __lineptr, size_t *__restrict __n, int __delimiter, FILE *__restrict __stream) ; extern __ssize_t getdelim (char **__restrict __lineptr, size_t *__restrict __n, int __delimiter, FILE *__restrict __stream) ; extern __ssize_t getline (char **__restrict __lineptr, size_t *__restrict __n, FILE *__restrict __stream) ; extern int fputs (const char *__restrict __s, FILE *__restrict __stream); extern int puts (const char *__s); extern int ungetc (int __c, FILE *__stream); extern size_t fread (void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __stream) ; extern size_t fwrite (const void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __s); # 702 "/usr/include/stdio.h" 3 4 extern size_t fread_unlocked (void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __stream) ; extern size_t fwrite_unlocked (const void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __stream); extern int fseek (FILE *__stream, long int __off, int __whence); extern long int ftell (FILE *__stream) ; extern void rewind (FILE *__stream); # 736 "/usr/include/stdio.h" 3 4 extern int fseeko (FILE *__stream, __off_t __off, int __whence); extern __off_t ftello (FILE *__stream) ; # 760 "/usr/include/stdio.h" 3 4 extern int fgetpos (FILE *__restrict __stream, fpos_t *__restrict __pos); extern int fsetpos (FILE *__stream, const fpos_t *__pos); # 786 "/usr/include/stdio.h" 3 4 extern void clearerr (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)); extern int feof (FILE *__stream) 
__attribute__ ((__nothrow__ , __leaf__)) ; extern int ferror (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ; extern void clearerr_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)); extern int feof_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ; extern int ferror_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ; extern void perror (const char *__s); extern int fileno (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ; extern int fileno_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ; # 823 "/usr/include/stdio.h" 3 4 extern int pclose (FILE *__stream); extern FILE *popen (const char *__command, const char *__modes) __attribute__ ((__malloc__)) __attribute__ ((__malloc__ (pclose, 1))) ; extern char *ctermid (char *__s) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__access__ (__write_only__, 1))); # 867 "/usr/include/stdio.h" 3 4 extern void flockfile (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)); extern int ftrylockfile (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ; extern void funlockfile (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)); # 885 "/usr/include/stdio.h" 3 4 extern int __uflow (FILE *); extern int __overflow (FILE *, int); # 909 "/usr/include/stdio.h" 3 4 # 2 "tests/test.c" 2 # 3 "tests/test.c" void foo() { printf("hello world\n"); } mozilla-sccache-40c3d6b/tests/test_a.cu000066400000000000000000000003141475712407500201430ustar00rootroot00000000000000 #include <stdio.h> #include "cuda_runtime.h" __global__ void cuda_entry_point(int*, int*) {} __device__ void cuda_device_func(int*, int*) {} int main() { printf("%s says hello world\n", __FILE__); } mozilla-sccache-40c3d6b/tests/test_a.hip000066400000000000000000000003171475712407500203170ustar00rootroot00000000000000 #include <stdio.h> #include <hip/hip_runtime.h> __global__ void cuda_entry_point(int*, int*) {} __device__ void cuda_device_func(int*, int*) {} int main() { printf("%s says hello world\n", __FILE__); } mozilla-sccache-40c3d6b/tests/test_b.cu000066400000000000000000000003141475712407500201440ustar00rootroot00000000000000 #include <stdio.h> #include "cuda_runtime.h" __global__ void cuda_entry_point(int*, int*) {} __device__ void cuda_device_func(int*, int*) {} int main() { printf("%s says hello world\n", __FILE__); } mozilla-sccache-40c3d6b/tests/test_b.hip000066400000000000000000000003171475712407500203200ustar00rootroot00000000000000 #include <stdio.h> #include <hip/hip_runtime.h> __global__ void cuda_entry_point(int*, int*) {} __device__ void cuda_device_func(int*, int*) {} int main() { printf("%s says hello world\n", __FILE__); } mozilla-sccache-40c3d6b/tests/test_c.cu000066400000000000000000000003141475712407500201450ustar00rootroot00000000000000 #include <stdio.h> #include "cuda_runtime.h" __global__ void cuda_entry_point(int*, int*) {} __device__ void cuda_device_func(int*, int*) {} int main() { printf("%s says hello world\n", __FILE__); } mozilla-sccache-40c3d6b/tests/test_c.hip000066400000000000000000000003171475712407500203210ustar00rootroot00000000000000 #include <stdio.h> #include <hip/hip_runtime.h> __global__ void cuda_entry_point(int*, int*) {} __device__ void cuda_device_func(int*, int*) {} int main() { printf("%s says hello world\n", __FILE__); } mozilla-sccache-40c3d6b/tests/test_clang_multicall.c000066400000000000000000000003211475712407500226700ustar00rootroot00000000000000#include <iostream> // this is c++ code, but the extension is .c, // so clang doesn't change its behavior because of the extension int main() { std::cout << "Hello, world!"
<< std::endl; return 0; } mozilla-sccache-40c3d6b/tests/test_err.c000066400000000000000000000000021475712407500203210ustar00rootroot00000000000000x mozilla-sccache-40c3d6b/tests/test_macro_expansion.c000066400000000000000000000007061475712407500227300ustar00rootroot00000000000000#include <stdlib.h> #define foo(x) \ { \ if (x) { \ abort(); \ } \ } void bar() { foo(0); } mozilla-sccache-40c3d6b/tests/test_whitespace.c000066400000000000000000000001011475712407500216640ustar00rootroot00000000000000#include <stdio.h> int main() {printf("Hello world!");return 0;}mozilla-sccache-40c3d6b/tests/test_whitespace_alt.c000066400000000000000000000001151475712407500225310ustar00rootroot00000000000000#include <stdio.h> int main() { printf("Hello world!"); return 0; } mozilla-sccache-40c3d6b/tests/test_with_define.c000066400000000000000000000002201475712407500220210ustar00rootroot00000000000000#include <stdio.h> #if !defined(SCCACHE_TEST_DEFINE) #error SCCACHE_TEST_DEFINE is not defined #endif void foo() { printf("hello world\n"); } mozilla-sccache-40c3d6b/tests/xcode/000077500000000000000000000000001475712407500174375ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/xcode/main.cpp000066400000000000000000000001551475712407500210700ustar00rootroot00000000000000#include <iostream> int main(int argc, const char * argv[]) { std::cout << "Hello, World!\n"; return 0; } mozilla-sccache-40c3d6b/tests/xcode/sccache.xcconfig000066400000000000000000000002011475712407500225430ustar00rootroot00000000000000C_COMPILER_LAUNCHER=../../target/debug/sccache CLANG_ENABLE_MODULES=NO COMPILER_INDEX_STORE_ENABLE=NO CLANG_USE_RESPONSE_FILE=NO mozilla-sccache-40c3d6b/tests/xcode/xcode-test.xcodeproj/000077500000000000000000000000001475712407500235125ustar00rootroot00000000000000mozilla-sccache-40c3d6b/tests/xcode/xcode-test.xcodeproj/project.pbxproj000066400000000000000000000220261475712407500265700ustar00rootroot00000000000000// !$*UTF8*$!
{ archiveVersion = 1; classes = { }; objectVersion = 63; objects = { /* Begin PBXBuildFile section */ 9D52B00E2CABDB80008CF5FD /* main.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9D52B00D2CABDB80008CF5FD /* main.cpp */; }; /* End PBXBuildFile section */ /* Begin PBXCopyFilesBuildPhase section */ 9D52B0012CABDB40008CF5FD /* CopyFiles */ = { isa = PBXCopyFilesBuildPhase; buildActionMask = 2147483647; dstPath = /usr/share/man/man1/; dstSubfolderSpec = 0; files = ( ); runOnlyForDeploymentPostprocessing = 1; }; /* End PBXCopyFilesBuildPhase section */ /* Begin PBXFileReference section */ 9D52B0032CABDB40008CF5FD /* xcode-test */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "xcode-test"; sourceTree = BUILT_PRODUCTS_DIR; }; 9D52B00D2CABDB80008CF5FD /* main.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = main.cpp; sourceTree = "<group>"; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ 9D52B0002CABDB40008CF5FD /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( ); runOnlyForDeploymentPostprocessing = 0; }; /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ 9D52AFFA2CABDB40008CF5FD = { isa = PBXGroup; children = ( 9D52B00D2CABDB80008CF5FD /* main.cpp */, 9D52B0042CABDB40008CF5FD /* Products */, ); sourceTree = "<group>"; }; 9D52B0042CABDB40008CF5FD /* Products */ = { isa = PBXGroup; children = ( 9D52B0032CABDB40008CF5FD /* xcode-test */, ); name = Products; sourceTree = "<group>"; }; /* End PBXGroup section */ /* Begin PBXNativeTarget section */ 9D52B0022CABDB40008CF5FD /* xcode-test */ = { isa = PBXNativeTarget; buildConfigurationList = 9D52B00A2CABDB40008CF5FD /* Build configuration list for PBXNativeTarget "xcode-test" */; buildPhases = ( 9D52AFFF2CABDB40008CF5FD /* Sources */, 9D52B0002CABDB40008CF5FD /* Frameworks */, 9D52B0012CABDB40008CF5FD /* CopyFiles */, ); buildRules = ( ); dependencies = ( ); name = "xcode-test"; packageProductDependencies = ( ); productName = "xcode-test"; productReference = 9D52B0032CABDB40008CF5FD /* xcode-test */; productType = "com.apple.product-type.tool"; }; /* End PBXNativeTarget section */ /* Begin PBXProject section */ 9D52AFFB2CABDB40008CF5FD /* Project object */ = { isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = 1; LastUpgradeCheck = 1600; TargetAttributes = { 9D52B0022CABDB40008CF5FD = { CreatedOnToolsVersion = 16.0; }; }; }; buildConfigurationList = 9D52AFFE2CABDB40008CF5FD /* Build configuration list for PBXProject "xcode-test" */; compatibilityVersion = "Xcode 12.0"; developmentRegion = en; hasScannedForEncodings = 0; knownRegions = ( en, Base, ); mainGroup = 9D52AFFA2CABDB40008CF5FD; minimizedProjectReferenceProxies = 1; productRefGroup = 9D52B0042CABDB40008CF5FD /* Products */; projectDirPath = ""; projectRoot = ""; targets = ( 9D52B0022CABDB40008CF5FD /* xcode-test */, ); }; /* End PBXProject section */ /* Begin PBXSourcesBuildPhase section */ 9D52AFFF2CABDB40008CF5FD /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( 9D52B00E2CABDB80008CF5FD /* main.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; /* End PBXSourcesBuildPhase section */ /* Begin XCBuildConfiguration section */ 9D52B0082CABDB40008CF5FD /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; CLANG_ENABLE_MODULES = YES; CLANG_ENABLE_OBJC_ARC = YES; CLANG_ENABLE_OBJC_WEAK = YES; CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; CLANG_WARN_BOOL_CONVERSION = YES; CLANG_WARN_COMMA = YES; CLANG_WARN_CONSTANT_CONVERSION = YES; CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; CLANG_WARN_DOCUMENTATION_COMMENTS = YES; CLANG_WARN_EMPTY_BODY = YES; CLANG_WARN_ENUM_CONVERSION = YES; CLANG_WARN_INFINITE_RECURSION = YES; CLANG_WARN_INT_CONVERSION = YES; CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; CLANG_WARN_STRICT_PROTOTYPES = YES; CLANG_WARN_SUSPICIOUS_MOVE = YES; CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; CLANG_WARN_UNREACHABLE_CODE = YES; CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; COPY_PHASE_STRIP = NO; DEBUG_INFORMATION_FORMAT = dwarf; ENABLE_STRICT_OBJC_MSGSEND = YES; ENABLE_TESTABILITY = YES; ENABLE_USER_SCRIPT_SANDBOXING = YES; GCC_C_LANGUAGE_STANDARD = gnu17; GCC_DYNAMIC_NO_PIC = NO; GCC_NO_COMMON_BLOCKS = YES; GCC_OPTIMIZATION_LEVEL = 0; GCC_PREPROCESSOR_DEFINITIONS = ( "DEBUG=1", "$(inherited)", ); GCC_WARN_64_TO_32_BIT_CONVERSION = YES; GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; GCC_WARN_UNDECLARED_SELECTOR = YES; GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; LOCALIZATION_PREFERS_STRING_CATALOGS = YES; MACOSX_DEPLOYMENT_TARGET = 11.5; MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; MTL_FAST_MATH = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = macosx; }; name = Debug; }; 9D52B0092CABDB40008CF5FD /* Release */ = { isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; CLANG_ANALYZER_NONNULL = YES; CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; CLANG_ENABLE_MODULES = YES; CLANG_ENABLE_OBJC_ARC = YES; CLANG_ENABLE_OBJC_WEAK = YES; CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; CLANG_WARN_BOOL_CONVERSION = YES; CLANG_WARN_COMMA = YES; CLANG_WARN_CONSTANT_CONVERSION = YES; CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; CLANG_WARN_DOCUMENTATION_COMMENTS = YES; CLANG_WARN_EMPTY_BODY = YES; CLANG_WARN_ENUM_CONVERSION = YES; CLANG_WARN_INFINITE_RECURSION = YES; CLANG_WARN_INT_CONVERSION = YES; CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; CLANG_WARN_STRICT_PROTOTYPES = YES; CLANG_WARN_SUSPICIOUS_MOVE = YES; CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; CLANG_WARN_UNREACHABLE_CODE = YES; CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; COPY_PHASE_STRIP = NO; DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; ENABLE_NS_ASSERTIONS = NO; ENABLE_STRICT_OBJC_MSGSEND = YES; ENABLE_USER_SCRIPT_SANDBOXING = YES; GCC_C_LANGUAGE_STANDARD = gnu17; GCC_NO_COMMON_BLOCKS = YES; GCC_WARN_64_TO_32_BIT_CONVERSION = YES; GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; GCC_WARN_UNDECLARED_SELECTOR = YES; GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; 
LOCALIZATION_PREFERS_STRING_CATALOGS = YES; MACOSX_DEPLOYMENT_TARGET = 11.5; MTL_ENABLE_DEBUG_INFO = NO; MTL_FAST_MATH = YES; SDKROOT = macosx; }; name = Release; }; 9D52B00B2CABDB40008CF5FD /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { CODE_SIGN_STYLE = Automatic; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Debug; }; 9D52B00C2CABDB40008CF5FD /* Release */ = { isa = XCBuildConfiguration; buildSettings = { CODE_SIGN_STYLE = Automatic; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Release; }; /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ 9D52AFFE2CABDB40008CF5FD /* Build configuration list for PBXProject "xcode-test" */ = { isa = XCConfigurationList; buildConfigurations = ( 9D52B0082CABDB40008CF5FD /* Debug */, 9D52B0092CABDB40008CF5FD /* Release */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; 9D52B00A2CABDB40008CF5FD /* Build configuration list for PBXNativeTarget "xcode-test" */ = { isa = XCConfigurationList; buildConfigurations = ( 9D52B00B2CABDB40008CF5FD /* Debug */, 9D52B00C2CABDB40008CF5FD /* Release */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; /* End XCConfigurationList section */ }; rootObject = 9D52AFFB2CABDB40008CF5FD /* Project object */; }