diff --git a/.coderabbit.yaml b/.coderabbit.yaml deleted file mode 100644 index 2220649caa2..00000000000 --- a/.coderabbit.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json -# Disable CodeRabbit auto-review to prevent verbose comments on PRs. -# When enabled: false, CodeRabbit won't attempt reviews and won't post -# "Review skipped" or other automated comments. -reviews: - auto_review: - enabled: false - review_status: false - high_level_summary: false - poem: false - sequence_diagrams: false - changed_files_summary: false - tools: - github-checks: - enabled: false -chat: - art: false - auto_reply: false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 763c5f27ee6..00000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -# Pull requests concerning the listed files will automatically invite the respective maintainers as reviewers. -# This file is not used for denoting any kind of ownership, but is merely a tool for handling notifications. -# -# Merge permissions are required for maintaining an entry in this file. 
-# For documentation on this mechanism, see https://help.github.com/articles/about-codeowners/ - -# Default reviewers if nothing else matches -* @edolstra - -# This file -.github/CODEOWNERS @edolstra - -# Documentation of built-in functions -src/libexpr/primops.cc @roberth - -# Libstore layer -/src/libstore @ericson2314 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index af94c3e9e5b..08a5851748d 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,10 +1,9 @@ --- name: Bug report about: Report unexpected or incorrect behaviour -title: '' +title: "" labels: bug -assignees: '' - +assignees: "" --- ## Describe the bug @@ -32,7 +31,9 @@ assignees: '' ## Metadata - + + + ## Additional context @@ -42,13 +43,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) +- [ ] checked [latest Determinate Nix manual] \([source]) - [ ] checked [open bug issues and pull requests] for possible duplicates -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open bug issues and pull requests]: https://github.com/NixOS/nix/labels/bug - ---- - -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index fe9f9dd209d..b88e1093798 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,10 +1,9 @@ --- name: Feature request about: Suggest a new feature -title: '' +title: "" labels: feature -assignees: '' - +assignees: "" --- ## Is your feature request related to a problem? @@ -27,13 +26,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open feature issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open feature issues and pull requests]: https://github.com/NixOS/nix/labels/feature - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/installer.md b/.github/ISSUE_TEMPLATE/installer.md index 070e0bd9b25..430bef971aa 100644 --- a/.github/ISSUE_TEMPLATE/installer.md +++ b/.github/ISSUE_TEMPLATE/installer.md @@ -1,18 +1,17 @@ --- name: Installer issue about: Report problems with installation -title: '' +title: "" labels: installer -assignees: '' - +assignees: "" --- ## Platform - + -- [ ] Linux: - [ ] macOS +- [ ] Linux: - [ ] WSL ## Additional information @@ -35,13 +34,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open installer issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open installer issues and pull requests]: https://github.com/NixOS/nix/labels/installer - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/missing_documentation.md b/.github/ISSUE_TEMPLATE/missing_documentation.md index 4e05b626d39..fcdd0d20135 100644 --- a/.github/ISSUE_TEMPLATE/missing_documentation.md +++ b/.github/ISSUE_TEMPLATE/missing_documentation.md @@ -1,10 +1,9 @@ --- name: Missing or incorrect documentation about: Help us improve the reference manual -title: '' +title: "" labels: documentation -assignees: '' - +assignees: "" --- ## Problem @@ -19,13 +18,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open documentation issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open documentation issues and pull requests]: https://github.com/NixOS/nix/labels/documentation - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open documentation issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). +[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open documentation issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/documentation diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c155bf8bfa4..d3e1f817736 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,26 +1,3 @@ - - ## Motivation @@ -34,9 +11,3 @@ PR stuck in review? 
We have two Nix team meetings per week online that are open - ---- - -Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc). - -The Nix maintainer team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19) to [schedule and track reviews](https://github.com/NixOS/nix/tree/master/maintainers#project-board-protocol). diff --git a/.github/STALE-BOT.md b/.github/STALE-BOT.md index bc0005413f1..281d0f79a8b 100644 --- a/.github/STALE-BOT.md +++ b/.github/STALE-BOT.md @@ -2,34 +2,21 @@ - Thanks for your contribution! - To remove the stale label, just leave a new comment. -- _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.) -- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #users:nixos.org](https://matrix.to/#/#users:nixos.org). +- You can always ask for help on [Discord](https://determinate.systems/discord). ## Suggestions for PRs -1. GitHub sometimes doesn't notify people who commented / reviewed a PR previously, when you (force) push commits. If you have addressed the reviews you can [officially ask for a review](https://docs.github.com/en/free-pro-team@latest/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from those who commented to you or anyone else. -2. If it is unfinished but you plan to finish it, please mark it as a draft. -3. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. -4. To get things rolling again, rebase the PR against the target branch and address valid comments. -5. If you need a review to move forward, ask in [the Discourse thread for PRs that need help](https://discourse.nixos.org/t/prs-in-distress/3604). -6. 
If all you need is a merge, check the git history to find and [request reviews](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from people who usually merge related contributions. +1. If it is unfinished but you plan to finish it, please mark it as a draft. +1. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. +1. To get things rolling again, rebase the PR against the target branch and address valid comments. +1. If you need a review to move forward, ask in [Discord](https://determinate.systems/discord). ## Suggestions for issues 1. If it is resolved (either for you personally, or in general), please consider closing it. 2. If this might still be an issue, but you are not interested in promoting its resolution, please consider closing it while encouraging others to take over and reopen an issue if they care enough. -3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [our Discourse Forum](https://discourse.nixos.org/). -4. As with all open source projects, your best option is to submit a Pull Request that addresses this issue. We :heart: this attitude! +3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [Discord](https://determinate.systems/discord). **Memorandum on closing issues** Don't be afraid to close an issue that holds valuable information. Closed issues stay in the system for people to search, read, cross-reference, or even reopen--nothing is lost! Closing obsolete issues is an important way to help maintainers focus their time and effort. 
- -## Useful GitHub search queries - -- [Open PRs with any stale-bot interaction](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open PRs with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22) -- [Open PRs with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open Issues with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) diff --git a/.github/release-notes.sh b/.github/release-notes.sh new file mode 100755 index 00000000000..f641e146d2e --- /dev/null +++ b/.github/release-notes.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# SC2002 disables "useless cat" warnings. +# I prefer pipelines that start with an explicit input, and go from there. +# Overly fussy. +# shellcheck disable=SC2002 + +scratch=$(mktemp -d -t tmp.XXXXXXXXXX) +finish() { + rm -rf "$scratch" +} +trap finish EXIT + +DATE=$(date +%Y-%m-%d) +DETERMINATE_NIX_VERSION=$(cat .version-determinate) +TAG_NAME="v${DETERMINATE_NIX_VERSION}" +NIX_VERSION=$(cat .version) +NIX_VERSION_MAJOR_MINOR=$(echo "$NIX_VERSION" | cut -d. 
-f1,2) +GITHUB_REPOSITORY="${GITHUB_REPOSITORY:-DeterminateSystems/nix-src}" + +gh api "/repos/${GITHUB_REPOSITORY}/releases/generate-notes" \ + -f "tag_name=${TAG_NAME}" > "$scratch/notes.json" + +trim_trailing_newlines() { + local text + text="$(cat)" + echo -n "${text}" +} + +linkify_gh() { + sed \ + -e 's!\(https://github.com/DeterminateSystems/nix-src/\(pull\|issues\)/\([[:digit:]]\+\)\)![DeterminateSystems/nix-src#\3](\1)!' \ + -e 's#\(https://github.com/DeterminateSystems/nix-src/compare/\([^ ]\+\)\)#[\2](\1)#' +} + +( + cat doc/manual/source/release-notes-determinate/changes.md \ + | sed 's/^.*\(\)$/This section lists the differences between upstream Nix '"$NIX_VERSION_MAJOR_MINOR"' and Determinate Nix '"$DETERMINATE_NIX_VERSION"'.\1/' \ + + printf "\n\n" "$DETERMINATE_NIX_VERSION" + cat "$scratch/notes.json" \ + | jq -r .body \ + | grep -v '^#' \ + | grep -v "Full Changelog" \ + | trim_trailing_newlines \ + | sed -e 's/^\* /\n* /' \ + | linkify_gh + echo "" # final newline +) > "$scratch/changes.md" + +( + printf "# Release %s (%s)\n\n" \ + "$DETERMINATE_NIX_VERSION" \ + "$DATE" + printf "* Based on [upstream Nix %s](../release-notes/rl-%s.md).\n\n" \ + "$NIX_VERSION" \ + "$NIX_VERSION_MAJOR_MINOR" + + cat "$scratch/notes.json" | jq -r .body | linkify_gh +) > "$scratch/rl.md" + +( + cat doc/manual/source/SUMMARY.md.in \ + | sed 's/\(\)$/\1\n - [Release '"$DETERMINATE_NIX_VERSION"' ('"$DATE"')](release-notes-determinate\/'"$TAG_NAME"'.md)/' +) > "$scratch/summary.md" + +mv "$scratch/changes.md" doc/manual/source/release-notes-determinate/changes.md +mv "$scratch/rl.md" "doc/manual/source/release-notes-determinate/v${DETERMINATE_NIX_VERSION}.md" +mv "$scratch/summary.md" doc/manual/source/SUMMARY.md.in diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml deleted file mode 100644 index 5ad073785cd..00000000000 --- a/.github/workflows/backport.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Backport -on: - pull_request_target: - types: 
[closed, labeled] -permissions: - contents: read -jobs: - backport: - name: Backport Pull Request - permissions: - # for korthout/backport-action - contents: write - pull-requests: write - if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name)) - runs-on: ubuntu-24.04-arm - steps: - - name: Generate GitHub App token - id: generate-token - uses: actions/create-github-app-token@v2 - with: - app-id: ${{ vars.CI_APP_ID }} - private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} - - uses: actions/checkout@v6 - with: - ref: ${{ github.event.pull_request.head.sha }} - # required to find all branches - fetch-depth: 0 - - name: Create backport PRs - uses: korthout/backport-action@d07416681cab29bf2661702f925f020aaa962997 # v3.4.1 - id: backport - with: - # Config README: https://github.com/korthout/backport-action#backport-action - github_token: ${{ steps.generate-token.outputs.token }} - github_workspace: ${{ github.workspace }} - auto_merge_enabled: true - pull_description: |- - Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}. 
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000000..dd98d0d00f9 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,241 @@ +on: + workflow_call: + inputs: + system: + required: true + type: string + runner: + required: true + type: string + runner_for_virt: + required: true + type: string + runner_small: + required: true + type: string + if: + required: false + default: true + type: boolean + run_tests: + required: false + default: true + type: boolean + run_vm_tests: + required: false + default: false + type: boolean + run_regression_tests: + required: false + default: false + type: boolean + publish_manual: + required: false + default: false + type: boolean + secrets: + manual_netlify_auth_token: + required: false + manual_netlify_site_id: + required: false + +jobs: + build: + if: ${{ inputs.if }} + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix build .#packages.${{ inputs.system }}.default .#packages.${{ inputs.system }}.binaryTarball --no-link -L + - run: nix build .#packages.${{ inputs.system }}.binaryTarball --out-link tarball + - uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.system }} + path: ./tarball/*.xz + + test: + if: ${{ inputs.if && inputs.run_tests}} + needs: build + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix flake check -L --system ${{ inputs.system }} + + vm_tests_smoke: + if: inputs.run_vm_tests && github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + steps: + - uses: actions/checkout@v4 + - uses: 
DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + nix build -L \ + .#hydraJobs.tests.functional_user \ + .#hydraJobs.tests.githubFlakes \ + .#hydraJobs.tests.nix-docker \ + .#hydraJobs.tests.tarballFlakes \ + ; + + vm_tests_all: + if: inputs.run_vm_tests && github.event_name == 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + cmd() { + nix build -L --keep-going --timeout 600 \ + $(nix flake show --json \ + | jq -r ' + .hydraJobs.tests + | with_entries(select(.value.type == "derivation")) + | keys[] + | ".#hydraJobs.tests." + .') + } + + if ! cmd; then + echo "failed, retrying once ..." + printf "\n\n\n\n\n\n\n\n" + cmd + fi + + flake_regressions: + if: | + (inputs.run_regression_tests && github.event_name == 'merge_group') + || ( + inputs.run_regression_tests + && github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'flake-regression-test') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'flake-regression-test')) + ) + ) + needs: build + runs-on: ${{ inputs.runner }} + strategy: + matrix: + nix_config: + - "lazy-trees = true" + - "lazy-trees = false" + - "eval-cores = 24" + glob: + - "[0-d]*" + - "[e-l]*" + - "[m]*" + - "[n-r]*" + - "[s-z]*" + + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - name: Checkout flake-regressions + uses: actions/checkout@v4 + with: + repository: DeterminateSystems/flake-regressions + path: flake-regressions + - name: Checkout flake-regressions-data + uses: actions/checkout@v4 + with: + repository: DeterminateSystems/flake-regressions-data + path: flake-regressions/tests + - uses: DeterminateSystems/determinate-nix-action@main + - uses: 
DeterminateSystems/flakehub-cache-action@main + - name: Run flake regression tests + env: + #PARALLEL: ${{ !contains(matrix.nix_config, 'eval-cores') && '-P 50%' || '-P 1' }} + PARALLEL: '-P 1' + FLAKE_REGRESSION_GLOB: ${{ matrix.glob }} + NIX_CONFIG: ${{ matrix.nix_config }} + PREFETCH: "1" + run: | + set -x + echo "PARALLEL: $PARALLEL" + echo "NIX_CONFIG: $NIX_CONFIG" + if [ ! -z "${NSC_CACHE_PATH:-}" ]; then + mkdir -p "${NSC_CACHE_PATH}/nix/xdg-cache" + export XDG_CACHE_HOME="${NSC_CACHE_PATH}/nix/xdg-cache" + fi + nix build -L --out-link ./new-nix + export PATH=$(pwd)/new-nix/bin:$PATH + [[ $(type -p nix) = $(pwd)/new-nix/bin/nix ]] + + nix config show lazy-trees + nix config show eval-cores + lscpu + nproc + + if ! flake-regressions/eval-all.sh; then + echo "Some failed, trying again" + printf "\n\n\n\n\n\n\n\n" + NIX_REMOTE=/tmp/nix flake-regressions/eval-all.sh + fi + + manual: + if: github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_small }} + permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - name: Build manual + if: inputs.system == 'x86_64-linux' + run: nix build .#hydraJobs.manual + - uses: nwtgck/actions-netlify@v3.0 + if: inputs.publish_manual && inputs.system == 'x86_64-linux' + with: + publish-dir: "./result/share/doc/nix/manual" + production-branch: main + github-token: ${{ secrets.GITHUB_TOKEN }} + deploy-message: "Deploy from GitHub Actions" + # NOTE(cole-h): We have a perpetual PR displaying our changes against upstream open, but + # its conversation is locked, so this PR comment can never be posted. 
+ # https://github.com/DeterminateSystems/nix-src/pull/165 + enable-pull-request-comment: ${{ github.event.pull_request.number != 165 }} + enable-commit-comment: true + enable-commit-status: true + overwrites-pull-request-comment: true + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.manual_netlify_auth_token }} + NETLIFY_SITE_ID: ${{ secrets.manual_netlify_site_id }} + + success: + needs: + - build + - test + - vm_tests_smoke + - vm_tests_all + - flake_regressions + - manual + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" + exit 1 + env: + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fe9d94248d1..08000ac4c87 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,317 +2,158 @@ name: "CI" on: pull_request: - merge_group: push: branches: + # NOTE: make sure any branches here are also valid directory names, + # otherwise creating the directory and uploading to s3 will fail + - main - master - workflow_dispatch: - inputs: - dogfood: - description: 'Use dogfood Nix build' - required: false - default: true - type: boolean - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true + merge_group: + release: + types: + - published -permissions: read-all +permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" jobs: eval: - runs-on: ubuntu-24.04 + runs-on: UbuntuLatest32Cores128G steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: 
${{ secrets.GITHUB_TOKEN }} - use_cache: false - - run: nix flake show --all-systems --json - - pre-commit-checks: - name: pre-commit checks - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v6 - - uses: ./.github/actions/install-nix-action + - uses: actions/checkout@v4 with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: ./ci/gha/tests/pre-commit-checks + fetch-depth: 0 + - uses: DeterminateSystems/determinate-nix-action@main + - run: nix flake show --all-systems --json - basic-checks: - name: aggregate basic checks - if: ${{ always() }} - runs-on: ubuntu-24.04 - needs: [pre-commit-checks, eval] - steps: - - name: Exit with any errors - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} - run: | - exit 1 + build_x86_64-linux: + uses: ./.github/workflows/build.yml + with: + system: x86_64-linux + runner: namespace-profile-linuxamd32c64g-cache + runner_for_virt: UbuntuLatest32Cores128G + runner_small: ubuntu-latest + run_tests: true + run_vm_tests: true + run_regression_tests: true + publish_manual: true + secrets: + manual_netlify_auth_token: ${{ secrets.NETLIFY_AUTH_TOKEN }} + manual_netlify_site_id: ${{ secrets.NETLIFY_SITE_ID }} - tests: - needs: basic-checks - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - instrumented: false - primary: true - stdenv: stdenv - - scenario: on macos - runs-on: macos-14 - os: darwin - instrumented: false - primary: true - stdenv: stdenv - - scenario: on ubuntu (with sanitizers / coverage) - runs-on: ubuntu-24.04 - os: linux - instrumented: true - primary: false - stdenv: clangStdenv - name: tests ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - timeout-minutes: 60 - steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 
0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - # The sandbox would otherwise be disabled by default on Darwin - extra_nix_config: "sandbox = true" - # Since ubuntu 22.30, unprivileged usernamespaces are no longer allowed to map to the root user: - # https://ubuntu.com/blog/ubuntu-23-10-restricted-unprivileged-user-namespaces - - run: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0 - if: matrix.os == 'linux' - - name: Run component tests - run: | - nix build --file ci/gha/tests/wrapper.nix componentTests -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" - - name: Run VM tests - run: | - nix build --file ci/gha/tests/wrapper.nix vmTests -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" - if: ${{ matrix.os == 'linux' }} - - name: Run flake checks and prepare the installer tarball - run: | - ci/gha/tests/build-checks - ci/gha/tests/prepare-installer-for-github-actions - if: ${{ matrix.primary }} - - name: Collect code coverage - run: | - nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" \ - --out-link coverage-reports - cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY - if: ${{ matrix.instrumented }} - - name: Upload coverage reports - uses: actions/upload-artifact@v5 - with: - name: coverage-reports - path: coverage-reports/ - if: ${{ matrix.instrumented }} - - name: Upload installer tarball - uses: actions/upload-artifact@v5 - with: - name: installer-${{matrix.os}} - path: out/* - if: ${{ matrix.primary }} + build_aarch64-linux: + uses: ./.github/workflows/build.yml + with: + if: ${{ + github.event_name != 'pull_request' + || ( + 
github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + system: aarch64-linux + runner: UbuntuLatest32Cores128GArm + runner_for_virt: UbuntuLatest32Cores128GArm + runner_small: UbuntuLatest32Cores128GArm - installer_test: - needs: [tests] - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - experimental-installer: false - - scenario: on macos - runs-on: macos-14 - os: darwin - experimental-installer: false - - scenario: on ubuntu (experimental) - runs-on: ubuntu-24.04 - os: linux - experimental-installer: true - - scenario: on macos (experimental) - runs-on: macos-14 - os: darwin - experimental-installer: true - name: installer test ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - steps: - - uses: actions/checkout@v6 - - name: Download installer tarball - uses: actions/download-artifact@v6 - with: - name: installer-${{matrix.os}} - path: out - - name: Looking up the installer tarball URL - id: installer-tarball-url - run: | - echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" - TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 'nix*.tar.xz' -print | head -n 1)" - echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" - - uses: cachix/install-nix-action@0b0e072294b088b73964f1d72dfdac0951439dbd # v31.8.4 - if: ${{ !matrix.experimental-installer }} - with: - install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} - install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} - - uses: ./.github/actions/install-nix-action - if: ${{ matrix.experimental-installer }} - with: - dogfood: false - experimental-installer: true - tarball_url: ${{ 
steps.installer-tarball-url.outputs.tarball-path }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: sudo apt install fish zsh - if: matrix.os == 'linux' - - run: brew install fish - if: matrix.os == 'darwin' - - run: exec bash -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec sh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec zsh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec fish -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec bash -c "nix-channel --add https://releases.nixos.org/nixos/unstable/nixos-23.05pre466020.60c1d71f2ba nixpkgs" - - run: exec bash -c "nix-channel --update && nix-env -iA nixpkgs.hello && hello" + build_aarch64-darwin: + uses: ./.github/workflows/build.yml + with: + system: aarch64-darwin + runner: namespace-profile-mac-m2-12c28g + runner_for_virt: namespace-profile-mac-m2-12c28g + runner_small: macos-latest-xlarge - # Steps to test CI automation in your own fork. - # 1. Sign-up for https://hub.docker.com/ - # 2. Store your dockerhub username as DOCKERHUB_USERNAME in "Repository secrets" of your fork repository settings (https://github.com/$githubuser/nix/settings/secrets/actions) - # 3. 
Create an access token in https://hub.docker.com/settings/security and store it as DOCKERHUB_TOKEN in "Repository secrets" of your fork - check_secrets: - permissions: - contents: none - name: Check presence of secrets - runs-on: ubuntu-24.04 - outputs: - docker: ${{ steps.secret.outputs.docker }} + success: + runs-on: ubuntu-latest + needs: + - eval + - build_x86_64-linux + - build_aarch64-linux + - build_aarch64-darwin + if: ${{ always() }} steps: - - name: Check for DockerHub secrets - id: secret + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" + exit 1 env: - _DOCKER_SECRETS: ${{ secrets.DOCKERHUB_USERNAME }}${{ secrets.DOCKERHUB_TOKEN }} - run: | - echo "docker=${{ env._DOCKER_SECRETS != '' }}" >> $GITHUB_OUTPUT + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') - docker_push_image: - needs: [tests, check_secrets] - permissions: - contents: read - packages: write - if: >- - needs.check_secrets.outputs.docker == 'true' && - github.event_name == 'push' && - github.ref_name == 'master' - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - dogfood: false - extra_nix_config: | - experimental-features = flakes nix-command - - run: echo NIX_VERSION="$(nix eval .\#nix.version | tr -d \")" >> $GITHUB_ENV - - run: nix build .#dockerImage -L - - run: docker load -i ./result/image.tar.gz - - run: docker tag nix:$NIX_VERSION ${{ secrets.DOCKERHUB_USERNAME }}/nix:$NIX_VERSION - - run: docker tag nix:$NIX_VERSION ${{ secrets.DOCKERHUB_USERNAME }}/nix:master - # We'll deploy the newly built image to both Docker Hub and Github Container Registry. 
- # - # Push to Docker Hub first - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/nix:$NIX_VERSION - - run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/nix:master - # Push to GitHub Container Registry as well - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Push image - run: | - IMAGE_ID=ghcr.io/${{ github.repository_owner }}/nix - # Change all uppercase to lowercase - IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main - docker tag nix:$NIX_VERSION $IMAGE_ID:$NIX_VERSION - docker tag nix:$NIX_VERSION $IMAGE_ID:latest - docker push $IMAGE_ID:$NIX_VERSION - docker push $IMAGE_ID:latest - # deprecated 2024-02-24 - docker tag nix:$NIX_VERSION $IMAGE_ID:master - docker push $IMAGE_ID:master + - name: Create artifacts directory + run: mkdir -p ./artifacts - flake_regressions: - needs: tests - runs-on: ubuntu-24.04 - steps: - - name: Checkout nix - uses: actions/checkout@v6 - - name: Checkout flake-regressions - uses: actions/checkout@v6 + - name: Fetch artifacts + uses: actions/download-artifact@v4 with: - repository: NixOS/flake-regressions - path: flake-regressions - - name: Checkout flake-regressions-data - uses: actions/checkout@v6 - with: - repository: NixOS/flake-regressions-data - path: flake-regressions/tests - - uses: ./.github/actions/install-nix-action + path: downloaded + - name: Move downloaded artifacts to artifacts directory + run: | + for dir in ./downloaded/*; do + arch="$(basename "$dir")" + mv "$dir"/*.xz ./artifacts/"${arch}" + done + + - name: Build fallback-paths.nix + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name 
== 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + run: | + nix build .#fallbackPathsNix --out-link fallback + cat fallback > ./artifacts/fallback-paths.nix + + - uses: DeterminateSystems/push-artifact-ids@main with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: nix build -L --out-link ./new-nix && PATH=$(pwd)/new-nix/bin:$PATH MAX_FLAKES=25 flake-regressions/eval-all.sh + s3_upload_role: ${{ secrets.AWS_S3_UPLOAD_ROLE_ARN }} + bucket: ${{ secrets.AWS_S3_UPLOAD_BUCKET_NAME }} + directory: ./artifacts + ids_project_name: determinate-nix + ids_binary_prefix: determinate-nix + skip_acl: true + allowed_branches: '["main"]' - profile_build: - needs: tests - runs-on: ubuntu-24.04 - timeout-minutes: 60 - if: >- - github.event_name == 'push' && - github.ref_name == 'master' + publish: + needs: + - success + if: (!github.repository.fork && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) || startsWith(github.ref, 'refs/tags/'))) + environment: ${{ github.event_name == 'release' && 'production' || '' }} + runs-on: ubuntu-latest + permissions: + contents: write + id-token: write steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: | - experimental-features = flakes nix-command ca-derivations impure-derivations - max-jobs = 1 - - run: | - nix build -L --file ./ci/gha/profile-build buildTimeReport --out-link 
build-time-report.md - cat build-time-report.md >> $GITHUB_STEP_SUMMARY + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-push@main + with: + rolling: ${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} + visibility: "public" + tag: "${{ github.ref_name }}" + - name: Update the release notes + if: startsWith(github.ref, 'refs/tags/') + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_NAME: ${{ github.ref_name }} + run: | + gh release edit "$TAG_NAME" --notes-file doc/manual/source/release-notes-determinate/"$TAG_NAME".md || true diff --git a/.github/workflows/propose-release.yml b/.github/workflows/propose-release.yml new file mode 100644 index 00000000000..ea01e4b7afe --- /dev/null +++ b/.github/workflows/propose-release.yml @@ -0,0 +1,32 @@ +on: + workflow_dispatch: + inputs: + reference-id: + type: string + required: true + version: + type: string + required: true + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + propose-release: + uses: DeterminateSystems/propose-release/.github/workflows/workflow.yml@main + permissions: + id-token: write + contents: write + pull-requests: write + with: + update-flake: false + reference-id: ${{ inputs.reference-id }} + version: ${{ inputs.version }} + extra-commands-early: | + echo ${{ inputs.version }} > .version-determinate + git add .version-determinate + git commit -m "Set .version-determinate to ${{ inputs.version }}" || true + ./.github/release-notes.sh + git add doc + git commit -m "Generate release notes for ${{ inputs.version }}" || true diff --git a/.version b/.version index 3afbaeb2b33..ba13d3caf21 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.33.0 +2.33.1 diff --git a/.version-determinate b/.version-determinate new file mode 100644 index 00000000000..c3df54c9b83 --- /dev/null +++ b/.version-determinate @@ -0,0 +1 @@ +3.15.1 diff --git a/README.md b/README.md 
index 02498944cdb..c5cbcbed21b 100644 --- a/README.md +++ b/README.md @@ -1,38 +1,111 @@ -# Nix +

+ +

+

+  Discord  +  Bluesky  +  Mastodon  +  Twitter  +  LinkedIn  +

-[![Open Collective supporters](https://opencollective.com/nixos/tiers/supporter/badge.svg?label=Supporters&color=brightgreen)](https://opencollective.com/nixos) -[![CI](https://github.com/NixOS/nix/workflows/CI/badge.svg)](https://github.com/NixOS/nix/actions/workflows/ci.yml) +# The Determinate Nix CLI -Nix is a powerful package manager for Linux and other Unix systems that makes package -management reliable and reproducible. Please refer to the [Nix manual](https://nix.dev/reference/nix-manual) -for more details. +[![CI](https://github.com/DeterminateSystems/nix-src/workflows/CI/badge.svg)](https://github.com/DeterminateSystems/nix-src/actions/workflows/ci.yml) -## Installation and first steps +**Nix** is a powerful [language], [package manager][package-management], and [build tool][cli] for [macOS](#macos), [Linux](#linux), and other Unix systems. +It enables you to create fully reproducible [development environments][envs], to build [packages] in sandboxed environments, to build entire Linux systems using [NixOS], and much more. -Visit [nix.dev](https://nix.dev) for [installation instructions](https://nix.dev/tutorials/install-nix) and [beginner tutorials](https://nix.dev/tutorials/first-steps). +[**Determinate Nix**][det-nix] is a downstream distribution of [Nix][upstream] created and maintained by [Determinate Systems][detsys]. +It has two components: -Full reference documentation can be found in the [Nix manual](https://nix.dev/reference/nix-manual). +- The Determinate Nix CLI, a distribution of the Nix CLI built from this repository. + It's based on the [upstream Nix CLI][upstream] and continuously rebased against it, but adds a wide variety of [features] and [improvements][changelog]. +- [Determinate Nixd][dnixd] is a useful daemon for Linux and macOS that handles vital tasks like configuration and enterprise certificate management. 
-## Building and developing +Determinate Nix is built on SOC-2-Type-II-compliant infrastructure using [Determinate Secure Packages][secure-packages], released via a carefully orchestrated process, and, for Determinate Systems customers, backed by formal security response SLAs that meet stringent compliance standards. -Follow instructions in the Nix reference manual to [set up a development environment and build Nix from source](https://nix.dev/manual/nix/development/development/building.html). +> [!NOTE] +> Determinate Nix, by definition, consists of _both_ the components listed above. +> While it's possible to use the code in this repository to run just our downstream Nix CLI, we do _not_ officially support this experience and provide none of the guarantees or SLAs that we provide for Determinate Nix proper. -## Contributing +Determinate Nix is part of the [Determinate platform][determinate], which also includes [FlakeHub], a secure flake repository with features like [FlakeHub Cache][cache], [private flakes][private-flakes], and [semantic versioning][semver] (SemVer) for [flakes]. + +## Installing Determinate Nix + +You can install Determinate Nix on [macOS](#macos), non-NixOS [Linux](#linux) and WSL, and [NixOS](#nixos). + +### macOS + +On macOS, we recommend using the graphical installer from Determinate Systems. +Click [here][gui] to download and run it. + +### Linux + +On Linux, including Windows Subsystem for Linux (WSL), we recommend installing Determinate Nix using [Determinate Nix Installer][installer]: + +```shell +curl -fsSL https://install.determinate.systems/nix | sh -s -- install +``` -Check the [contributing guide](./CONTRIBUTING.md) if you want to get involved with developing Nix. +### NixOS -## Additional resources +On [NixOS], we recommend following our [dedicated installation guide][nixos-install]. +We also provide both [Amazon Machine Images][amis] (AMIs) and [ISOs] for using Determinate on NixOS. 
-Nix was created by Eelco Dolstra and developed as the subject of his PhD thesis [The Purely Functional Software Deployment Model](https://edolstra.github.io/pubs/phd-thesis.pdf), published 2006. -Today, a world-wide developer community contributes to Nix and the ecosystem that has grown around it. +## Other resources -- [The Nix, Nixpkgs, NixOS Community on nixos.org](https://nixos.org/) -- [Official documentation on nix.dev](https://nix.dev) -- [Nixpkgs](https://github.com/NixOS/nixpkgs) is [the largest, most up-to-date free software repository in the world](https://repology.org/repositories/graphs) -- [NixOS](https://github.com/NixOS/nixpkgs/tree/master/nixos) is a Linux distribution that can be configured fully declaratively -- [Discourse](https://discourse.nixos.org/) -- Matrix: [#users:nixos.org](https://matrix.to/#/#users:nixos.org) for user support and [#nix-dev:nixos.org](https://matrix.to/#/#nix-dev:nixos.org) for development +Nix was created by [Eelco Dolstra][eelco] and developed as the subject of his 2006 PhD thesis, [The Purely Functional Software Deployment Model][thesis]. +Today, a worldwide developer community contributes to Nix and the ecosystem that has grown around it. + +- [Zero to Nix][z2n], Determinate Systems' guide to Nix and [flakes] for beginners +- [Nixpkgs], a collection of well over 100,000 software packages that you can build and manage using Nix +- [NixOS] is a Linux distribution that can be configured fully declaratively +- The Nix, Nixpkgs, and NixOS community on [nixos.org][website] + +## Reference + +The primary documentation for Determinate and Determinate Nix is available at [docs.determinate.systems][determinate]. +For deeply technical reference material, see the [Determinate Nix manual][manual] which is based on the upstream Nix manual. ## License -Nix is released under the [LGPL v2.1](./COPYING). +[Upstream Nix][upstream] is released under the [LGPL v2.1][license] license. 
+[Determinate Nix][det-nix] is also released under LGPL v2.1 in accordance with the terms of the upstream license. + +## Contributing + +Check the [contributing guide][contributing] if you want to get involved with developing Nix. + +[amis]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html +[cache]: https://docs.determinate.systems/flakehub/cache +[changelog]: https://determinate.systems/blog/categories/changelog +[cli]: https://manual.determinate.systems/command-ref/new-cli/nix.html +[contributing]: ./CONTRIBUTING.md +[det-nix]: https://docs.determinate.systems/determinate-nix +[determinate]: https://docs.determinate.systems +[detsys]: https://determinate.systems +[dnixd]: https://docs.determinate.systems/determinate-nix#determinate-nixd +[eelco]: https://determinate.systems/people/eelco-dolstra +[envs]: https://zero-to-nix.com/concepts/dev-env +[features]: https://docs.determinate.systems/determinate-nix/#special-features +[flakehub]: https://flakehub.com +[flakes]: https://zero-to-nix.com/concepts/flakes +[gui]: https://install.determinate.systems/determinate-pkg/stable/Universal +[installer]: https://github.com/DeterminateSystems/nix-installer +[isos]: https://github.com/DeterminateSystems/nixos-iso +[language]: https://zero-to-nix.com/concepts/nix-language +[license]: ./COPYING +[manual]: https://manual.determinate.systems +[nixpkgs]: https://github.com/NixOS/nixpkgs +[nixos]: https://github.com/NixOS/nixpkgs/tree/master/nixos +[nixos-install]: https://docs.determinate.systems/guides/advanced-installation#nixos +[packages]: https://zero-to-nix.com/concepts/packages +[package-management]: https://zero-to-nix.com/concepts/package-management +[private-flakes]: https://docs.determinate.systems/flakehub/private-flakes +[secure-packages]: https://determinate.systems/secure-packages +[semver]: https://docs.determinate.systems/flakehub/concepts/semver +[thesis]: https://edolstra.github.io/pubs/phd-thesis.pdf +[upstream]: https://github.com/NixOS/nix 
+[website]: https://nixos.org +[z2n]: https://zero-to-nix.com diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 6100f2f4172..c179174e6e2 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -76,7 +76,6 @@ rec { */ topLevel = { installerScriptForGHA = hydraJobs.installerScriptForGHA.${system}; - installTests = hydraJobs.installTests.${system}; nixpkgsLibTests = hydraJobs.tests.nixpkgsLibTests.${system}; rl-next = pkgs.buildPackages.runCommand "test-rl-next-release-notes" { } '' LANG=C.UTF-8 ${pkgs.changelog-d}/bin/changelog-d ${../../../doc/manual/rl-next} >$out diff --git a/default.nix b/default.nix deleted file mode 100644 index 6466507b714..00000000000 --- a/default.nix +++ /dev/null @@ -1,9 +0,0 @@ -(import ( - let - lock = builtins.fromJSON (builtins.readFile ./flake.lock); - in - fetchTarball { - url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; - sha256 = lock.nodes.flake-compat.locked.narHash; - } -) { src = ./.; }).defaultNix diff --git a/doc/manual/book.toml.in b/doc/manual/book.toml.in index c798afc4a8c..11efca75f11 100644 --- a/doc/manual/book.toml.in +++ b/doc/manual/book.toml.in @@ -1,12 +1,12 @@ [book] -title = "Nix @version@ Reference Manual" +title = "Determinate Nix @version@ Reference Manual" src = "source" [output.html] additional-css = ["custom.css"] additional-js = ["redirects.js"] -edit-url-template = "https://github.com/NixOS/nix/tree/master/doc/manual/{path}" -git-repository-url = "https://github.com/NixOS/nix" +edit-url-template = "https://github.com/DeterminateSystems/nix-src/tree/master/doc/manual/{path}" +git-repository-url = "https://github.com/DeterminateSystems/nix-src" mathjax-support = true # Handles replacing @docroot@ with a path to ./source relative to that markdown file, diff --git a/doc/manual/custom.css b/doc/manual/custom.css index 7af150be391..119c6d12543 100644 --- a/doc/manual/custom.css +++ b/doc/manual/custom.css @@ -1,5 +1,5 @@ 
:root { - --sidebar-width: 23em; + --sidebar-width: 23em; } h1.menu-title::before { @@ -7,11 +7,10 @@ h1.menu-title::before { background-image: url("./favicon.svg"); padding: 1.25em; background-position: center center; - background-size: 2em; + background-size: 1.5em; background-repeat: no-repeat; } - .menu-bar { padding: 0.5em 0em; } @@ -21,13 +20,13 @@ h1.menu-title::before { } h1:not(:first-of-type) { - margin-top: 1.3em; + margin-top: 1.3em; } h2 { - margin-top: 1em; + margin-top: 1em; } .hljs-meta { - user-select: none; + user-select: none; } diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix index 31e74e17d26..292cb283d3d 100644 --- a/doc/manual/generate-manpage.nix +++ b/doc/manual/generate-manpage.nix @@ -42,11 +42,6 @@ let let result = '' - > **Warning** \ - > This program is - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and its interface is subject to change. - # Name `${command}` - ${details.description} diff --git a/doc/manual/meson.build b/doc/manual/meson.build index 3c3e7954113..1b9a325df2a 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -5,19 +5,9 @@ project( license : 'LGPL-2.1-or-later', ) -# Compute documentation URL based on version and release type -version = meson.project_version() -official_release = get_option('official-release') +fs = import('fs') -if official_release - # For official releases, use versioned URL (dropping patch version) - version_parts = version.split('.') - major_minor = '@0@.@1@'.format(version_parts[0], version_parts[1]) - doc_url = 'https://nix.dev/manual/nix/@0@'.format(major_minor) -else - # For development builds, use /latest - doc_url = 'https://nix.dev/manual/nix/latest' -endif +doc_url = 'https://manual.determinate.systems/' nix = find_program('nix', native : true) @@ -40,7 +30,7 @@ nix_env_for_docs = { 'NIX_CONFIG' : 'cores = 0', } -nix_for_docs = [ nix, '--experimental-features', 'nix-command' ] +nix_for_docs = [ 
nix ] nix_eval_for_docs_common = nix_for_docs + [ 'eval', '-I', @@ -137,7 +127,7 @@ if get_option('html-manual') python.full_path(), mdbook.full_path(), meson.current_build_dir(), - meson.project_version(), + fs.read('../../.version-determinate').strip(), rsync.full_path(), ), ], diff --git a/doc/manual/package.nix b/doc/manual/package.nix index 3a90a0faf8a..0b3d8ca940a 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -34,7 +34,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-manual"; + pname = "determinate-nix-manual"; inherit version; workDir = ./.; @@ -42,6 +42,7 @@ mkMesonDerivation (finalAttrs: { fileset.difference (fileset.unions [ ../../.version + ../../.version-determinate # For example JSON ../../src/libutil-tests/data/memory-source-accessor ../../src/libutil-tests/data/hash diff --git a/doc/manual/redirects.json b/doc/manual/redirects.json index 0a6c7150800..07a6f36627f 100644 --- a/doc/manual/redirects.json +++ b/doc/manual/redirects.json @@ -243,29 +243,11 @@ "gloss-validity": "glossary.html#gloss-validity", "part-glossary": "glossary.html", "sec-building-source": "installation/building-source.html", - "ch-env-variables": "installation/env-variables.html", - "sec-installer-proxy-settings": "installation/env-variables.html#proxy-environment-variables", - "sec-nix-ssl-cert-file": "installation/env-variables.html#nix_ssl_cert_file", - "sec-nix-ssl-cert-file-with-nix-daemon-and-macos": "installation/env-variables.html#nix_ssl_cert_file", "chap-installation": "installation/index.html", - "ch-installing-binary": "installation/installing-binary.html", - "sect-macos-installation": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-change-store-prefix": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-encrypted-volume": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-recommended-notes": 
"installation/installing-binary.html#macos-installation", - "sect-macos-installation-symlink": "installation/installing-binary.html#macos-installation", - "sect-multi-user-installation": "installation/installing-binary.html#multi-user-installation", - "sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball", - "sect-nix-install-pinned-version-url": - "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url", - "sect-single-user-installation": "installation/installing-binary.html#single-user-installation", "ch-installing-source": "installation/installing-source.html", - "ssec-multi-user": "installation/multi-user.html", "ch-nix-security": "installation/nix-security.html", "sec-obtaining-source": "installation/obtaining-source.html", "sec-prerequisites-source": "installation/prerequisites-source.html", - "sec-single-user": "installation/single-user.html", - "ch-supported-platforms": "installation/supported-platforms.html", "ch-upgrading-nix": "installation/upgrading.html", "ch-about-nix": "introduction.html", "chap-introduction": "introduction.html", @@ -287,43 +269,7 @@ "sec-sharing-packages": "package-management/sharing-packages.html", "ssec-ssh-substituter": "package-management/ssh-substituter.html", "chap-quick-start": "quick-start.html", - "sec-relnotes": "release-notes/index.html", - "ch-relnotes-0.10.1": "release-notes/rl-0.10.1.html", - "ch-relnotes-0.10": "release-notes/rl-0.10.html", - "ssec-relnotes-0.11": "release-notes/rl-0.11.html", - "ssec-relnotes-0.12": "release-notes/rl-0.12.html", - "ssec-relnotes-0.13": "release-notes/rl-0.13.html", - "ssec-relnotes-0.14": "release-notes/rl-0.14.html", - "ssec-relnotes-0.15": "release-notes/rl-0.15.html", - "ssec-relnotes-0.16": "release-notes/rl-0.16.html", - "ch-relnotes-0.5": "release-notes/rl-0.5.html", - "ch-relnotes-0.6": "release-notes/rl-0.6.html", - "ch-relnotes-0.7": "release-notes/rl-0.7.html", - "ch-relnotes-0.8.1": 
"release-notes/rl-0.8.1.html", - "ch-relnotes-0.8": "release-notes/rl-0.8.html", - "ch-relnotes-0.9.1": "release-notes/rl-0.9.1.html", - "ch-relnotes-0.9.2": "release-notes/rl-0.9.2.html", - "ch-relnotes-0.9": "release-notes/rl-0.9.html", - "ssec-relnotes-1.0": "release-notes/rl-1.0.html", - "ssec-relnotes-1.1": "release-notes/rl-1.1.html", - "ssec-relnotes-1.10": "release-notes/rl-1.10.html", - "ssec-relnotes-1.11.10": "release-notes/rl-1.11.10.html", - "ssec-relnotes-1.11": "release-notes/rl-1.11.html", - "ssec-relnotes-1.2": "release-notes/rl-1.2.html", - "ssec-relnotes-1.3": "release-notes/rl-1.3.html", - "ssec-relnotes-1.4": "release-notes/rl-1.4.html", - "ssec-relnotes-1.5.1": "release-notes/rl-1.5.html", - "ssec-relnotes-1.5.2": "release-notes/rl-1.5.2.html", - "ssec-relnotes-1.5": "release-notes/rl-1.5.html", - "ssec-relnotes-1.6.1": "release-notes/rl-1.6.1.html", - "ssec-relnotes-1.6.0": "release-notes/rl-1.6.html", - "ssec-relnotes-1.7": "release-notes/rl-1.7.html", - "ssec-relnotes-1.8": "release-notes/rl-1.8.html", - "ssec-relnotes-1.9": "release-notes/rl-1.9.html", - "ssec-relnotes-2.0": "release-notes/rl-2.0.html", - "ssec-relnotes-2.1": "release-notes/rl-2.1.html", - "ssec-relnotes-2.2": "release-notes/rl-2.2.html", - "ssec-relnotes-2.3": "release-notes/rl-2.3.html" + "sec-relnotes": "release-notes/index.html" }, "language/types.html": { "simple-values": "#primitives", @@ -340,12 +286,10 @@ "builder-execution": "../store/building.html#builder-execution" }, "installation/installing-binary.html": { - "linux": "uninstall.html#linux", - "macos": "uninstall.html#macos", "uninstalling": "uninstall.html" }, "development/building.html": { - "nix-with-flakes": "#building-nix-with-flakes", + "nix-with-flakes": "#building-nix", "classic-nix": "#building-nix", "running-tests": "testing.html#running-tests", "unit-tests": "testing.html#unit-tests", diff --git a/doc/manual/rl-next/shorter-build-dir-names.md b/doc/manual/rl-next/shorter-build-dir-names.md new file 
mode 100644 index 00000000000..e87fa5d04fb --- /dev/null +++ b/doc/manual/rl-next/shorter-build-dir-names.md @@ -0,0 +1,6 @@ +--- +synopsis: "Temporary build directories no longer include derivation names" +prs: [13839] +--- + +Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. \ No newline at end of file diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 8b6b29f6a7f..2684ec3d633 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -3,17 +3,12 @@ - [Introduction](introduction.md) - [Quick Start](quick-start.md) - [Installation](installation/index.md) - - [Supported Platforms](installation/supported-platforms.md) - - [Installing a Binary Distribution](installation/installing-binary.md) - [Installing Nix from Source](installation/installing-source.md) - [Prerequisites](installation/prerequisites-source.md) - [Obtaining a Source Distribution](installation/obtaining-source.md) - [Building Nix from Source](installation/building-source.md) - [Using Nix within Docker](installation/installing-docker.md) - [Security](installation/nix-security.md) - - [Single-User Mode](installation/single-user.md) - - [Multi-User Mode](installation/multi-user.md) - - [Environment Variables](installation/env-variables.md) - [Upgrading Nix](installation/upgrading.md) - [Uninstalling Nix](installation/uninstall.md) - [Nix Store](store/index.md) @@ -65,8 +60,11 @@ - [Command Reference](command-ref/index.md) - [Common Options](command-ref/opt-common.md) - [Common Environment Variables](command-ref/env-common.md) - - [Main Commands](command-ref/main-commands.md) + - [Subcommands](command-ref/subcommands.md) +{{#include ./command-ref/new-cli/SUMMARY.md}} + - [Deprecated Commands](command-ref/main-commands.md) - 
[nix-build](command-ref/nix-build.md) + - [nix-channel](command-ref/nix-channel.md) - [nix-shell](command-ref/nix-shell.md) - [nix-store](command-ref/nix-store.md) - [nix-store --add-fixed](command-ref/nix-store/add-fixed.md) @@ -102,22 +100,17 @@ - [nix-env --uninstall](command-ref/nix-env/uninstall.md) - [nix-env --upgrade](command-ref/nix-env/upgrade.md) - [Utilities](command-ref/utilities.md) - - [nix-channel](command-ref/nix-channel.md) - [nix-collect-garbage](command-ref/nix-collect-garbage.md) - [nix-copy-closure](command-ref/nix-copy-closure.md) - [nix-daemon](command-ref/nix-daemon.md) - [nix-hash](command-ref/nix-hash.md) - [nix-instantiate](command-ref/nix-instantiate.md) - [nix-prefetch-url](command-ref/nix-prefetch-url.md) - - [Experimental Commands](command-ref/experimental-commands.md) -{{#include ./command-ref/new-cli/SUMMARY.md}} - [Files](command-ref/files.md) - [nix.conf](command-ref/conf-file.md) - [Profiles](command-ref/files/profiles.md) - [manifest.nix](command-ref/files/manifest.nix.md) - [manifest.json](command-ref/files/manifest.json.md) - - [Channels](command-ref/files/channels.md) - - [Default Nix expression](command-ref/files/default-nix-expression.md) - [Architecture and Design](architecture/architecture.md) - [Formats and Protocols](protocols/index.md) - [JSON Formats](protocols/json/index.md) @@ -149,7 +142,49 @@ - [C++ style guide](development/cxx.md) - [Experimental Features](development/experimental-features.md) - [Contributing](development/contributing.md) -- [Releases](release-notes/index.md) +- [Determinate Nix Release Notes](release-notes-determinate/index.md) + - [Changes between Nix and Determinate Nix](release-notes-determinate/changes.md) + - [Release 3.15.1 (2025-12-24)](release-notes-determinate/v3.15.1.md) + - [Release 3.15.0 (2025-12-19)](release-notes-determinate/v3.15.0.md) + - [Release 3.14.0 (2025-12-08)](release-notes-determinate/v3.14.0.md) + - [Release 3.13.2 (2025-11-19)](release-notes-determinate/v3.13.2.md) + 
- [Release 3.13.1 (2025-11-12)](release-notes-determinate/v3.13.1.md) + - [Release 3.13.0 (2025-11-09)](release-notes-determinate/v3.13.0.md) + - [Release 3.12.2 (2025-11-05)](release-notes-determinate/v3.12.2.md) + - [Release 3.12.1 (2025-11-04)](release-notes-determinate/v3.12.1.md) + - [Release 3.12.0 (2025-10-23)](release-notes-determinate/v3.12.0.md) + - [Release 3.11.3 (2025-10-09)](release-notes-determinate/v3.11.3.md) + - [Release 3.11.2 (2025-09-12)](release-notes-determinate/v3.11.2.md) + - [Release 3.11.1 (2025-09-04)](release-notes-determinate/v3.11.1.md) + - [Release 3.11.0 (2025-09-03)](release-notes-determinate/v3.11.0.md) + - [Release 3.10.1 (2025-09-02)](release-notes-determinate/v3.10.1.md) + - [Release 3.10.0 (2025-09-02)](release-notes-determinate/v3.10.0.md) + - [Release 3.9.1 (2025-08-28)](release-notes-determinate/v3.9.1.md) + - [Release 3.9.0 (2025-08-26)](release-notes-determinate/v3.9.0.md) + - [Release 3.8.6 (2025-08-19)](release-notes-determinate/v3.8.6.md) + - [Release 3.8.5 (2025-08-04)](release-notes-determinate/rl-3.8.5.md) + - [Release 3.8.4 (2025-07-21)](release-notes-determinate/rl-3.8.4.md) + - [Release 3.8.3 (2025-07-18)](release-notes-determinate/rl-3.8.3.md) + - [Release 3.8.2 (2025-07-12)](release-notes-determinate/rl-3.8.2.md) + - [Release 3.8.1 (2025-07-11)](release-notes-determinate/rl-3.8.1.md) + - [Release 3.8.0 (2025-07-10)](release-notes-determinate/rl-3.8.0.md) + - [Release 3.7.0 (2025-07-03)](release-notes-determinate/rl-3.7.0.md) + - [Release 3.6.8 (2025-06-25)](release-notes-determinate/rl-3.6.8.md) + - [Release 3.6.7 (2025-06-24)](release-notes-determinate/rl-3.6.7.md) + - [Release 3.6.6 (2025-06-17)](release-notes-determinate/rl-3.6.6.md) + - [Release 3.6.5 (2025-06-16)](release-notes-determinate/rl-3.6.5.md) + - [Release 3.6.2 (2025-06-02)](release-notes-determinate/rl-3.6.2.md) + - [Release 3.6.1 (2025-05-24)](release-notes-determinate/rl-3.6.1.md) + - [Release 3.6.0 
(2025-05-22)](release-notes-determinate/rl-3.6.0.md) + - [Release 3.5.2 (2025-05-12)](release-notes-determinate/rl-3.5.2.md) + - [Release 3.5.1 (2025-05-09)](release-notes-determinate/rl-3.5.1.md) + - [~~Release 3.5.0 (2025-05-09)~~](release-notes-determinate/rl-3.5.0.md) + - [Release 3.4.2 (2025-05-05)](release-notes-determinate/rl-3.4.2.md) + - [Release 3.4.0 (2025-04-25)](release-notes-determinate/rl-3.4.0.md) + - [Release 3.3.0 (2025-04-11)](release-notes-determinate/rl-3.3.0.md) + - [Release 3.1.0 (2025-03-27)](release-notes-determinate/rl-3.1.0.md) + - [Release 3.0.0 (2025-03-04)](release-notes-determinate/rl-3.0.0.md) +- [Nix Release Notes](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} - [Release 2.33 (2025-12-09)](release-notes/rl-2.33.md) - [Release 2.32 (2025-10-06)](release-notes/rl-2.32.md) @@ -159,60 +194,3 @@ - [Release 2.28 (2025-04-02)](release-notes/rl-2.28.md) - [Release 2.27 (2025-03-03)](release-notes/rl-2.27.md) - [Release 2.26 (2025-01-22)](release-notes/rl-2.26.md) - - [Release 2.25 (2024-11-07)](release-notes/rl-2.25.md) - - [Release 2.24 (2024-07-31)](release-notes/rl-2.24.md) - - [Release 2.23 (2024-06-03)](release-notes/rl-2.23.md) - - [Release 2.22 (2024-04-23)](release-notes/rl-2.22.md) - - [Release 2.21 (2024-03-11)](release-notes/rl-2.21.md) - - [Release 2.20 (2024-01-29)](release-notes/rl-2.20.md) - - [Release 2.19 (2023-11-17)](release-notes/rl-2.19.md) - - [Release 2.18 (2023-09-20)](release-notes/rl-2.18.md) - - [Release 2.17 (2023-07-24)](release-notes/rl-2.17.md) - - [Release 2.16 (2023-05-31)](release-notes/rl-2.16.md) - - [Release 2.15 (2023-04-11)](release-notes/rl-2.15.md) - - [Release 2.14 (2023-02-28)](release-notes/rl-2.14.md) - - [Release 2.13 (2023-01-17)](release-notes/rl-2.13.md) - - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md) - - [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md) - - [Release 2.10 (2022-07-11)](release-notes/rl-2.10.md) - - [Release 2.9 (2022-05-30)](release-notes/rl-2.9.md) 
- - [Release 2.8 (2022-04-19)](release-notes/rl-2.8.md) - - [Release 2.7 (2022-03-07)](release-notes/rl-2.7.md) - - [Release 2.6 (2022-01-24)](release-notes/rl-2.6.md) - - [Release 2.5 (2021-12-13)](release-notes/rl-2.5.md) - - [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md) - - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md) - - [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md) - - [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md) - - [Release 2.0 (2018-02-22)](release-notes/rl-2.0.md) - - [Release 1.11.10 (2017-06-12)](release-notes/rl-1.11.10.md) - - [Release 1.11 (2016-01-19)](release-notes/rl-1.11.md) - - [Release 1.10 (2015-09-03)](release-notes/rl-1.10.md) - - [Release 1.9 (2015-06-12)](release-notes/rl-1.9.md) - - [Release 1.8 (2014-12-14)](release-notes/rl-1.8.md) - - [Release 1.7 (2014-04-11)](release-notes/rl-1.7.md) - - [Release 1.6.1 (2013-10-28)](release-notes/rl-1.6.1.md) - - [Release 1.6 (2013-09-10)](release-notes/rl-1.6.md) - - [Release 1.5.2 (2013-05-13)](release-notes/rl-1.5.2.md) - - [Release 1.5 (2013-02-27)](release-notes/rl-1.5.md) - - [Release 1.4 (2013-02-26)](release-notes/rl-1.4.md) - - [Release 1.3 (2013-01-04)](release-notes/rl-1.3.md) - - [Release 1.2 (2012-12-06)](release-notes/rl-1.2.md) - - [Release 1.1 (2012-07-18)](release-notes/rl-1.1.md) - - [Release 1.0 (2012-05-11)](release-notes/rl-1.0.md) - - [Release 0.16 (2010-08-17)](release-notes/rl-0.16.md) - - [Release 0.15 (2010-03-17)](release-notes/rl-0.15.md) - - [Release 0.14 (2010-02-04)](release-notes/rl-0.14.md) - - [Release 0.13 (2009-11-05)](release-notes/rl-0.13.md) - - [Release 0.12 (2008-11-20)](release-notes/rl-0.12.md) - - [Release 0.11 (2007-12-31)](release-notes/rl-0.11.md) - - [Release 0.10.1 (2006-10-11)](release-notes/rl-0.10.1.md) - - [Release 0.10 (2006-10-06)](release-notes/rl-0.10.md) - - [Release 0.9.2 (2005-09-21)](release-notes/rl-0.9.2.md) - - [Release 0.9.1 (2005-09-20)](release-notes/rl-0.9.1.md) - - [Release 0.9 
(2005-09-16)](release-notes/rl-0.9.md) - - [Release 0.8.1 (2005-04-13)](release-notes/rl-0.8.1.md) - - [Release 0.8 (2005-04-11)](release-notes/rl-0.8.md) - - [Release 0.7 (2005-01-12)](release-notes/rl-0.7.md) - - [Release 0.6 (2004-11-14)](release-notes/rl-0.6.md) - - [Release 0.5 and earlier](release-notes/rl-0.5.md) diff --git a/doc/manual/source/advanced-topics/distributed-builds.md b/doc/manual/source/advanced-topics/distributed-builds.md index 08a980643e8..c39cf450079 100644 --- a/doc/manual/source/advanced-topics/distributed-builds.md +++ b/doc/manual/source/advanced-topics/distributed-builds.md @@ -5,8 +5,8 @@ this allows multiple builds to be performed in parallel. Remote builds also allow Nix to perform multi-platform builds in a semi-transparent way. For example, if you perform a build for a -`x86_64-darwin` on an `i686-linux` machine, Nix can automatically -forward the build to a `x86_64-darwin` machine, if one is available. +`aarch64-darwin` on an `x86_64-linux` machine, Nix can automatically +forward the build to a `aarch64-darwin` machine, if one is available. ## Requirements @@ -59,7 +59,7 @@ then you need to ensure that the `PATH` of non-interactive login shells contains Nix. The [list of remote build machines](@docroot@/command-ref/conf-file.md#conf-builders) can be specified on the command line or in the Nix configuration file. 
-For example, the following command allows you to build a derivation for `x86_64-darwin` on a Linux machine: +For example, the following command allows you to build a derivation for `aarch64-darwin` on a Linux machine: ```console uname @@ -71,8 +71,8 @@ Linux ```console nix build --impure \ - --expr '(with import { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \ - --builders 'ssh://mac x86_64-darwin' + --expr '(with import { system = "aarch64-darwin"; }; runCommand "foo" {} "uname > $out")' \ + --builders 'ssh://mac aarch64-darwin' ``` ```console @@ -90,12 +90,12 @@ Darwin It is possible to specify multiple build machines separated by a semicolon or a newline, e.g. ```console - --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd' + --builders 'ssh://mac aarch64-darwin ; ssh://beastie x86_64-freebsd' ``` Remote build machines can also be configured in [`nix.conf`](@docroot@/command-ref/conf-file.md), e.g. - builders = ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd + builders = ssh://mac aarch64-darwin ; ssh://beastie x86_64-freebsd After making changes to `nix.conf`, restart the Nix daemon for changes to take effect. @@ -107,4 +107,4 @@ file included in `builders` via the syntax `@/path/to/file`. For example, causes the list of machines in `/etc/nix/machines` to be included. (This is the default.) -[Nix instance]: @docroot@/glossary.md#gloss-nix-instance \ No newline at end of file +[Nix instance]: @docroot@/glossary.md#gloss-nix-instance diff --git a/doc/manual/source/advanced-topics/eval-profiler.md b/doc/manual/source/advanced-topics/eval-profiler.md index ed3848bb2db..2bc7ebb05e0 100644 --- a/doc/manual/source/advanced-topics/eval-profiler.md +++ b/doc/manual/source/advanced-topics/eval-profiler.md @@ -27,7 +27,7 @@ site](https://en.wikipedia.org/wiki/Call_site) position and the name of the function being called (when available). 
For example: ``` -/nix/store/x9wnkly3k1gkq580m90jjn32q9f05q2v-source/pkgs/top-level/default.nix:167:5:primop import +/nix/store/2q71fdvr4h33g9832hiriwnf20fn630l-source/pkgs/top-level/default.nix:167:5:primop import ``` -Here `import` primop is called at `/nix/store/x9wnkly3k1gkq580m90jjn32q9f05q2v-source/pkgs/top-level/default.nix:167:5`. +Here `import` primop is called at `/nix/store/2q71fdvr4h33g9832hiriwnf20fn630l-source/pkgs/top-level/default.nix:167:5`. diff --git a/doc/manual/source/command-ref/env-common.md b/doc/manual/source/command-ref/env-common.md index e0fd2b00eec..fe6e822ff16 100644 --- a/doc/manual/source/command-ref/env-common.md +++ b/doc/manual/source/command-ref/env-common.md @@ -102,7 +102,7 @@ Most Nix commands interpret the following environment variables: This variable should be set to `daemon` if you want to use the Nix daemon to execute Nix operations. This is necessary in [multi-user - Nix installations](@docroot@/installation/multi-user.md). If the Nix + Nix installations](@docroot@/installation/nix-security.md#multi-user-model). If the Nix daemon's Unix socket is at some non-standard path, this variable should be set to `unix://path/to/socket`. Otherwise, it should be left unset. diff --git a/doc/manual/source/command-ref/experimental-commands.md b/doc/manual/source/command-ref/experimental-commands.md deleted file mode 100644 index 1190729a230..00000000000 --- a/doc/manual/source/command-ref/experimental-commands.md +++ /dev/null @@ -1,8 +0,0 @@ -# Experimental Commands - -This section lists [experimental commands](@docroot@/development/experimental-features.md#xp-feature-nix-command). - -> **Warning** -> -> These commands may be removed in the future, or their syntax may -> change in incompatible ways. 
diff --git a/doc/manual/source/command-ref/files/default-nix-expression.md b/doc/manual/source/command-ref/files/default-nix-expression.md index 2bd45ff5deb..e886e3ff499 100644 --- a/doc/manual/source/command-ref/files/default-nix-expression.md +++ b/doc/manual/source/command-ref/files/default-nix-expression.md @@ -31,12 +31,12 @@ Then, the resulting expression is interpreted like this: The file [`manifest.nix`](@docroot@/command-ref/files/manifest.nix.md) is always ignored. -The command [`nix-channel`] places a symlink to the current user's [channels] in this directory, the [user channel link](#user-channel-link). +The command [`nix-channel`] places a symlink to the current user's channels in this directory, the [user channel link](#user-channel-link). This makes all subscribed channels available as attributes in the default expression. ## User channel link -A symlink that ensures that [`nix-env`] can find the current user's [channels]: +A symlink that ensures that [`nix-env`] can find the current user's channels: - `~/.nix-defexpr/channels` - `$XDG_STATE_HOME/defexpr/channels` if [`use-xdg-base-directories`] is set to `true`. 
@@ -51,4 +51,3 @@ In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, [`nix-channel`]: @docroot@/command-ref/nix-channel.md [`nix-env`]: @docroot@/command-ref/nix-env.md [`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories -[channels]: @docroot@/command-ref/files/channels.md diff --git a/doc/manual/source/command-ref/files/manifest.nix.md b/doc/manual/source/command-ref/files/manifest.nix.md index d7d1b605b54..78bfdc346ea 100644 --- a/doc/manual/source/command-ref/files/manifest.nix.md +++ b/doc/manual/source/command-ref/files/manifest.nix.md @@ -114,9 +114,9 @@ Here is an example of how this file might look like after installing `hello` fro }; name = "hello-2.12.1"; out = { - outPath = "/nix/store/260q5867crm1xjs4khgqpl6vr9kywql1-hello-2.12.1"; + outPath = "/nix/store/src1vzij2z0slnakrsbpqpk20389z0k6-hello-2.12.1"; }; - outPath = "/nix/store/260q5867crm1xjs4khgqpl6vr9kywql1-hello-2.12.1"; + outPath = "/nix/store/src1vzij2z0slnakrsbpqpk20389z0k6-hello-2.12.1"; outputs = [ "out" ]; system = "x86_64-linux"; type = "derivation"; diff --git a/doc/manual/source/command-ref/files/profiles.md b/doc/manual/source/command-ref/files/profiles.md index b5c7378800f..f137336747f 100644 --- a/doc/manual/source/command-ref/files/profiles.md +++ b/doc/manual/source/command-ref/files/profiles.md @@ -37,13 +37,13 @@ dr-xr-xr-x 4 root root 4096 Jan 1 1970 share /home/eelco/.local/state/nix/profiles/profile-7-link/bin: total 20 -lrwxrwxrwx 5 root root 79 Jan 1 1970 chromium -> /nix/store/ijm5k0zqisvkdwjkc77mb9qzb35xfi4m-chromium-86.0.4240.111/bin/chromium +lrwxrwxrwx 5 root root 79 Jan 1 1970 chromium -> /nix/store/cyxny9d1zjb9l9103fr6j6kavp3bqjxf-chromium-86.0.4240.111/bin/chromium lrwxrwxrwx 7 root root 87 Jan 1 1970 spotify -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/bin/spotify lrwxrwxrwx 3 root root 79 Jan 1 1970 zoom-us -> 
/nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/bin/zoom-us /home/eelco/.local/state/nix/profiles/profile-7-link/share/applications: total 12 -lrwxrwxrwx 4 root root 120 Jan 1 1970 chromium-browser.desktop -> /nix/store/4cf803y4vzfm3gyk3vzhzb2327v0kl8a-chromium-unwrapped-86.0.4240.111/share/applications/chromium-browser.desktop +lrwxrwxrwx 4 root root 120 Jan 1 1970 chromium-browser.desktop -> /nix/store/sqzyx2l85i6j2a77pnyvglh3bvzwmjjp-chromium-unwrapped-86.0.4240.111/share/applications/chromium-browser.desktop lrwxrwxrwx 7 root root 110 Jan 1 1970 spotify.desktop -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/share/applications/spotify.desktop lrwxrwxrwx 3 root root 107 Jan 1 1970 us.zoom.Zoom.desktop -> /nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/share/applications/us.zoom.Zoom.desktop @@ -67,7 +67,7 @@ By default, this symlink points to: - `$NIX_STATE_DIR/profiles/per-user/root/profile` for `root` The `PATH` environment variable should include `/bin` subdirectory of the profile link (e.g. `~/.nix-profile/bin`) for the user environment to be visible to the user. -The [installer](@docroot@/installation/installing-binary.md) sets this up by default, unless you enable [`use-xdg-base-directories`]. +The installer sets this up by default, unless you enable [`use-xdg-base-directories`]. [`nix-env`]: @docroot@/command-ref/nix-env.md [`nix profile`]: @docroot@/command-ref/new-cli/nix3-profile.md diff --git a/doc/manual/source/command-ref/nix-channel.md b/doc/manual/source/command-ref/nix-channel.md index 865f43ccce5..59817be974b 100644 --- a/doc/manual/source/command-ref/nix-channel.md +++ b/doc/manual/source/command-ref/nix-channel.md @@ -8,6 +8,12 @@ # Description +> **Warning** +> +> nix-channel is deprecated in favor of flakes in Determinate Nix. +> For a guide on Nix flakes, see: . +> For details and to offer feedback on the deprecation process, see: . 
+ Channels are a mechanism for referencing remote Nix expressions and conveniently retrieving their latest version. The moving parts of channels are: diff --git a/doc/manual/source/command-ref/nix-copy-closure.md b/doc/manual/source/command-ref/nix-copy-closure.md index b7e31d93bfc..b34d57a5015 100644 --- a/doc/manual/source/command-ref/nix-copy-closure.md +++ b/doc/manual/source/command-ref/nix-copy-closure.md @@ -72,11 +72,11 @@ When using public key authentication, you can avoid typing the passphrase with ` > $ storePath="$(nix-build '' -I nixpkgs=channel:nixpkgs-unstable -A hello --no-out-link)" > $ nix-copy-closure --to alice@itchy.example.org "$storePath" > copying 5 paths... -> copying path '/nix/store/nrwkk6ak3rgkrxbqhsscb01jpzmslf2r-xgcc-13.2.0-libgcc' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/gm61h1y42pqyl6178g90x8zm22n6pyy5-libunistring-1.1' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/ddfzjdykw67s20c35i7a6624by3iz5jv-libidn2-2.3.7' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/apab5i73dqa09wx0q27b6fbhd1r18ihl-glibc-2.39-31' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/g1n2vryg06amvcc1avb2mcq36faly0mh-hello-2.12.1' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/h6q8sqsqfbd3252f9gixqn3z282wds7m-xgcc-13.2.0-libgcc' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/imnwvn96lw355giswsk36hx105j4wnpj-libunistring-1.1' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/85301indj7scg34spnfczkz72jgv8wa9-libidn2-2.3.7' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/ypwfsaljwhzw9iffiysxmxnhjj8v7np0-glibc-2.39-31' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/0dklv59zppdsqdvgf0qdvjgzcs5wbwxa-hello-2.12.1' to 'ssh://alice@itchy.example.org'... 
> ``` > **Example** diff --git a/doc/manual/source/command-ref/nix-env.md b/doc/manual/source/command-ref/nix-env.md index bda02149ed0..d01caaf7f78 100644 --- a/doc/manual/source/command-ref/nix-env.md +++ b/doc/manual/source/command-ref/nix-env.md @@ -52,7 +52,7 @@ These pages can be viewed offline: `nix-env` can obtain packages from multiple sources: - An attribute set of derivations from: - - The [default Nix expression](@docroot@/command-ref/files/default-nix-expression.md) (by default) + - The default Nix expression (by default) - A Nix file, specified via `--file` - A [profile](@docroot@/command-ref/files/profiles.md), specified via `--from-profile` - A Nix expression that is a function which takes default expression as argument, specified via `--from-expression` diff --git a/doc/manual/source/command-ref/nix-env/install.md b/doc/manual/source/command-ref/nix-env/install.md index 527fd8f90d8..320fa530fda 100644 --- a/doc/manual/source/command-ref/nix-env/install.md +++ b/doc/manual/source/command-ref/nix-env/install.md @@ -22,12 +22,11 @@ It is based on the current generation of the active [profile](@docroot@/command- The arguments *args* map to store paths in a number of possible ways: -- By default, *args* is a set of names denoting derivations in the [default Nix expression]. +- By default, *args* is a set of names denoting derivations in the default Nix expression. These are [realised], and the resulting output paths are installed. Currently installed derivations with a name equal to the name of a derivation being added are removed unless the option `--preserve-installed` is specified. 
[derivation expression]: @docroot@/glossary.md#gloss-derivation-expression - [default Nix expression]: @docroot@/command-ref/files/default-nix-expression.md [realised]: @docroot@/glossary.md#gloss-realise If there are multiple derivations matching a name in *args* that @@ -45,7 +44,7 @@ The arguments *args* map to store paths in a number of possible ways: gcc-3.3.6 gcc-4.1.1` will install both version of GCC (and will probably cause a user environment conflict\!). -- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the [default Nix expression]. +- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the default Nix expression. This is faster than using derivation names and unambiguous. Show the attribute paths of available packages with [`nix-env --query`](./query.md): @@ -58,7 +57,7 @@ The arguments *args* map to store paths in a number of possible ways: easy way to copy user environment elements from one profile to another. -- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the [default Nix expression] as their single argument. +- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the default Nix expression as their single argument. The derivations returned by those function calls are installed. This allows derivations to be specified in an unambiguous way, which is necessary if there are multiple derivations with the same name. 
@@ -204,7 +203,7 @@ To install a specific [store derivation] (typically created by `nix-instantiate`): ```console -$ nix-env --install /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv +$ nix-env --install /nix/store/8la6y31fmm6i4wfmby6avly1wf718xnj-gcc-3.4.3.drv ``` To install a specific output path: @@ -232,7 +231,7 @@ $ nix-env --file '' --install --attr hello --dry-run (dry run; not doing anything) installing ‘hello-2.10’ this path will be fetched (0.04 MiB download, 0.19 MiB unpacked): - /nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10 + /nix/store/ikwkxz4wwlp2g1428n7dy729cg1d9hin-hello-2.10 ... ``` diff --git a/doc/manual/source/command-ref/nix-prefetch-url.md b/doc/manual/source/command-ref/nix-prefetch-url.md index 19322ec8e04..8451778ad46 100644 --- a/doc/manual/source/command-ref/nix-prefetch-url.md +++ b/doc/manual/source/command-ref/nix-prefetch-url.md @@ -76,7 +76,7 @@ $ nix-prefetch-url ftp://ftp.gnu.org/pub/gnu/hello/hello-2.10.tar.gz ```console $ nix-prefetch-url --print-path mirror://gnu/hello/hello-2.10.tar.gz 0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i -/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz +/nix/store/8alrpdaasjd1x6g1fczchmzbpqm936a3-hello-2.10.tar.gz ``` ```console diff --git a/doc/manual/source/command-ref/nix-store/add-fixed.md b/doc/manual/source/command-ref/nix-store/add-fixed.md index 2ea90a13592..511fe2050eb 100644 --- a/doc/manual/source/command-ref/nix-store/add-fixed.md +++ b/doc/manual/source/command-ref/nix-store/add-fixed.md @@ -34,6 +34,6 @@ This operation has the following options: ```console $ nix-store --add-fixed sha256 ./hello-2.10.tar.gz -/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz +/nix/store/8alrpdaasjd1x6g1fczchmzbpqm936a3-hello-2.10.tar.gz ``` diff --git a/doc/manual/source/command-ref/nix-store/delete.md b/doc/manual/source/command-ref/nix-store/delete.md index 550c5ea2914..fcb2212d86d 100644 --- a/doc/manual/source/command-ref/nix-store/delete.md +++ 
b/doc/manual/source/command-ref/nix-store/delete.md @@ -27,7 +27,7 @@ paths in the store that refer to it (i.e., depend on it). # Example ```console -$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4 +$ nix-store --delete /nix/store/gjak3al7lj61x4gj6rln4f5pc5v0f67n-mesa-6.4 0 bytes freed (0.00 MiB) -error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive +error: cannot delete path `/nix/store/gjak3al7lj61x4gj6rln4f5pc5v0f67n-mesa-6.4' since it is still alive ``` diff --git a/doc/manual/source/command-ref/nix-store/query.md b/doc/manual/source/command-ref/nix-store/query.md index b5ba63adae2..cc45eeb74cf 100644 --- a/doc/manual/source/command-ref/nix-store/query.md +++ b/doc/manual/source/command-ref/nix-store/query.md @@ -103,6 +103,13 @@ symlink. example when *paths* were substituted from a binary cache. Use `--valid-derivers` instead to obtain valid paths only. + > **Note** + > + > `nix-store --query --deriver` is replaced with the following `nix` command: + > + > nix path-info --json ... | jq -r '.[].deriver' + + [deriver]: @docroot@/glossary.md#gloss-deriver - `--valid-derivers` @@ -184,9 +191,9 @@ Print the build-time dependencies of `svn`: ```console $ nix-store --query --requisites $(nix-store --query --deriver $(which svn)) -/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv -/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh -/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv +/nix/store/y6qa66l9h0pw161crnlk6y16rdrcljx4-grep-2.5.1.tar.bz2.drv +/nix/store/z716h753s97jhnzvfank2srqbljswpgm-gcc-wrapper.sh +/nix/store/f39x0q73rjdyvzm93y9wrkfr6x39lb7f-glibc-2.3.4.drv ... lots of other paths ... 
``` @@ -199,10 +206,10 @@ Show the build-time dependencies as a tree: ```console $ nix-store --query --tree $(nix-store --query --deriver $(which svn)) /nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv -+---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh -+---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv -| +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash -| +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh ++---/nix/store/vxnmkc8l8d2ijjha4xwhkfgx9vvc3q4c-builder.sh ++---/nix/store/rn9776dy82n5qrgz7xbcl1iw4vfkcrkk-bash-3.0.drv +| +---/nix/store/x9j20hz6bln1crzn55qifk0bbsm8v5ac-bash +| +---/nix/store/ajnn1mcm45wjvn0rlc22gvx2cwhjnazx-builder.sh ... ``` diff --git a/doc/manual/source/command-ref/nix-store/realise.md b/doc/manual/source/command-ref/nix-store/realise.md index 240685ce5c7..f5d203894e6 100644 --- a/doc/manual/source/command-ref/nix-store/realise.md +++ b/doc/manual/source/command-ref/nix-store/realise.md @@ -76,7 +76,7 @@ This operation is typically used to build [store derivation]s produced by ```console $ nix-store --realise $(nix-instantiate ./test.nix) -/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1 +/nix/store/6gwmy5jcnwdlz6aqqhksz863f1l8xc2w-aterm-2.3.1 ``` This is essentially what [`nix-build`](@docroot@/command-ref/nix-build.md) does. diff --git a/doc/manual/source/command-ref/subcommands.md b/doc/manual/source/command-ref/subcommands.md new file mode 100644 index 00000000000..6a26732338d --- /dev/null +++ b/doc/manual/source/command-ref/subcommands.md @@ -0,0 +1,3 @@ +# Subcommands + +This section lists all the subcommands of the `nix` CLI. diff --git a/doc/manual/source/development/building.md b/doc/manual/source/development/building.md index eb65a724757..da72204107c 100644 --- a/doc/manual/source/development/building.md +++ b/doc/manual/source/development/building.md @@ -1,73 +1,5 @@ # Building Nix -This section provides some notes on how to start hacking on Nix. 
-To get the latest version of Nix from GitHub: - -```console -$ git clone https://github.com/NixOS/nix.git -$ cd nix -``` - -> **Note** -> -> The following instructions assume you already have some version of Nix installed locally, so that you can use it to set up the development environment. -> If you don't have it installed, follow the [installation instructions](../installation/index.md). - - -To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: - -```console -$ nix-shell -``` - -To get a shell with one of the other [supported compilation environments](#compilation-environments): - -```console -$ nix-shell --attr devShells.x86_64-linux.native-clangStdenv -``` - -> **Note** -> -> You can use `native-ccacheStdenv` to drastically improve rebuild time. -> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. - -To build Nix itself in this shell: - -```console -[nix-shell]$ out="$(pwd)/outputs/out" dev=$out debug=$out mesonFlags+=" --prefix=${out}" -[nix-shell]$ dontAddPrefix=1 configurePhase -[nix-shell]$ buildPhase -``` - -To test it: - -```console -[nix-shell]$ checkPhase -``` - -To install it in `$(pwd)/outputs`: - -```console -[nix-shell]$ installPhase -[nix-shell]$ ./outputs/out/bin/nix --version -nix (Nix) 2.12 -``` - -To build a release version of Nix for the current operating system and CPU architecture: - -```console -$ nix-build -``` - -You can also build Nix for one of the [supported platforms](#platforms). - -## Building Nix with flakes - -This section assumes you are using Nix with the [`flakes`] and [`nix-command`] experimental features enabled. 
- -[`flakes`]: @docroot@/development/experimental-features.md#xp-feature-flakes -[`nix-command`]: @docroot@/development/experimental-features.md#xp-feature-nix-command - To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: ```console @@ -126,8 +58,6 @@ Nix can be built for various platforms, as specified in [`flake.nix`]: [`flake.nix`]: https://github.com/nixos/nix/blob/master/flake.nix - `x86_64-linux` -- `x86_64-darwin` -- `i686-linux` - `aarch64-linux` - `aarch64-darwin` - `armv6l-linux` @@ -145,12 +75,6 @@ platform. Common solutions include [remote build machines] and [binary format em Given such a setup, executing the build only requires selecting the respective attribute. For example, to compile for `aarch64-linux`: -```console -$ nix-build --attr packages.aarch64-linux.default -``` - -or for Nix with the [`flakes`] and [`nix-command`] experimental features enabled: - ```console $ nix build .#packages.aarch64-linux.default ``` @@ -243,20 +167,12 @@ To build with one of those environments, you can use $ nix build .#nix-cli-ccacheStdenv ``` -for flake-enabled Nix, or - -```console -$ nix-build --attr nix-cli-ccacheStdenv -``` - -for classic Nix. - You can use any of the other supported environments in place of `nix-cli-ccacheStdenv`. ## Editor integration The `clangd` LSP server is installed by default on the `clang`-based `devShell`s. -See [supported compilation environments](#compilation-environments) and instructions how to set up a shell [with flakes](#building-nix-with-flakes) or in [classic Nix](#building-nix). +See [supported compilation environments](#compilation-environments) and instructions how to [set up a shell](#building-nix). To use the LSP with your editor, you will want a `compile_commands.json` file telling `clangd` how we are compiling the code. Meson's configure always produces this inside the build directory. 
diff --git a/doc/manual/source/development/debugging.md b/doc/manual/source/development/debugging.md index d2450495e50..6578632d991 100644 --- a/doc/manual/source/development/debugging.md +++ b/doc/manual/source/development/debugging.md @@ -6,14 +6,7 @@ Additionally, see [Testing Nix](./testing.md) for further instructions on how to ## Building Nix with Debug Symbols -In the development shell, set the `mesonBuildType` environment variable to `debug` before configuring the build: - -```console -[nix-shell]$ export mesonBuildType=debugoptimized -``` - -Then, proceed to build Nix as described in [Building Nix](./building.md). -This will build Nix with debug symbols, which are essential for effective debugging. +In the development shell, `mesonBuildType` is set automatically to `debugoptimized`. This builds Nix with debug symbols, which are essential for effective debugging. It is also possible to build without optimization for faster build: diff --git a/doc/manual/source/development/experimental-features.md b/doc/manual/source/development/experimental-features.md index ad5cffa91ee..56a45b23890 100644 --- a/doc/manual/source/development/experimental-features.md +++ b/doc/manual/source/development/experimental-features.md @@ -6,7 +6,7 @@ Experimental features are considered unstable, which means that they can be chan Users must explicitly enable them by toggling the associated [experimental feature flags](@docroot@/command-ref/conf-file.md#conf-experimental-features). This allows accessing unstable functionality without unwittingly relying on it. -Experimental feature flags were first introduced in [Nix 2.4](@docroot@/release-notes/rl-2.4.md). +Experimental feature flags were first introduced in [Nix 2.4](https://nix.dev/manual/nix/latest/release-notes/rl-2.4). Before that, Nix did have experimental features, but they were not guarded by flags and were merely documented as unstable. This was a source of confusion and controversy. 
diff --git a/doc/manual/source/development/testing.md b/doc/manual/source/development/testing.md index dd965862a34..35654d16393 100644 --- a/doc/manual/source/development/testing.md +++ b/doc/manual/source/development/testing.md @@ -325,7 +325,6 @@ Creating a Cachix cache for your installer tests and adding its authorisation to - `x86_64-linux` - `armv6l-linux` - `armv7l-linux` - - `x86_64-darwin` - The `installer_test` job (which runs on `ubuntu-24.04` and `macos-14`) will try to install Nix with the cached installer and run a trivial Nix command. diff --git a/doc/manual/source/favicon.png b/doc/manual/source/favicon.png deleted file mode 100644 index 1ed2b5fe0fd..00000000000 Binary files a/doc/manual/source/favicon.png and /dev/null differ diff --git a/doc/manual/source/favicon.svg b/doc/manual/source/favicon.svg index 1d2a6e835d5..55fb9479b06 100644 --- a/doc/manual/source/favicon.svg +++ b/doc/manual/source/favicon.svg @@ -1 +1,29 @@ - \ No newline at end of file + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/manual/source/glossary.md b/doc/manual/source/glossary.md index 502e6d4de6b..64ca1cf5e16 100644 --- a/doc/manual/source/glossary.md +++ b/doc/manual/source/glossary.md @@ -136,7 +136,7 @@ > **Example** > - > `/nix/store/a040m110amc4h71lds2jmr8qrkj2jhxd-git-2.38.1` + > `/nix/store/jf6gn2dzna4nmsfbdxsd7kwhsk6gnnlr-git-2.38.1` See [Store Path](@docroot@/store/store-path.md) for details. @@ -353,14 +353,6 @@ See [Nix Archive](store/file-system-object/content-address.html#serial-nix-archive) for details. -- [`∅`]{#gloss-empty-set} - - The empty set symbol. In the context of profile history, this denotes a package is not present in a particular version of the profile. - -- [`ε`]{#gloss-epsilon} - - The epsilon symbol. In the context of a package, this means the version is empty. More precisely, the derivation does not have a version attribute. 
- - [package]{#package} A software package; files that belong together for a particular purpose, and metadata. diff --git a/doc/manual/source/installation/env-variables.md b/doc/manual/source/installation/env-variables.md deleted file mode 100644 index 0350904211a..00000000000 --- a/doc/manual/source/installation/env-variables.md +++ /dev/null @@ -1,62 +0,0 @@ -# Environment Variables - -To use Nix, some environment variables should be set. In particular, -`PATH` should contain the directories `prefix/bin` and -`~/.nix-profile/bin`. The first directory contains the Nix tools -themselves, while `~/.nix-profile` is a symbolic link to the current -*user environment* (an automatically generated package consisting of -symlinks to installed packages). The simplest way to set the required -environment variables is to include the file -`prefix/etc/profile.d/nix.sh` in your `~/.profile` (or similar), like -this: - -```bash -source prefix/etc/profile.d/nix.sh -``` - -# `NIX_SSL_CERT_FILE` - -If you need to specify a custom certificate bundle to account for an -HTTPS-intercepting man in the middle proxy, you must specify the path to -the certificate bundle in the environment variable `NIX_SSL_CERT_FILE`. - -If you don't specify a `NIX_SSL_CERT_FILE` manually, Nix will install -and use its own certificate bundle. - -Set the environment variable and install Nix - -```console -$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -$ curl -L https://nixos.org/nix/install | sh -``` - -In the shell profile and rc files (for example, `/etc/bashrc`, -`/etc/zshrc`), add the following line: - -```bash -export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -``` - -> **Note** -> -> You must not add the export and then do the install, as the Nix -> installer will detect the presence of Nix configuration, and abort. 
- -If you use the Nix daemon, you should also add the following to -`/etc/nix/nix.conf`: - -``` -ssl-cert-file = /etc/ssl/my-certificate-bundle.crt -``` - -## Proxy Environment Variables - -The Nix installer has special handling for these proxy-related -environment variables: `http_proxy`, `https_proxy`, `ftp_proxy`, -`all_proxy`, `no_proxy`, `HTTP_PROXY`, `HTTPS_PROXY`, `FTP_PROXY`, -`ALL_PROXY`, `NO_PROXY`. - -If any of these variables are set when running the Nix installer, then -the installer will create an override file at -`/etc/systemd/system/nix-daemon.service.d/override.conf` so `nix-daemon` -will use them. diff --git a/doc/manual/source/installation/index.md b/doc/manual/source/installation/index.md index 3c09f103184..aded684b0b5 100644 --- a/doc/manual/source/installation/index.md +++ b/doc/manual/source/installation/index.md @@ -1,44 +1,11 @@ # Installation -This section describes how to install and configure Nix for first-time use. - -The current recommended option on Linux and MacOS is [multi-user](#multi-user). - -## Multi-user - -This installation offers better sharing, improved isolation, and more security -over a single user installation. - -This option requires either: - -* Linux running systemd, with SELinux disabled -* MacOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. +For Linux and Windows Subsystem for Linux (WSL) users: ```console -$ curl -L https://nixos.org/nix/install | sh -s -- --daemon -``` - -## Single-user - -> Single-user is not supported on Mac. 
- -> `warning: installing Nix as root is not supported by this script!` - -This installation has less requirements than the multi-user install, however it -cannot offer equivalent sharing, isolation, or security. - -This option is suitable for systems without systemd. - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install ``` ## Distributions @@ -46,3 +13,5 @@ $ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon The Nix community maintains installers for several distributions. They can be found in the [`nix-community/nix-installers`](https://github.com/nix-community/nix-installers) repository. + +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/installation/installing-binary.md b/doc/manual/source/installation/installing-binary.md deleted file mode 100644 index 21c15637437..00000000000 --- a/doc/manual/source/installation/installing-binary.md +++ /dev/null @@ -1,158 +0,0 @@ -# Installing a Binary Distribution - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -To install the latest version Nix, run the following command: - -```console -$ curl -L https://nixos.org/nix/install | sh -``` - -This performs the default type of installation for your platform: - -- [Multi-user](#multi-user-installation): - - Linux with systemd and without SELinux - - macOS -- [Single-user](#single-user-installation): - - Linux without systemd - - Linux with SELinux - -We recommend the multi-user installation if it supports your platform and you can authenticate with `sudo`. 
- -The installer can be configured with various command line arguments and environment variables. -To show available command line flags: - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --help -``` - -To check what it does and how it can be customised further, [download and edit the second-stage installation script](#installing-from-a-binary-tarball). - -# Installing a pinned Nix version from a URL - -Version-specific installation URLs for all Nix versions since 1.11.16 can be found at [releases.nixos.org](https://releases.nixos.org/?prefix=nix/). -The directory for each version contains the corresponding SHA-256 hash. - -All installation scripts are invoked the same way: - -```console -$ export VERSION=2.19.2 -$ curl -L https://releases.nixos.org/nix/nix-$VERSION/install | sh -``` - -# Multi User Installation - -The multi-user Nix installation creates system users and a system service for the Nix daemon. - -Supported systems: - -- Linux running systemd, with SELinux disabled -- macOS - -To explicitly instruct the installer to perform a multi-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --daemon -``` - -You can run this under your usual user account or `root`. -The script will invoke `sudo` as needed. - -# Single User Installation - -To explicitly select a single-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --no-daemon -``` - -In a single-user installation, `/nix` is owned by the invoking user. -The script will invoke `sudo` to create `/nix` if it doesn’t already exist. 
-If you don’t have `sudo`, manually create `/nix` as `root`: - -```console -$ su root -# mkdir /nix -# chown alice /nix -``` - -# Installing from a binary tarball - -You can also download a binary tarball that contains Nix and all its dependencies: -- Choose a [version](https://releases.nixos.org/?prefix=nix/) and [system type](../development/building.md#platforms) -- Download and unpack the tarball -- Run the installer - -> **Example** -> -> ```console -> $ pushd $(mktemp -d) -> $ export VERSION=2.19.2 -> $ export SYSTEM=x86_64-linux -> $ curl -LO https://releases.nixos.org/nix/nix-$VERSION/nix-$VERSION-$SYSTEM.tar.xz -> $ tar xfj nix-$VERSION-$SYSTEM.tar.xz -> $ cd nix-$VERSION-$SYSTEM -> $ ./install -> $ popd -> ``` - -The installer can be customised with the environment variables declared in the file named `install-multi-user`. - -## Native packages for Linux distributions - -The Nix community maintains installers for some Linux distributions in their native packaging format(https://nix-community.github.io/nix-installers/). - -# macOS Installation - - -[]{#sect-macos-installation-change-store-prefix}[]{#sect-macos-installation-encrypted-volume}[]{#sect-macos-installation-symlink}[]{#sect-macos-installation-recommended-notes} - -We believe we have ironed out how to cleanly support the read-only root file system -on modern macOS. New installs will do this automatically. - -This section previously detailed the situation, options, and trade-offs, -but it now only outlines what the installer does. 
You don't need to know -this to run the installer, but it may help if you run into trouble: - -- create a new APFS volume for your Nix store -- update `/etc/synthetic.conf` to direct macOS to create a "synthetic" - empty root directory to mount your volume -- specify mount options for the volume in `/etc/fstab` - - `rw`: read-write - - `noauto`: prevent the system from auto-mounting the volume (so the - LaunchDaemon mentioned below can control mounting it, and to avoid - masking problems with that mounting service). - - `nobrowse`: prevent the Nix Store volume from showing up on your - desktop; also keeps Spotlight from spending resources to index - this volume - -- if you have FileVault enabled - - generate an encryption password - - put it in your system Keychain - - use it to encrypt the volume -- create a system LaunchDaemon to mount this volume early enough in the - boot process to avoid problems loading or restoring any programs that - need access to your Nix store - diff --git a/doc/manual/source/installation/nix-security.md b/doc/manual/source/installation/nix-security.md index 1e9036b68b2..61cad24c2b3 100644 --- a/doc/manual/source/installation/nix-security.md +++ b/doc/manual/source/installation/nix-security.md @@ -1,15 +1,85 @@ # Security -Nix has two basic security models. First, it can be used in “single-user -mode”, which is similar to what most other package management tools do: -there is a single user (typically root) who performs all package -management operations. All other users can then use the installed -packages, but they cannot perform package management operations -themselves. - -Alternatively, you can configure Nix in “multi-user mode”. In this -model, all users can perform package management operations — for -instance, every user can install software without requiring root -privileges. Nix ensures that this is secure. For instance, it’s not -possible for one user to overwrite a package used by another user with a -Trojan horse. 
+Nix follows a [**multi-user**](#multi-user-model) security model in which all +users can perform package management operations. Every user can, for example, +install software without requiring root privileges, and Nix ensures that this +is secure. It's *not* possible for one user to, for example, overwrite a +package used by another user with a Trojan horse. + +## Multi-User model + +To allow a Nix store to be shared safely among multiple users, it is +important that users are not able to run builders that modify the Nix +store or database in arbitrary ways, or that interfere with builds +started by other users. If they could do so, they could install a Trojan +horse in some package and compromise the accounts of other users. + +To prevent this, the Nix store and database are owned by some privileged +user (usually `root`) and builders are executed under special user +accounts (usually named `nixbld1`, `nixbld2`, etc.). When a unprivileged +user runs a Nix command, actions that operate on the Nix store (such as +builds) are forwarded to a *Nix daemon* running under the owner of the +Nix store/database that performs the operation. + +> **Note** +> +> Multi-user mode has one important limitation: only root and a set of +> trusted users specified in `nix.conf` can specify arbitrary binary +> caches. So while unprivileged users may install packages from +> arbitrary Nix expressions, they may not get pre-built binaries. + +### Setting up the build users + +The *build users* are the special UIDs under which builds are performed. +They should all be members of the *build users group* `nixbld`. This +group should have no other members. The build users should not be +members of any other group. On Linux, you can create the group and users +as follows: + +```console +$ groupadd -r nixbld +$ for n in $(seq 1 10); do useradd -c "Nix build user $n" \ + -d /var/empty -g nixbld -G nixbld -M -N -r -s "$(which nologin)" \ + nixbld$n; done +``` + +This creates 10 build users. 
There can never be more concurrent builds +than the number of build users, so you may want to increase this if you +expect to do many builds at the same time. + +### Running the daemon + +The [Nix daemon](../command-ref/nix-daemon.md) should be started as +follows (as `root`): + +```console +$ nix-daemon +``` + +You’ll want to put that line somewhere in your system’s boot scripts. + +To let unprivileged users use the daemon, they should set the +[`NIX_REMOTE` environment variable](../command-ref/env-common.md) to +`daemon`. So you should put a line like + +```console +export NIX_REMOTE=daemon +``` + +into the users’ login scripts. + +### Restricting access + +To limit which users can perform Nix operations, you can use the +permissions on the directory `/nix/var/nix/daemon-socket`. For instance, +if you want to restrict the use of Nix to the members of a group called +`nix-users`, do + +```console +$ chgrp nix-users /nix/var/nix/daemon-socket +$ chmod ug=rwx,o= /nix/var/nix/daemon-socket +``` + +This way, users who are not in the `nix-users` group cannot connect to +the Unix domain socket `/nix/var/nix/daemon-socket/socket`, so they +cannot perform Nix operations. diff --git a/doc/manual/source/installation/single-user.md b/doc/manual/source/installation/single-user.md deleted file mode 100644 index f9a3b26edf4..00000000000 --- a/doc/manual/source/installation/single-user.md +++ /dev/null @@ -1,9 +0,0 @@ -# Single-User Mode - -In single-user mode, all Nix operations that access the database in -`prefix/var/nix/db` or modify the Nix store in `prefix/store` must be -performed under the user ID that owns those directories. This is -typically root. (If you install from RPM packages, that’s in fact the -default ownership.) However, on single-user machines, it is often -convenient to `chown` those directories to your normal user account so -that you don’t have to `su` to root all the time. 
diff --git a/doc/manual/source/installation/supported-platforms.md b/doc/manual/source/installation/supported-platforms.md deleted file mode 100644 index 8ca3ce8d445..00000000000 --- a/doc/manual/source/installation/supported-platforms.md +++ /dev/null @@ -1,7 +0,0 @@ -# Supported Platforms - -Nix is currently supported on the following platforms: - - - Linux (i686, x86\_64, aarch64). - - - macOS (x86\_64, aarch64). diff --git a/doc/manual/source/installation/uninstall.md b/doc/manual/source/installation/uninstall.md index 69d59847b6f..e95634c213a 100644 --- a/doc/manual/source/installation/uninstall.md +++ b/doc/manual/source/installation/uninstall.md @@ -1,197 +1,15 @@ # Uninstalling Nix -## Multi User - -Removing a [multi-user installation](./installing-binary.md#multi-user-installation) depends on the operating system. - -### Linux - -If you are on Linux with systemd: - -1. Remove the Nix daemon service: - - ```console - sudo systemctl stop nix-daemon.service - sudo systemctl disable nix-daemon.socket nix-daemon.service - sudo systemctl daemon-reload - ``` - -Remove files created by Nix: +To uninstall Determinate Nix, use the uninstallation utility built into the [Determinate Nix Installer][installer]: ```console -sudo rm -rf /etc/nix /etc/profile.d/nix.sh /etc/tmpfiles.d/nix-daemon.conf /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix +$ /nix/nix-installer uninstall ``` -Remove build users and their group: +If you're certain that you want to uninstall, you can skip the confirmation step: ```console -for i in $(seq 1 32); do - sudo userdel nixbld$i -done -sudo groupdel nixbld +$ /nix/nix-installer uninstall --no-confirm ``` -There may also be references to Nix in - -- `/etc/bash.bashrc` -- `/etc/bashrc` -- `/etc/profile` -- `/etc/zsh/zshrc` -- `/etc/zshrc` - -which you may remove. - -### FreeBSD - -1. 
Stop and remove the Nix daemon service: - - ```console - sudo service nix-daemon stop - sudo rm -f /usr/local/etc/rc.d/nix-daemon - sudo sysrc -x nix_daemon_enable - ``` - -2. Remove files created by Nix: - - ```console - sudo rm -rf /etc/nix /usr/local/etc/profile.d/nix.sh /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix - ``` - -3. Remove build users and their group: - - ```console - for i in $(seq 1 32); do - sudo pw userdel nixbld$i - done - sudo pw groupdel nixbld - ``` - -4. There may also be references to Nix in: - - `/usr/local/etc/bashrc` - - `/usr/local/etc/zshrc` - - Shell configuration files in users' home directories - - which you may remove. - -### macOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer: - - ```console - sudo mv /etc/zshrc.backup-before-nix /etc/zshrc - sudo mv /etc/bashrc.backup-before-nix /etc/bashrc - sudo mv /etc/bash.bashrc.backup-before-nix /etc/bash.bashrc - ``` - - Otherwise, edit `/etc/zshrc`, `/etc/bashrc`, and `/etc/bash.bashrc` to remove the lines sourcing `nix-daemon.sh`, which should look like this: - - ```bash - # Nix - if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then - . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' - fi - # End Nix - ``` - -2. 
Stop and remove the Nix daemon services: - - ```console - sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist - sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist - ``` - - This stops the Nix daemon and prevents it from being started next time you boot the system. - -3. Remove the `nixbld` group and the `_nixbuildN` users: - - ```console - sudo dscl . -delete /Groups/nixbld - for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . -delete /Users/$u; done - ``` - - This will remove all the build users that no longer serve a purpose. - -4. Edit fstab using `sudo vifs` to remove the line mounting the Nix Store volume on `/nix`, which looks like - - ``` - UUID= /nix apfs rw,noauto,nobrowse,suid,owners - ``` - or - - ``` - LABEL=Nix\040Store /nix apfs rw,nobrowse - ``` - - by setting the cursor on the respective line using the arrow keys, and pressing `dd`, and then `:wq` to save the file. - - This will prevent automatic mounting of the Nix Store volume. - -5. Edit `/etc/synthetic.conf` to remove the `nix` line. - If this is the only line in the file you can remove it entirely: - - ```bash - if [ -f /etc/synthetic.conf ]; then - if [ "$(cat /etc/synthetic.conf)" = "nix" ]; then - sudo rm /etc/synthetic.conf - else - sudo vi /etc/synthetic.conf - fi - fi - ``` - - This will prevent the creation of the empty `/nix` directory. - -6. Remove the files Nix added to your system, except for the store: - - ```console - sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels - ``` - - -7. Remove the Nix Store volume: - - ```console - sudo diskutil apfs deleteVolume /nix - ``` - - This will remove the Nix Store volume and everything that was added to the store. 
- - If the output indicates that the command couldn't remove the volume, you should make sure you don't have an _unmounted_ Nix Store volume. - Look for a "Nix Store" volume in the output of the following command: - - ```console - diskutil list - ``` - - If you _do_ find a "Nix Store" volume, delete it by running `diskutil apfs deleteVolume` with the store volume's `diskXsY` identifier. - - If you get an error that the volume is in use by the kernel, reboot and immediately delete the volume before starting any other process. - -> **Note** -> -> After you complete the steps here, you will still have an empty `/nix` directory. -> This is an expected sign of a successful uninstall. -> The empty `/nix` directory will disappear the next time you reboot. -> -> You do not have to reboot to finish uninstalling Nix. -> The uninstall is complete. -> macOS (Catalina+) directly controls root directories, and its read-only root will prevent you from manually deleting the empty `/nix` mountpoint. - -## Single User - -To remove a [single-user installation](./installing-binary.md#single-user-installation) of Nix, run: - -```console -rm -rf /nix ~/.nix-channels ~/.nix-defexpr ~/.nix-profile -``` -You might also want to manually remove references to Nix from your `~/.profile`. +[installer]: https://github.com/DeterminateSystems/nix-installer diff --git a/doc/manual/source/installation/upgrading.md b/doc/manual/source/installation/upgrading.md index a433f1d30e6..8fe342b09b7 100644 --- a/doc/manual/source/installation/upgrading.md +++ b/doc/manual/source/installation/upgrading.md @@ -1,40 +1,10 @@ # Upgrading Nix -> **Note** -> -> These upgrade instructions apply where Nix was installed following the [installation instructions in this manual](./index.md). 
- -Check which Nix version will be installed, for example from one of the [release channels](http://channels.nixos.org/) such as `nixpkgs-unstable`: - -```console -$ nix-shell -p nix -I nixpkgs=channel:nixpkgs-unstable --run "nix --version" -nix (Nix) 2.18.1 -``` - -> **Warning** -> -> Writing to the [local store](@docroot@/store/types/local-store.md) with a newer version of Nix, for example by building derivations with [`nix-build`](@docroot@/command-ref/nix-build.md) or [`nix-store --realise`](@docroot@/command-ref/nix-store/realise.md), may change the database schema! -> Reverting to an older version of Nix may therefore require purging the store database before it can be used. - -## Linux multi-user +You can upgrade Determinate Nix using Determinate Nixd: ```console -$ sudo su -# nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -# systemctl daemon-reload -# systemctl restart nix-daemon +sudo determinate-nixd upgrade ``` -## macOS multi-user +Note that the `sudo` is necessary here and upgrading fails without it. -```console -$ sudo nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -$ sudo launchctl remove org.nixos.nix-daemon -$ sudo launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist -``` - -## Single-user all platforms - -```console -$ nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -``` diff --git a/doc/manual/source/introduction.md b/doc/manual/source/introduction.md index e70411c11f5..039ad6f30b1 100644 --- a/doc/manual/source/introduction.md +++ b/doc/manual/source/introduction.md @@ -1,4 +1,19 @@ -# Introduction +# Determinate Nix + +**Determinate Nix** is a downstream distribution of [Nix], a purely functional language, CLI tool, and package management system. +It's available on Linux, macOS, and Windows Subsystem for Linux (WSL). 
+ +## Installing + +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. +For Linux and Windows Subsystem for Linux (WSL) users: + +```console +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install +``` + +## How Nix works Nix is a _purely functional package manager_. This means that it treats packages like values in a purely functional programming language @@ -8,7 +23,7 @@ stores packages in the _Nix store_, usually the directory `/nix/store`, where each package has its own unique subdirectory such as - /nix/store/b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z-firefox-33.1/ + /nix/store/q06x3jll2yfzckz2bzqak089p43ixkkq-firefox-33.1/ where `b6gvzjyb2pg0…` is a unique identifier for the package that captures all its dependencies (it’s a cryptographic hash of the @@ -184,10 +199,14 @@ to build configuration files in `/etc`). This means, among other things, that it is easy to roll back the entire configuration of the system to an earlier state. Also, users can install software without root privileges. For more information and downloads, see the [NixOS -homepage](https://nixos.org/). +homepage][nix]. ## License Nix is released under the terms of the [GNU LGPLv2.1 or (at your option) any later -version](http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html). +version][license]. 
+ +[license]: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal +[site]: https://nixos.org diff --git a/doc/manual/source/language/string-context.md b/doc/manual/source/language/string-context.md index 65c59d865f0..0968cc88b84 100644 --- a/doc/manual/source/language/string-context.md +++ b/doc/manual/source/language/string-context.md @@ -34,12 +34,12 @@ String context elements come in different forms: > [`builtins.storePath`] creates a string with a single constant string context element: > > ```nix - > builtins.getContext (builtins.storePath "/nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10") + > builtins.getContext (builtins.storePath "/nix/store/ikwkxz4wwlp2g1428n7dy729cg1d9hin-hello-2.10") > ``` > evaluates to > ```nix > { - > "/nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10" = { + > "/nix/store/ikwkxz4wwlp2g1428n7dy729cg1d9hin-hello-2.10" = { > path = true; > }; > } diff --git a/doc/manual/source/language/string-interpolation.md b/doc/manual/source/language/string-interpolation.md index 8e25d2b6311..3f6bf9b9f85 100644 --- a/doc/manual/source/language/string-interpolation.md +++ b/doc/manual/source/language/string-interpolation.md @@ -181,7 +181,7 @@ A derivation interpolates to the [store path] of its first [output](./derivation > "${pkgs.hello}" > ``` > -> "/nix/store/4xpfqf29z4m8vbhrqcz064wfmb46w5r7-hello-2.12.1" +> "/nix/store/qnlr7906z0mrl2syrkdbpicffq02nw07-hello-2.12.1" An attribute set interpolates to the return value of the function in the `__toString` applied to the attribute set itself. 
diff --git a/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml b/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml index 58ff070882f..d247802cd6c 100644 --- a/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml @@ -9,7 +9,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. type: object diff --git a/doc/manual/source/protocols/json/schema/derivation-v4.yaml b/doc/manual/source/protocols/json/schema/derivation-v4.yaml index 2528f7502e6..c1884769671 100644 --- a/doc/manual/source/protocols/json/schema/derivation-v4.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-v4.yaml @@ -9,7 +9,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. type: object @@ -94,8 +94,8 @@ properties: > > ```json > "srcs": [ - > "47y241wqdhac3jm5l7nv0x4975mb1975-separate-debug-info.sh", - > "56d0w71pjj9bdr363ym3wj1zkwyqq97j-fix-pop-var-context-error.patch" + > "b8nwz167km1yciqpwzjj24f8jcy8pq1h-separate-debug-info.sh", + > "ihzmilr413r8fb3ah30yjnhlb18c1laz-fix-pop-var-context-error.patch" > ] > ``` items: @@ -140,7 +140,7 @@ properties: description: | Absolute path of the program used to perform the build. Typically this is the `bash` shell - (e.g. `/nix/store/r3j288vpmczbl500w6zz89gyfa4nr0b1-bash-4.4-p23/bin/bash`). + (e.g. `/nix/store/p4xlj4imjbnm4v0x5jf4qysvyjjlgq1d-bash-4.4-p23/bin/bash`). 
args: type: array diff --git a/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml b/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml index 3ed7e99e28d..6ebaa3b2422 100644 --- a/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml +++ b/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml @@ -6,12 +6,6 @@ description: | This schema describes the JSON representation of store object metadata as returned by commands like [`nix path-info --json`](@docroot@/command-ref/new-cli/nix3-path-info.md). - > **Warning** - > - > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and subject to change. - ### Field Categories Store object information can come in a few different variations. diff --git a/doc/manual/source/protocols/json/schema/store-path-v1.yaml b/doc/manual/source/protocols/json/schema/store-path-v1.yaml index 2012aab9915..61653d60e21 100644 --- a/doc/manual/source/protocols/json/schema/store-path-v1.yaml +++ b/doc/manual/source/protocols/json/schema/store-path-v1.yaml @@ -6,12 +6,6 @@ description: | This schema describes the JSON representation of store paths as used in various Nix JSON APIs. - > **Warning** - > - > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and subject to change. - ## Format Store paths in JSON are represented as strings containing just the hash and name portion, without the store directory prefix. 
diff --git a/doc/manual/source/protocols/json/schema/store-v1.yaml b/doc/manual/source/protocols/json/schema/store-v1.yaml index e0c6f8fed6c..31aa10c4147 100644 --- a/doc/manual/source/protocols/json/schema/store-v1.yaml +++ b/doc/manual/source/protocols/json/schema/store-v1.yaml @@ -10,7 +10,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. type: object diff --git a/doc/manual/source/quick-start.md b/doc/manual/source/quick-start.md index 9eb7a326590..42e4e9c0c24 100644 --- a/doc/manual/source/quick-start.md +++ b/doc/manual/source/quick-start.md @@ -3,10 +3,13 @@ This chapter is for impatient people who don't like reading documentation. For more in-depth information you are kindly referred to subsequent chapters. -1. Install Nix: +1. Install Nix. + We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. + For Linux and Windows Subsystem for Linux (WSL) users: ```console - $ curl -L https://nixos.org/nix/install | sh + $ curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install ``` The install script will use `sudo`, so make sure you have sufficient rights. @@ -41,3 +44,5 @@ For more in-depth information you are kindly referred to subsequent chapters. ```console $ nix-collect-garbage ``` + +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/release-notes-determinate/changes.md b/doc/manual/source/release-notes-determinate/changes.md new file mode 100644 index 00000000000..478068f2d89 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/changes.md @@ -0,0 +1,156 @@ +# Changes between Nix and Determinate Nix + +This section lists the differences between upstream Nix 2.33 and Determinate Nix 3.15.1. 
+ +* In Determinate Nix, flakes are stable. You no longer need to enable the `flakes` experimental feature. + +* In Determinate Nix, the new Nix CLI (i.e. the `nix` command) is stable. You no longer need to enable the `nix-command` experimental feature. + +* Determinate Nix has a setting [`json-log-path`](@docroot@/command-ref/conf-file.md#conf-json-log-path) to send a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. + +* Determinate Nix has made `nix profile install` an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. + +* `nix-channel` and `channel:` url syntax (like `channel:nixos-24.11`) is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/34 + +* Using indirect flake references and implicit inputs is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/37 + +* Warnings around "dirty trees" are updated to reduce "dirty" jargon, and now refers to "uncommitted changes". + + + + + + + +* `nix upgrade-nix` is now inert, and suggests using `determinate-nixd upgrade`. [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) + +* Determinate Nix has Lazy Trees, avoiding expensive copying of flake inputs to the Nix store. ([DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27), [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56)) + + + + + + + + + +* Documentation on how to replicate `nix-store --query --deriver` with the new `nix` cli. [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82) + +* In `nix profile`, the symbols `ε` and `∅` have been replaced with descriptive English words. [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81) + + + + + + + +* When remote building with `--keep-failed`, Determinate Nix shows "you can rerun" message if the derivation's platform is supported on this machine. 
[DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87) + +* Improved error message when `sandbox-paths` specifies a missing file. [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88) + + + + + + + + + +* `nix store delete` now explains why deletion fails. [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130) + + + + + + + + + + + + + +* Tab completing arguments to Nix avoids network access. [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161) + +* Importing Nixpkgs and other tarballs to the cache is 2-4x faster. [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149) + +* Adding paths to the store is significantly faster. [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162) + + + + + +* Determinate Nix allows flake inputs to be fetched at build time. [DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + + + +* The default `nix flake init` template is much more useful. [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180) + + + + + + + + +* Multithreaded evaluation support. [DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125) + + + + + + +* Determinate Nix only tries to substitute inputs if fetching from its original location fails.[DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202) + + + + + + +* A new command `nix nario` that replaces `nix-store --export|--export`. It also has a new file format (`--format 2`) that supports store path attributes such as signatures, and that can be imported more efficiently. [DeterminateSystems/nix-src#215](https://github.com/DeterminateSystems/nix-src/pull/215) + +* Determinate Nix prints the Nix version when using `-vv` or higher verbosity. 
[DeterminateSystems/nix-src#237](https://github.com/DeterminateSystems/nix-src/pull/237) + + + + +* During evaluation, you can read or import from the result of `builtins.fetchClosure`. [DeterminateSystems/nix-src#241](https://github.com/DeterminateSystems/nix-src/pull/241) + + + +* Flakerefs in error messages and lockfile diffs are abbreviated for readability. [DeterminateSystems/nix-src#243](https://github.com/DeterminateSystems/nix-src/pull/243), [DeterminateSystems/nix-src#264](https://github.com/DeterminateSystems/nix-src/pull/264) + + + + + + + + +* The Git fetcher doesn't compute `revCount` or `lastModified` if they're already specified. [DeterminateSystems/nix-src#269](https://github.com/DeterminateSystems/nix-src/pull/269) + +* The Git fetcher avoids doing a shallow Git fetch if it previously did a non-shallow fetch of the same repository. [DeterminateSystems/nix-src#270](https://github.com/DeterminateSystems/nix-src/pull/270) + +* Determinate Nix has a builtin copy of the flake registry, making it more resilient to network outages. [DeterminateSystems/nix-src#271](https://github.com/DeterminateSystems/nix-src/pull/271) + + + +* `nix build` and `nix profile` report failing or succeeding installables. [DeterminateSystems/nix-src#281](https://github.com/DeterminateSystems/nix-src/pull/281) + +* `nix flake check` shows which outputs failed or succeeded. [DeterminateSystems/nix-src#285](https://github.com/DeterminateSystems/nix-src/pull/285) + +* Determinate Nix has a `nix ps` command to show active builds. [DeterminateSystems/nix-src#282](https://github.com/DeterminateSystems/nix-src/pull/282) + +* Determinate Nix has improved backward compatibility with lock files created by Nix < 2.20. [DeterminateSystems/nix-src#278](https://github.com/DeterminateSystems/nix-src/pull/278) + + + +* Determinate Nix has a builtin function `builtins.filterAttrs`. 
[DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +* `builtins.fetchTree` implicitly sets `__final = true` when a `narHash` is supplied. This allows the tree to be substituted. [DeterminateSystems/nix-src#297](https://github.com/DeterminateSystems/nix-src/pull/297) + + + +* Bring back the free alpha by @grahamc in [DeterminateSystems/nix-src#305](https://github.com/DeterminateSystems/nix-src/pull/305) + +* Replace substitutable hashes with unlikely to substitute hashes by @grahamc in [DeterminateSystems/nix-src#306](https://github.com/DeterminateSystems/nix-src/pull/306) diff --git a/doc/manual/source/release-notes-determinate/index.md b/doc/manual/source/release-notes-determinate/index.md new file mode 100644 index 00000000000..bba33084424 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/index.md @@ -0,0 +1,3 @@ +# Determinate Nix Release Notes + +This chapter lists the differences between Nix and Determinate Nix, as well as the release history of Determinate Nix. diff --git a/doc/manual/source/release-notes-determinate/rl-3.0.0.md b/doc/manual/source/release-notes-determinate/rl-3.0.0.md new file mode 100644 index 00000000000..d60786e9a72 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.0.0.md @@ -0,0 +1,5 @@ +# Release 3.0.0 (2025-03-04) + +* Initial release of Determinate Nix. + +* Based on [upstream Nix 2.26.2](../release-notes/rl-2.26.md). diff --git a/doc/manual/source/release-notes-determinate/rl-3.1.0.md b/doc/manual/source/release-notes-determinate/rl-3.1.0.md new file mode 100644 index 00000000000..96b7819d08d --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.1.0.md @@ -0,0 +1,5 @@ +# Release 3.1.0 (2025-03-27) + +* Based on [upstream Nix 2.27.1](../release-notes/rl-2.27.md). + +* New setting `json-log-path` that sends a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. 
diff --git a/doc/manual/source/release-notes-determinate/rl-3.3.0.md b/doc/manual/source/release-notes-determinate/rl-3.3.0.md new file mode 100644 index 00000000000..badf96415df --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.3.0.md @@ -0,0 +1,5 @@ +# Release 3.3.0 (2025-04-11) + +* Based on [upstream Nix 2.28.1](../release-notes/rl-2.28.md). + +* The `nix profile install` command is now an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.0.md b/doc/manual/source/release-notes-determinate/rl-3.4.0.md new file mode 100644 index 00000000000..24ae03ca554 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.0.md @@ -0,0 +1,50 @@ +# Release 3.4.0 (2025-04-25) + +* Based on [upstream Nix 2.28.2](../release-notes/rl-2.28.md). + +* **Warn users that `nix-channel` is deprecated.** + +This is the first change accomplishing our roadmap item of deprecating Nix channels: https://github.com/DeterminateSystems/nix-src/issues/34 + +This is due to user confusion and surprising behavior of channels, especially in the context of user vs. root channels. + +The goal of this change is to make the user experience of Nix more predictable. +In particular, these changes are to support users with lower levels of experience who are following guides that focus on channels as the mechanism of distribution. + +Users will now see this message: + +> nix-channel is deprecated in favor of flakes in Determinate Nix. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34. + + +* **Warn users that `channel:` URLs are deprecated.** + +This is the second change regarding our deprecation of Nix channels. 
+Using a `channel:` URL (like `channel:nixos-24.11`) will yield a warning like this: + +> Channels are deprecated in favor of flakes in Determinate Nix. Instead of 'channel:nixos-24.11', use 'https://nixos.org/channels/nixos-24.11/nixexprs.tar.xz'. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34. + +* **Warn users against indirect flake references in `flake.nix` inputs** + +This is the first change accomplishing our roadmap item of deprecating implicit and indirect flake inputs: https://github.com/DeterminateSystems/nix-src/issues/37 + +The flake registry provides an important UX affordance for using Nix flakes and remote sources in command line uses. +For that reason, the registry is not being deprecated entirely and will still be used for command-line incantations, like nix run. + +This move will eliminate user confusion and surprising behavior around global and local registries during flake input resolution. + +The goal of this change is to make the user experience of Nix more predictable. +We have seen a pattern of confusion when using automatic flake inputs and local registries. +Specifically, users' flake inputs resolving and locking inconsistently depending on the configuration of the host system. + +Users will now see the following warning if their flake.nix uses an implicit or indirect Flake reference input: + +> Flake input 'nixpkgs' uses the flake registry. Using the registry in flake inputs is deprecated in Determinate Nix. To make your flake future-proof, add the following to 'xxx/flake.nix': +> +> inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; +> +> For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37 + + +### Other updates: +* Improve the "dirty tree" message. Determinate Nix will now say `Git tree '...' has uncommitted changes` instead of `Git tree '...' 
is dirty` +* Stop warning about uncommitted changes in a Git repository when using `nix develop` diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.2.md b/doc/manual/source/release-notes-determinate/rl-3.4.2.md new file mode 100644 index 00000000000..8acabd4425f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.2.md @@ -0,0 +1,4 @@ +# Release 3.4.2 (2025-05-05) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.0.md b/doc/manual/source/release-notes-determinate/rl-3.5.0.md new file mode 100644 index 00000000000..d5b26b9419e --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.0.md @@ -0,0 +1,4 @@ +# Release 3.5.0 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.1.md b/doc/manual/source/release-notes-determinate/rl-3.5.1.md new file mode 100644 index 00000000000..b0813ca59c9 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.1.md @@ -0,0 +1,57 @@ +# Release 3.5.1 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + +## What's Changed + +Most notably, Lazy Trees has merged into Determinate Nix and is in Feature Preview status, but remains disabled by default. +Lazy trees massively improves performance in virtually all scenarios because it enables Nix to avoid making unnecessary copies of files into the Nix store. +In testing, we saw iteration times on Nixpkgs **drop from over 12 seconds to 3.5 seconds**. + +After upgrading to Determinate Nix 3.5.1 with `sudo determinate-nixd upgrade`, enable lazy trees by adding this to `/etc/nix/nix.custom.conf`: + +``` +lazy-trees = true +``` + +Please note that our full flake regression test suite passes with no changes with lazy trees, and please report compatibility issues. 
+ +Read [this GitHub comment](https://github.com/DeterminateSystems/nix-src/pull/27#pullrequestreview-2822153088) for further details and next steps. +We'll be publishing an update on the [Determinate Systems blog](https://determinate.systems/posts/) in the next few days with more information as well. + +Relevant PRs: +* Lazy trees v2 by @edolstra in [DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27) +* Improve lazy trees backward compatibility by @edolstra in [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56) + + +### Additional changes in this release: +* Bug fix: Flake input URLs are canonicalized before checking flake.lock file staleness, avoiding needlessly regenerating flake.lock files with `dir` in URL-style flakerefs by @edolstra in [DeterminateSystems/nix-src#57](https://github.com/DeterminateSystems/nix-src/pull/57) +* `nix upgrade-nix` is deprecated in favor of `determinate-nixd upgrade`, by @gustavderdrache in [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) +* UX: Improved build failure and dependency failure error messages to include needed output paths by @edolstra in [DeterminateSystems/nix-src#58](https://github.com/DeterminateSystems/nix-src/pull/58). + +Previously: + +``` +error: builder for '/nix/store/[...]-nested-failure-bottom.drv' failed with exit code 1 +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-middle.drv' failed to build +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-top.drv' failed to build +``` + +Now: + +``` +error: Cannot build '/nix/store/w37gflm9wz9dcnsgy3sfrmnlvm8qigaj-nested-failure-bottom.drv'. + Reason: builder failed with exit code 1. + Output paths: + /nix/store/yzybs8kp35dfipbzdlqcc6lxz62hax04-nested-failure-bottom +error: Cannot build '/nix/store/00gr5hlxfc03x2675w6nn3pwfrz2fr62-nested-failure-middle.drv'. + Reason: 1 dependency failed. 
+ Output paths: + /nix/store/h781j5h4bdchmb4c2lvy8qzh8733azhz-nested-failure-middle +error: Cannot build '/nix/store/8am0ng1gyx8sbzyr0yx6jd5ix3yy5szc-nested-failure-top.drv'. + Reason: 1 dependency failed. + Output paths: + /nix/store/fh12637kgvp906s9yhi9w2dc7ghfwxs1-nested-failure-top +``` + +**Full Changelog**: [v3.4.2...v3.5.1](https://github.com/DeterminateSystems/nix-src/compare/v3.4.2...v3.5.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.2.md b/doc/manual/source/release-notes-determinate/rl-3.5.2.md new file mode 100644 index 00000000000..bc5396c255b --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.2.md @@ -0,0 +1,11 @@ +# Release 3.5.2 (2025-05-12) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + +## What's Changed +* Fix a regression where narHash was not added to lock files when lazy trees were disabled by @edolstra in [DeterminateSystems/nix-src#63](https://github.com/DeterminateSystems/nix-src/pull/63) + +* Tell users a source is corrupted ("cannot read file from tarball: Truncated tar archive detected while reading data"), improving over the previous 'cannot read file from tarball' error by @edolstra in [DeterminateSystems/nix-src#64](https://github.com/DeterminateSystems/nix-src/pull/64) + + +**Full Changelog**: [v3.5.1...v3.5.2](https://github.com/DeterminateSystems/nix-src/compare/v3.5.1...v3.5.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.0.md b/doc/manual/source/release-notes-determinate/rl-3.6.0.md new file mode 100644 index 00000000000..453ab6c301d --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.0.md @@ -0,0 +1,11 @@ +# Release 3.6.0 (2025-05-22) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+ +## What's Changed +* Install 'nix profile add' manpage by @edolstra in [DeterminateSystems/nix-src#69](https://github.com/DeterminateSystems/nix-src/pull/69) +* Sync with upstream 2.29.0 by @edolstra in [DeterminateSystems/nix-src#67](https://github.com/DeterminateSystems/nix-src/pull/67) +* Emit warnings when using import-from-derivation by setting the `trace-import-from-derivation` option to `true` by @gustavderdrache in [DeterminateSystems/nix-src#70](https://github.com/DeterminateSystems/nix-src/pull/70) + + +**Full Changelog**: [v3.5.2...v3.6.0](https://github.com/DeterminateSystems/nix-src/compare/v3.5.2...v3.6.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.1.md b/doc/manual/source/release-notes-determinate/rl-3.6.1.md new file mode 100644 index 00000000000..12505afee27 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.1.md @@ -0,0 +1,9 @@ +# Release 3.6.1 (2025-05-24) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed +* Fix nlohmann error in fromStructuredAttrs() by @edolstra in [DeterminateSystems/nix-src#73](https://github.com/DeterminateSystems/nix-src/pull/73) + + +**Full Changelog**: [v3.6.0...v3.6.1](https://github.com/DeterminateSystems/nix-src/compare/v3.6.0...v3.6.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.2.md b/doc/manual/source/release-notes-determinate/rl-3.6.2.md new file mode 100644 index 00000000000..882c142f00c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.2.md @@ -0,0 +1,15 @@ +# Release 3.6.2 (2025-06-02) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+ +## What's Changed +* Dramatically improve the performance of nix store copy-sigs: Use http-connections setting to control parallelism by @edolstra in [DeterminateSystems/nix-src#80](https://github.com/DeterminateSystems/nix-src/pull/80) +* Document how to replicate nix-store --query --deriver with the nix cli by @grahamc in [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82) +* The garbage collector no longer gives up if it encounters an undeletable file, by @edolstra in [DeterminateSystems/nix-src#83](https://github.com/DeterminateSystems/nix-src/pull/83) +* nix profile: Replace ε and ∅ with descriptive English words by @grahamc in [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81) +* Rework README to clarify that this distribution is our distribution, by @lucperkins in [DeterminateSystems/nix-src#84](https://github.com/DeterminateSystems/nix-src/pull/84) +* Include the source location when warning about inefficient double copies by @edolstra in [DeterminateSystems/nix-src#79](https://github.com/DeterminateSystems/nix-src/pull/79) +* Call out that `--keep-failed` with remote builders will keep the failed build directory on that builder by @cole-h in [DeterminateSystems/nix-src#85](https://github.com/DeterminateSystems/nix-src/pull/85) + + +**Full Changelog**: [v3.6.1...v3.6.2](https://github.com/DeterminateSystems/nix-src/compare/v3.6.1...v3.6.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.5.md b/doc/manual/source/release-notes-determinate/rl-3.6.5.md new file mode 100644 index 00000000000..8ef5be0fd0d --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.5.md @@ -0,0 +1,19 @@ +# Release 3.6.5 (2025-06-12) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+ +## What's Changed +* When remote building with --keep-failed, only show "you can rerun" message if the derivation's platform is supported on this machine by @cole-h in [DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87) +* Indicate that sandbox-paths specifies a missing file in the corresponding error message. by @cole-h in [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88) +* Render lazy tree paths in messages without the /nix/store/hash... prefix in substituted source trees by @edolstra in [DeterminateSystems/nix-src#91](https://github.com/DeterminateSystems/nix-src/pull/91) +* Use FlakeHub inputs by @lucperkins in [DeterminateSystems/nix-src#89](https://github.com/DeterminateSystems/nix-src/pull/89) +* Proactively cache more flake inputs and fetches by @edolstra in [DeterminateSystems/nix-src#93](https://github.com/DeterminateSystems/nix-src/pull/93) +* Fix: register extra builtins just once by @edolstra in [DeterminateSystems/nix-src#97](https://github.com/DeterminateSystems/nix-src/pull/97) +* Fix the link to `builders-use-substitutes` documentation for `builders` by @lucperkins in [DeterminateSystems/nix-src#102](https://github.com/DeterminateSystems/nix-src/pull/102) +* Improve error messages that use the hypothetical future tense of "will" by @lucperkins in [DeterminateSystems/nix-src#92](https://github.com/DeterminateSystems/nix-src/pull/92) +* Make the `nix repl` test more stable by @edolstra in [DeterminateSystems/nix-src#103](https://github.com/DeterminateSystems/nix-src/pull/103) +* Run nixpkgsLibTests against lazy trees by @edolstra in [DeterminateSystems/nix-src#100](https://github.com/DeterminateSystems/nix-src/pull/100) +* Run the Nix test suite against lazy trees by @edolstra in [DeterminateSystems/nix-src#105](https://github.com/DeterminateSystems/nix-src/pull/105) +* Improve caching of inputs by @edolstra in 
[DeterminateSystems/nix-src#98](https://github.com/DeterminateSystems/nix-src/pull/98), [DeterminateSystems/nix-src#110](https://github.com/DeterminateSystems/nix-src/pull/110), and [DeterminateSystems/nix-src#115](https://github.com/DeterminateSystems/nix-src/pull/115) + +**Full Changelog**: [v3.6.2...v3.6.5](https://github.com/DeterminateSystems/nix-src/compare/v3.6.2...v3.6.5) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.6.md b/doc/manual/source/release-notes-determinate/rl-3.6.6.md new file mode 100644 index 00000000000..bf4e3690afa --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.6.md @@ -0,0 +1,7 @@ +# Release 3.6.6 (2025-06-17) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed + +* No-op release on the nix-src side, due to a regression on nix-darwin in determinate-nixd. diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.7.md b/doc/manual/source/release-notes-determinate/rl-3.6.7.md new file mode 100644 index 00000000000..197587f1b3a --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.7.md @@ -0,0 +1,17 @@ +# Release 3.6.7 (2025-06-24) + +* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md). 
+ +## What's Changed + +### Security contents + +* Patched against GHSA-g948-229j-48j3 + +### Lazy trees: + +* Lazy trees now produces `flake.lock` files with NAR hashes unless `lazy-locks` is set to `true` by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113) +* Improved caching with lazy-trees when using --impure, with enhanced testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117) + + +**Full Changelog**: [v3.6.6...v3.6.7](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.7) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.8.md b/doc/manual/source/release-notes-determinate/rl-3.6.8.md new file mode 100644 index 00000000000..c4b4b96c9e7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.8.md @@ -0,0 +1,12 @@ +# Release 3.6.8 (2025-06-25) + +* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md). + +## What's Changed +* Fix fetchToStore() caching with --impure, improve testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117) +* Add lazy-locks setting by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113) +* Sync 2.29.1 by @edolstra in [DeterminateSystems/nix-src#124](https://github.com/DeterminateSystems/nix-src/pull/124) +* Release v3.6.7 by @github-actions in [DeterminateSystems/nix-src#126](https://github.com/DeterminateSystems/nix-src/pull/126) + + +**Full Changelog**: [v3.6.6...v3.6.8](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.8) diff --git a/doc/manual/source/release-notes-determinate/rl-3.7.0.md b/doc/manual/source/release-notes-determinate/rl-3.7.0.md new file mode 100644 index 00000000000..615e858592e --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.7.0.md @@ -0,0 +1,63 @@ +# Release 3.7.0 (2025-07-03) + +- Based on [upstream Nix 
2.29.1](../release-notes/rl-2.29.md). + +## What's Changed + +### Prefetch flake inputs in parallel + +By @edolstra in [DeterminateSystems/nix-src#127](https://github.com/DeterminateSystems/nix-src/pull/127) + +This release brings the command `nix flake prefetch-inputs`. + +Flake inputs are typically fetched "just in time." +That means Nix fetches a flake input when the evaluator needs it, and not before. +When the evaluator needs an input, evaluation is paused until the source is available. + +This causes a significant slow-down on projects with lots of flake inputs. + +The new command `nix flake prefetch-inputs` fetches all flake inputs in parallel. +We expect running this new command before building will dramatically improve evaluation performance for most projects, especially in CI. +Note that projects with many unused flake inputs may not benefit from this change, since the new command fetches every input whether they're used or not. + +### Deep flake input overrides now work as expected + +By @edolstra in [DeterminateSystems/nix-src#108](https://github.com/DeterminateSystems/nix-src/pull/108) + +An override like: + +``` +inputs.foo.inputs.bar.inputs.nixpkgs.follows = "nixpkgs"; +``` + +implicitly set `inputs.foo.inputs.bar` to `flake:bar`, which led to an unexpected error like: + +``` +error: cannot find flake 'flake:bar' in the flake registries +``` + +We now no longer create a parent override (like for `foo.bar` in the example above) if it doesn't set an explicit ref or follows attribute. +We only recursively apply its child overrides. + +### `nix store delete` now shows you why deletion was not possible + +By @edolstra in [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130) + +For example: + +``` +error: Cannot delete path '/nix/store/6fcrjgfjip2ww3sx51rrmmghfsf60jvi-patchelf-0.14.3' + because it's referenced by the GC root '/home/eelco/Dev/nix-master/build/result'. 
+ +error: Cannot delete path '/nix/store/lf3lrf8bjfn8xvr0az9q96y989sxs5r9-cowsay-3.8.4' + because it's referenced by the GC root '/proc/3600568/environ'. + +error: Cannot delete path '/nix/store/klyng5rpdkwi5kbxkncy4gjwb490dlhb-foo.drv' + because it's in use by '{nix-process:3605324}'. +``` + +### Lazy-tree improvements + +- Improved lazy-tree evaluation caching for flakes accessed with a `path` flakeref by @edolstra in [DeterminateSystems/nix-src#131](https://github.com/DeterminateSystems/nix-src/pull/131) + +**Full Changelog**: [v3.6.8...v3.7.0](https://github.com/DeterminateSystems/nix-src/compare/v3.6.8...v3.7.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.0.md b/doc/manual/source/release-notes-determinate/rl-3.8.0.md new file mode 100644 index 00000000000..4103d6df94e --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.0.md @@ -0,0 +1,29 @@ +# Release 3.8.0 (2025-07-10) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). + +## What's Changed + +### Faster CI with `nix flake check` + +`nix flake check` no longer downloads flake outputs if no building is necessary. + +This command is intended to validate that a flake can fully evaluate and all outputs can build. +If the outputs are available in a binary cache then both properties are confirmed to be true. +Notably, downloading the output from the binary cache is not strictly necessary for the validation. + +Previously, `nix flake check` would download a flake output if the full build is available in a binary cache. + +Some users will find this change significantly reduces costly bandwidth and CI workflow time. + +PR: [DeterminateSystems/nix-src#134](https://github.com/DeterminateSystems/nix-src/pull/134) + +### Improved flake locking of transitive dependencies + +Determinate Nix now re-locks all transitive dependencies when changing a flake input's source URL. 
+ +This fixes an issue where in some scenarios Nix would not re-lock those inputs and incorrectly use the old inputs' dependencies. + +PR: [DeterminateSystems/nix-src#137](https://github.com/DeterminateSystems/nix-src/pull/137) + +**Full Changelog**: [v3.7.0...v3.8.0](https://github.com/DeterminateSystems/nix-src/compare/v3.7.0...v3.8.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.1.md b/doc/manual/source/release-notes-determinate/rl-3.8.1.md new file mode 100644 index 00000000000..90dc328f6ec --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.1.md @@ -0,0 +1,9 @@ +# Release 3.8.1 (2025-07-11) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). + +## What's Changed +* Address ifdef problem with macOS/BSD sandboxing by @gustavderdrache in [DeterminateSystems/nix-src#142](https://github.com/DeterminateSystems/nix-src/pull/142) + + +**Full Changelog**: [v3.8.0...v3.8.1](https://github.com/DeterminateSystems/nix-src/compare/v3.8.0...v3.8.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.2.md b/doc/manual/source/release-notes-determinate/rl-3.8.2.md new file mode 100644 index 00000000000..638d90f6841 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.2.md @@ -0,0 +1,10 @@ +# Release 3.8.2 (2025-07-12) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). 
+ +## What's Changed +* ci: don't run the full test suite for x86_64-darwin by @grahamc in [DeterminateSystems/nix-src#144](https://github.com/DeterminateSystems/nix-src/pull/144) +* Try publishing the manual again by @grahamc in [DeterminateSystems/nix-src#145](https://github.com/DeterminateSystems/nix-src/pull/145) + + +**Full Changelog**: [v3.8.1...v3.8.2](https://github.com/DeterminateSystems/nix-src/compare/v3.8.1...v3.8.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.3.md b/doc/manual/source/release-notes-determinate/rl-3.8.3.md new file mode 100644 index 00000000000..d3eb02bc7ea --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.3.md @@ -0,0 +1,26 @@ +# Release 3.8.3 (2025-07-18) + +* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md). + +## What's Changed + +### Non-blocking evaluation caching + +Users reported evaluation would occasionally block other evaluation processes. + +The evaluation cache database is now opened in write-ahead mode to prevent delaying evaluations. + +PR: [DeterminateSystems/nix-src#150](https://github.com/DeterminateSystems/nix-src/pull/150) + +### New experimental feature: `external-builders` + +This experimental feature allows Nix to call an external program for the build environment. + +The interface and behavior of this feature may change at any moment without a correspondingly major semver version change. 
+ +PRs: +- [DeterminateSystems/nix-src#141](https://github.com/DeterminateSystems/nix-src/pull/141) +- [DeterminateSystems/nix-src#152](https://github.com/DeterminateSystems/nix-src/pull/152) +- [DeterminateSystems/nix-src#78](https://github.com/DeterminateSystems/nix-src/pull/78) + +**Full Changelog**: [v3.8.2...v3.8.3](https://github.com/DeterminateSystems/nix-src/compare/v3.8.2...v3.8.3) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.4.md b/doc/manual/source/release-notes-determinate/rl-3.8.4.md new file mode 100644 index 00000000000..7c73e75ca02 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.4.md @@ -0,0 +1,9 @@ +# Release 3.8.4 (2025-07-21) + +* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md). + +## What's Changed +* Revert "Use WAL mode for SQLite cache databases" by @grahamc in [DeterminateSystems/nix-src#155](https://github.com/DeterminateSystems/nix-src/pull/155) + + +**Full Changelog**: [v3.8.3...v3.8.4](https://github.com/DeterminateSystems/nix-src/compare/v3.8.3...v3.8.4) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.5.md b/doc/manual/source/release-notes-determinate/rl-3.8.5.md new file mode 100644 index 00000000000..0f1bbe6f99d --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.5.md @@ -0,0 +1,58 @@ +## What's Changed + +### Less time "unpacking into the Git cache" + +Unpacking sources into the user's cache now takes 1/2 to 1/4 of the time it used to. +Previously, Nix serially unpacked sources into the cache. +This change takes better advantage of our users' hardware by parallelizing the import. +Real life testing shows an initial Nixpkgs import takes 3.6s on Linux, when it used to take 11.7s. + +PR: [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149) + +### Copy paths to the daemon in parallel + +Determinate Nix's evaluator no longer blocks evaluation when copying paths to the store. 
+Previously, Nix would pause evaluation when it needed to add files to the store. +Now, the copying is performed in the background allowing evaluation to proceed. + +PR: [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162) + +### Faster Nix evaluation by reducing duplicate Nix daemon queries + +Determinate Nix more effectively caches store path validity data within a single evaluation. +Previously, the Nix client would perform many thousands of extra Nix daemon requests. +Each extra request takes real time, and this change reduced a sample evaluation by over 12,000 requests. + +PR: [DeterminateSystems/nix-src#157](https://github.com/DeterminateSystems/nix-src/pull/157) + +### More responsive tab completion + +Tab completion now implies the "--offline" flag, which disables most network requests. +Previously, tab completing Nix arguments would attempt to fetch sources and access binary caches. +Operating in offline mode improves the interactive experience of Nix when tab completing. + +PR: [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161) + +### ZFS users: we fixed the mysterious stall. + +Opening the Nix database is usually instantaneous but sometimes has a several second latency. +Determinate Nix works around this issue, eliminating the frustrating random stall when running Nix commands. + +PR: [DeterminateSystems/nix-src#158](https://github.com/DeterminateSystems/nix-src/pull/158) + +### Other changes + +* Determinate Nix is now fully formatted by clang-format, making it easier than ever to contribute to the project. + +PR: [DeterminateSystems/nix-src#159](https://github.com/DeterminateSystems/nix-src/pull/159) + +* Determinate Nix is now based on upstream Nix 2.30.2. + +PR: [DeterminateSystems/nix-src#160](https://github.com/DeterminateSystems/nix-src/pull/160) + +* Determinate Nix now uses `main` as our development branch, moving away from `detsys-main`. 
+ +PRs: +* [DeterminateSystems/nix-src#164](https://github.com/DeterminateSystems/nix-src/pull/164) +* [DeterminateSystems/nix-src#166](https://github.com/DeterminateSystems/nix-src/pull/166) + diff --git a/doc/manual/source/release-notes-determinate/v3.10.0.md b/doc/manual/source/release-notes-determinate/v3.10.0.md new file mode 100644 index 00000000000..c644dd78744 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.0.md @@ -0,0 +1,10 @@ +# Release 3.10.0 (2025-09-02) + +* Based on [upstream Nix 2.31.0](../release-notes/rl-2.31.md). + +## What's Changed + +This release rebases Determinate Nix on upstream Nix 2.31.0. + + +**Full Changelog**: [v3.9.1...v3.10.0](https://github.com/DeterminateSystems/nix-src/compare/v3.9.1...v3.10.0) diff --git a/doc/manual/source/release-notes-determinate/v3.10.1.md b/doc/manual/source/release-notes-determinate/v3.10.1.md new file mode 100644 index 00000000000..08cbe4fd058 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.1.md @@ -0,0 +1,9 @@ +# Release 3.10.1 (2025-09-02) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). + +## What's Changed +This release rebases Determinate Nix on upstream Nix 2.31.1. + + +**Full Changelog**: [v3.10.0...v3.10.1](https://github.com/DeterminateSystems/nix-src/compare/v3.10.0...v3.10.1) diff --git a/doc/manual/source/release-notes-determinate/v3.11.0.md b/doc/manual/source/release-notes-determinate/v3.11.0.md new file mode 100644 index 00000000000..7abb665a5a9 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.0.md @@ -0,0 +1,36 @@ +# Release 3.11.0 (2025-09-03) + +- Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). 
+ +## What's Changed + +### Parallel evaluation + +The following commands are now able to evaluate Nix expressions in parallel: + +- `nix search` +- `nix flake check` +- `nix flake show` +- `nix eval --json` + +This is currently in developer preview, and we'll be turning it on for more users in the coming weeks. +If you would like to try it right away, specify `eval-cores` in your `/etc/nix/nix.custom.conf`: + +```ini +eval-cores = 0 # Evaluate across all cores +``` + +Further, we introduced a new builtin: `builtins.parallel`. +This new builtin allows users to explicitly parallelize evaluation within a Nix expression. + +Using this new builtin requires turning on an additional experimental feature: + +```ini +extra-experimental-features = parallel-eval +``` + +Please note that this new builtin is subject to change semantics or even go away during the developer preview. + +PR: [DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125) + +**Full Changelog**: [v3.10.1...v3.11.0](https://github.com/DeterminateSystems/nix-src/compare/v3.10.1...v3.11.0) diff --git a/doc/manual/source/release-notes-determinate/v3.11.1.md b/doc/manual/source/release-notes-determinate/v3.11.1.md new file mode 100644 index 00000000000..30597164333 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.1.md @@ -0,0 +1,9 @@ +# Release 3.11.1 (2025-09-04) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). 
+ +## What's Changed +* Fix race condition in Value::isTrivial() by @edolstra in [DeterminateSystems/nix-src#192](https://github.com/DeterminateSystems/nix-src/pull/192) + + +**Full Changelog**: [v3.11.0...v3.11.1](https://github.com/DeterminateSystems/nix-src/compare/v3.11.0...v3.11.1) diff --git a/doc/manual/source/release-notes-determinate/v3.11.2.md b/doc/manual/source/release-notes-determinate/v3.11.2.md new file mode 100644 index 00000000000..ac4fe569dff --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.2.md @@ -0,0 +1,24 @@ +# Release 3.11.2 (2025-09-12) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). + +## What's Changed + +### Fix some interactions with the registry and flakes that include a `?dir=` parameter + +Some users were experiencing issues when their flake registry contained a flake that included a `?dir=` parameter, causing commands like `nix eval registry-with-flake-in-subdir#output` and those that used `--inputs-from` to fail or behave incorrectly. + +This is now fixed, so use your flakes inside subdirs without fear! + +PRs: [DeterminateSystems/nix-src#196](https://github.com/DeterminateSystems/nix-src/pull/196), [DeterminateSystems/nix-src#199](https://github.com/DeterminateSystems/nix-src/pull/199) + +### Only substitute inputs if they haven't already been fetched + +When using `lazy-trees`, you might have noticed Nix fetching some source inputs from a cache, even though you could have sworn it already fetched those inputs! + +This fixes that behavior such that Nix will try to fetch inputs from their original location, and only if that fails fall back to fetching from a substituter. 
+ +PR: [DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202) + + +**Full Changelog**: [v3.11.1...v3.11.2](https://github.com/DeterminateSystems/nix-src/compare/v3.11.1...v3.11.2) diff --git a/doc/manual/source/release-notes-determinate/v3.11.3.md b/doc/manual/source/release-notes-determinate/v3.11.3.md new file mode 100644 index 00000000000..fab5ed51a4b --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.3.md @@ -0,0 +1,34 @@ +# Release 3.11.3 (2025-10-09) + +* Based on [upstream Nix 2.31.2](../release-notes/rl-2.31.md). + +## What's Changed + +### Fix some bugs and interactions with parallel eval + +We received some reports of parallel eval having issues, such as not being able to be interrupted, infinite recursion hanging forever, and segfaults when using the experimental `builtins.parallel`. + +Those have now been fixed. + +Additionally, the debugger now disables parallel eval, because the two features are incompatible. + +PRs: [DeterminateSystems/nix-src#206](https://github.com/DeterminateSystems/nix-src/pull/206), [DeterminateSystems/nix-src#213](https://github.com/DeterminateSystems/nix-src/pull/213), [DeterminateSystems/nix-src#218](https://github.com/DeterminateSystems/nix-src/pull/218), [DeterminateSystems/nix-src#205](https://github.com/DeterminateSystems/nix-src/pull/205) + +### `NIX_SSHOPTS` + `ssh-ng://root@localhost` fix + +We noticed that specifying `NIX_SSHOPTS=-p2222` when using a command that uses SSH (such as `nix copy --to ssh-ng://root@localhost`) stopped respecting the `NIX_SSHOPTS` setting because of an incorrect comparison. + +This has been fixed, so `NIX_SSHOPTS` and SSH stores that are accessed like `user@localhost` work again. 
+ +PR: [DeterminateSystems/nix-src#219](https://github.com/DeterminateSystems/nix-src/pull/219) + +### Fix `error: [json.exception.type_error.302] type must be string, but is array` when using `exportReferencesGraph` + +We received a report of a `nix build` failing on a specific flake due to its expression using `exportReferencesGraph` with a heterogeneous array of dependencies, causing this inscrutable error. + +This specific case has been broken since Nix 2.29.0, and is now fixed. + +PRs: [DeterminateSystems/nix-src#221](https://github.com/DeterminateSystems/nix-src/pull/221), [DeterminateSystems/nix-src#225](https://github.com/DeterminateSystems/nix-src/pull/225) + + +**Full Changelog**: [v3.11.2...v3.11.3](https://github.com/DeterminateSystems/nix-src/compare/v3.11.2...v3.11.3) diff --git a/doc/manual/source/release-notes-determinate/v3.12.0.md b/doc/manual/source/release-notes-determinate/v3.12.0.md new file mode 100644 index 00000000000..55c1f10bf15 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.0.md @@ -0,0 +1,17 @@ +# Release 3.12.0 (2025-10-23) + +* Based on [upstream Nix 2.32.1](../release-notes/rl-2.32.md). + +## What's Changed + +### `nix nario` + +Determinate Nix has a new command, `nix nario`, that replaces the commands `nix-store --export` and `nix-store --import` from the old CLI. `nix nario` allows you to serialize store paths to a file that can be imported into another Nix store. It is backwards compatible with the file format generated by `nix-store --export`. It also provides a new format (selected by passing `--format 2`) that supports store path attributes such as signatures, and allows store paths to be imported more efficiently. + +### Other changes + +`nix flake clone` now supports arbitrary input types. In particular, this allows you to clone tarball flakes, such as flakes on FlakeHub. + +When using `-vv`, Determinate Nix now prints the Nix version. 
This is useful when diagnosing Nix problems from the debug output of a Nix run. + +**Full Changelog**: [v3.11.3...v3.12.0](https://github.com/DeterminateSystems/nix-src/compare/v3.11.3...v3.12.0) diff --git a/doc/manual/source/release-notes-determinate/v3.12.1.md b/doc/manual/source/release-notes-determinate/v3.12.1.md new file mode 100644 index 00000000000..1be2b48e26d --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.1.md @@ -0,0 +1,10 @@ +# Release 3.12.1 (2025-11-04) + +* Based on [upstream Nix 2.32.1](../release-notes/rl-2.32.md). + +## What's Changed +* Allow access to the result of fetchClosure by @edolstra in [DeterminateSystems/nix-src#241](https://github.com/DeterminateSystems/nix-src/pull/241) +* libstore/build: fixup JSON logger missing the resBuildResult result event by @cole-h in [DeterminateSystems/nix-src#246](https://github.com/DeterminateSystems/nix-src/pull/246) + + +**Full Changelog**: [v3.12.0...v3.12.1](https://github.com/DeterminateSystems/nix-src/compare/v3.12.0...v3.12.1) diff --git a/doc/manual/source/release-notes-determinate/v3.12.2.md b/doc/manual/source/release-notes-determinate/v3.12.2.md new file mode 100644 index 00000000000..4c8c3169aa7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.2.md @@ -0,0 +1,42 @@ +# Release 3.12.2 (2025-11-05) + +* Based on [upstream Nix 2.32.2](../release-notes/rl-2.32.md). + +## What's Changed + +### Faster `revCount` computation + +When using Git repositories with a long history, calculating the `revCount` attribute can take a long time. Determinate Nix now computes `revCount` using multiple threads, making it much faster. + +Note that if you don't need `revCount`, you can disable it altogether by setting the flake input attribute `shallow = true`. + +PR: [DeterminateSystems/nix-src#245](https://github.com/DeterminateSystems/nix-src/pull/245) + +### More readable error messages + +Previously, Nix showed full flakerefs in error messages such as stack traces, e.g. 
+``` + … from call site + at «github:NixOS/nixpkgs/3bea86e918d8b54aa49780505d2d4cd9261413be?narHash=sha256-Ica%2B%2BSXFuLyxX9Q7YxhfZulUif6/gwM8AEQYlUxqSgE%3D»/lib/customisation.nix:69:16: + 68| let + 69| result = f origArgs; + | ^ + 70| +``` +It now abbreviates these by leaving out `narHash` and shortening Git revisions: +``` + … from call site + at «github:NixOS/nixpkgs/3bea86e»/lib/customisation.nix:69:16: + 68| let + 69| result = f origArgs; + | ^ + 70| +``` + +PR: [DeterminateSystems/nix-src#243](https://github.com/DeterminateSystems/nix-src/pull/243) + +### Other changes + +This release fixes an assertion failure in `nix flake check`. PR: [DeterminateSystems/nix-src#252](https://github.com/DeterminateSystems/nix-src/pull/252) + +**Full Changelog**: [v3.12.1...v3.12.2](https://github.com/DeterminateSystems/nix-src/compare/v3.12.1...v3.12.2) diff --git a/doc/manual/source/release-notes-determinate/v3.13.0.md b/doc/manual/source/release-notes-determinate/v3.13.0.md new file mode 100644 index 00000000000..09041c2acda --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.0.md @@ -0,0 +1,45 @@ +# Release 3.13.0 (2025-11-09) + +* Based on [upstream Nix 2.32.3](../release-notes/rl-2.32.md). + +## What's Changed + + +### Git sources have a progress indicator again + +Nix used to feel "stuck" while it was cloning large repositories. +Determinate Nix now shows git's native progress indicator while fetching. 
+ +PR: [DeterminateSystems/nix-src#250](https://github.com/DeterminateSystems/nix-src/pull/250) + +### C API improvements + +We've invested in the C API to support our work on closure analysis for SBOM generation, and made a couple of changes: + +* C API: add nix_locked_flake_read_path for flake file reading +* C API: make nix_store_get_fs_closure compatible with upstream + +PRs: +* [DeterminateSystems/nix-src#244](https://github.com/DeterminateSystems/nix-src/pull/244) +* [DeterminateSystems/nix-src#254](https://github.com/DeterminateSystems/nix-src/pull/254) + +### Dropping support for Intel Macs + +Determinate Nix no longer supports being installed on Intel Macs. +Determinate Nix will continue to support building for Intel macOS targets, but only from an Apple Silicon host. + +From our intent-to-ship: +> Over the past year, we’ve watched usage of Determinate on Intel macOS hosts dwindle to a minuscule fraction of total usage. +> It currently stands at approximately 0.02% of all installations. +> The vast majority are run in managed CI environments that, we anticipate, will be able to easily convert to using Apple Silicon runners. + +For more information: https://github.com/DeterminateSystems/nix-src/issues/224 + +PR: [DeterminateSystems/nix-src#257](https://github.com/DeterminateSystems/nix-src/pull/257) + +### Bugs fixed + +* IPv6 Store URLs now handle zone ID references like they did in previous releases [NixOS/nix#14434](https://github.com/NixOS/nix/pull/14434) + + +**Full Changelog**: [v3.12.2...v3.13.0](https://github.com/DeterminateSystems/nix-src/compare/v3.12.2...v3.13.0) diff --git a/doc/manual/source/release-notes-determinate/v3.13.1.md b/doc/manual/source/release-notes-determinate/v3.13.1.md new file mode 100644 index 00000000000..025a192c44e --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.1.md @@ -0,0 +1,10 @@ +# Release 3.13.1 (2025-11-12) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). 
+ +## What's Changed +* nix bundle: Wait for async path writer by @edolstra in [DeterminateSystems/nix-src#260](https://github.com/DeterminateSystems/nix-src/pull/260) +* Sync with upstream 2.32.4 by @edolstra in [DeterminateSystems/nix-src#261](https://github.com/DeterminateSystems/nix-src/pull/261) + + +**Full Changelog**: [v3.13.0...v3.13.1](https://github.com/DeterminateSystems/nix-src/compare/v3.13.0...v3.13.1) diff --git a/doc/manual/source/release-notes-determinate/v3.13.2.md b/doc/manual/source/release-notes-determinate/v3.13.2.md new file mode 100644 index 00000000000..2490b865e6b --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.2.md @@ -0,0 +1,68 @@ +# Release 3.13.2 (2025-11-19) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). + +## What's Changed + +### Abbreviate flakerefs in lockfile diffs and `nix flake metadata` + +Flake refs are now abbreviated when possible, to reduce visual clutter. + +For example, this changes + +``` +• Updated input 'blender-bin': + 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.19/01993ca7-2aa8-746f-96f5-ca8d2c2b962d/source.tar.gz?narHash=sha256-ZqVhVl9UYVErF8HW8lcvqss005VWYjuX//rZ%2BOmXyHg%3D' (2025-09-12) + → 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.20/019a8772-b044-7738-8c03-109bdc9f0a01/source.tar.gz?narHash=sha256-sVj9Gmx0kwTDQPJ5kgQYszE3Hdjevu0zx0b/bL2fyUc%3D' (2025-11-15) +• Updated input 'nix': + 'github:DeterminateSystems/nix-src/236ebef6514f3a2a9765c8a1d80dd503b8e672be?narHash=sha256-s6/Err0yqOp5fM3OdCF1vhmEYpeElbPOWX88YrW2qj4%3D' (2025-10-23) + → 'github:DeterminateSystems/nix-src/ef054dc06e9701597bce0b0572af18cb4c7e7277?narHash=sha256-uqYmH0KA8caQqX5u4BMarZsuDlC%2B71HRsH3h4f3DPCA%3D' (2025-11-12) +``` + +to + +``` +• Updated input 'blender-bin': + 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.19/01993ca7-2aa8-746f-96f5-ca8d2c2b962d/source.tar.gz' (2025-09-12) + → 
'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.20/019a8772-b044-7738-8c03-109bdc9f0a01/source.tar.gz' (2025-11-15) +• Updated input 'nix': + 'github:DeterminateSystems/nix-src/236ebef' (2025-10-23) + → 'github:DeterminateSystems/nix-src/ef054dc' (2025-11-12) +``` + +PR: [DeterminateSystems/nix-src#264](https://github.com/DeterminateSystems/nix-src/pull/264) + +### `nix flake prefetch-inputs` now skips build-time inputs + +Build-time inputs can already be fetched in parallel, so prefetching them is usually not what you want. + +This can be especially noticeable in projects that make extensive use of build-time flake inputs. + +PR: [DeterminateSystems/nix-src#263](https://github.com/DeterminateSystems/nix-src/pull/263) + +### Don't compute `revCount`/`lastModified` if they're already specified + +We don't care if the user (or more likely the lock file) specifies an incorrect value for these attributes, since it doesn't matter for security (unlike content hashes like `narHash`). + +This can save time when operating on large repos -- having to recalculate these attributes could slow things down greatly. + +PR: [DeterminateSystems/nix-src#269](https://github.com/DeterminateSystems/nix-src/pull/269) + +### Avoid unnecessary Git refetches + +This fixes the issue where updating a Git input does a non-shallow fetch, and then a subsequent eval does a shallow refetch because the `revCount` is already known. + +Now the subsequent eval will reuse the repo used in the first fetch. + +PR: [DeterminateSystems/nix-src#270](https://github.com/DeterminateSystems/nix-src/pull/270) + +### Use our mirrored flake registry + +The flake registry is security-critical and thus should have high availability. + +By mirroring the upstream Nix flake registry, we can make it less likely that a GitHub outage affects being able to resolve from the registry. 
+ +PR: [DeterminateSystems/nix-src#271](https://github.com/DeterminateSystems/nix-src/pull/271) + + +**Full Changelog**: [v3.13.1...v3.13.2](https://github.com/DeterminateSystems/nix-src/compare/v3.13.1...v3.13.2) diff --git a/doc/manual/source/release-notes-determinate/v3.14.0.md b/doc/manual/source/release-notes-determinate/v3.14.0.md new file mode 100644 index 00000000000..d72d5d21468 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.14.0.md @@ -0,0 +1,159 @@ +# Release 3.14.0 (2025-12-08) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). + +## What is going on?! `nix ps` to the rescue + +Determinate Nix now features a `nix ps` command to summarize all of the active builds and child processes: + +``` +$ nix ps +USER PID CPU DERIVATION/COMMAND +_nixbld1 30167 0.4s /nix/store/h431bcfml83czhpyzljhp9mw4yrq95vs-determinate-nix-manual-3.14.0.drv (wall=9s) +_nixbld1 30167 0.2s └───bash -e /nix/store/jwqf79v5p51x9mv8vx20fv9mzm2x7kig-source-stdenv.sh /nix/store/285whzixr5k1kfj6nidyj29mqqgv7n0b-default-builder.s +_nixbld1 30278 0.0s └───ninja -j14 +_nixbld1 30279 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30286 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix config show --json +_nixbld1 30280 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30287 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-language +_nixbld1 30281 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30288 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-cli +_nixbld1 30282 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 
/nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30284 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-xp-features +_nixbld1 30283 0.0s └───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30285 0.0s └───/nix/store/bs1pvy8margy5sj0jwahchxbjnqzi14i-bash-5.2p37/bin/bash -euo pipefail -c if type -p build-release-notes > /de +_nixbld1 30289 0.0s └───changelog-d ../source/release-notes/../../rl-next +``` + +For the integrators out there, it also has a `--json` flag with all the raw data. + +PRs: +* [DeterminateSystems/nix-src#282](https://github.com/DeterminateSystems/nix-src/pull/282) +* [DeterminateSystems/nix-src#287](https://github.com/DeterminateSystems/nix-src/pull/287) + + +## Nix `build`, `profile`, and `flake check` commands tell you what output failed + +These commands now tell you exactly what flake outputs failed to build. +Previously, the error would indicate only what derivation failed to build -- but not which output. + +Now, `nix build` and `nix profile` commands provide the specific output: + +``` +$ nix build .#oneFakeHash .#badSystem --keep-going +❌ git+file:///Users/grahamc/src/github.com/DeterminateSystems/samples#oneFakeHash +error: hash mismatch in fixed-output derivation '/nix/store/58pp1y74j4f5zxfq50xncv2wvnxf7w3y-one-fake-hash.drv': + specified: sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= + got: sha256-i7j83d71sibS/ssSjLJ5PMKmbhjAM+BHW0aElvkgEwY= +❌ git+file:///Users/grahamc/src/github.com/DeterminateSystems/samples#badSystem +error: Cannot build '/nix/store/5vsaxi730yl2icngkyvn8wiflik5wfmq-bad-system.drv'. 
+ Reason: required system or feature not available + Required system: 'bogus' with features {} + Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} +``` + +And in a great change for CI, `nix flake check` users get improved summaries too: + +``` +$ nix flake check +❓ checks.aarch64-darwin.twoFakeHashes (cancelled) +❓ checks.aarch64-darwin.badSystemNested (cancelled) +❓ checks.aarch64-darwin.oneFakeHash (cancelled) +❓ checks.aarch64-darwin.failure (cancelled) +❓ checks.aarch64-darwin.badSystem (cancelled) +❓ checks.aarch64-darwin.weirdHash (cancelled) +❓ checks.aarch64-darwin.all (cancelled) +❓ checks.aarch64-darwin.fakeHashes (cancelled) +❓ checks.aarch64-darwin.incorrectHashes (cancelled) +❓ checks.aarch64-darwin.badFeaturesNested (cancelled) +❓ checks.aarch64-darwin.failureNested (cancelled) +❌ checks.aarch64-darwin.badFeatures +error: Cannot build '/nix/store/sc1cyhrpsm9yjx55cl2zzyr5lypwigi6-bad-feature.drv'. + Reason: required system or feature not available + Required system: 'aarch64-darwin' with features {bogus} + Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} +``` + +PRs: +* [DeterminateSystems/nix-src#281](https://github.com/DeterminateSystems/nix-src/pull/281) +* [DeterminateSystems/nix-src#285](https://github.com/DeterminateSystems/nix-src/pull/285) + + +## More seamless upgrades from Nix 2.18 and Nix 2.19 + +We've heard from some users who are trying to upgrade from Nix 2.18. + +These users are primarily experiencing problems caused by Nix 2.20 switching from `git-archive` to `libgit2` for fetching repositories. +This change caused some `git-archive` filters to stop executing, like autocrlf. +Not running those filters is an improvement, and running those filters *can cause* instability in source hashes. +However, this switch *did* cause previously valid hashes to become invalid. 
+ +Determinate Nix now retries fetching an old archive with `git-archive` as a fallback when libgit2 fails to provide the correct source. + +Further, to support a progressive migration Determinate Nix has a new option: `nix-219-compat`. +Set `nix-219-compat=true` to cause Nix to author new flake.nix files with a `git-archive` based source hash. + +Finally, a user identified `builtins.path` changed since 2.18 and stopped propagating references. +We have corrected this regression. + +PRs: +* [DeterminateSystems/nix-src#283](https://github.com/DeterminateSystems/nix-src/pull/283) +* [DeterminateSystems/nix-src#278](https://github.com/DeterminateSystems/nix-src/pull/278) + +## Flake registry mirroring + +Determinate Nix now includes a fallback copy of the Nix Registry. +This change builds on top of v3.13.2, where we changed from the upstream Nix registry to a mirrored copy hosted by `install.determinate.systems`. + +Combined, these changes increase the reliability of Nix in the face of network outages. + +> [!NOTE] +> Flake registry URLs for `flake.nix` inputs is deprecated. +> The flake registry should only be used for interactive use. +> See: https://github.com/DeterminateSystems/nix-src/issues/37 + +PR: [DeterminateSystems/nix-src#273](https://github.com/DeterminateSystems/nix-src/pull/273) + +## Flake registry resolution CLI + +We added the new command `nix registry resolve` to help debug issues with Flake registries. +This command looks up a flake registry input name and returns the flakeref it resolves to. + +For example, looking up Nixpkgs: + +``` +$ nix registry resolve nixpkgs +github:NixOS/nixpkgs/nixpkgs-unstable +``` + +Or looking up the 25.11 branch of Nixpkgs: +``` +$ nix registry resolve nixpkgs/release-25.11 +github:NixOS/nixpkgs/release-25.11 +``` + +> [!NOTE] +> Flake registry URLs for `flake.nix` inputs is deprecated. +> The flake registry should only be used for interactive use. 
+ +> See: https://github.com/DeterminateSystems/nix-src/issues/37 + +PR: [DeterminateSystems/nix-src#273](https://github.com/DeterminateSystems/nix-src/pull/273) + +## Improved Docker image packaging + +Thanks to `employee-64c7dcd530593118dcccc3fb`, the OCI / Docker images built by the Determinate Nix flake.nix can be further customized. + +Users can specify their own base image by specifying `fromImage`. + +Additionally, users can specify additional directories to include at the beginning or end of the PATH variable with `extraPrePaths` and `extraPostPaths`. + +PRs: +* [DeterminateSystems/nix-src#277](https://github.com/DeterminateSystems/nix-src/pull/277) +* [DeterminateSystems/nix-src#280](https://github.com/DeterminateSystems/nix-src/pull/280) + +## Bug fixes + +* Corrected an error with parallel evaluation ([DeterminateSystems/nix-src#286](https://github.com/DeterminateSystems/nix-src/pull/286)) +* Fixed compatibility with updated Nixpkgs versions. Thank you SandaruKasa! ([DeterminateSystems/nix-src#284](https://github.com/DeterminateSystems/nix-src/pull/284)) + +**Full Changelog**: [v3.13.2...v3.14.0](https://github.com/DeterminateSystems/nix-src/compare/v3.13.2...v3.14.0) diff --git a/doc/manual/source/release-notes-determinate/v3.15.0.md b/doc/manual/source/release-notes-determinate/v3.15.0.md new file mode 100644 index 00000000000..fb568374c3f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.0.md @@ -0,0 +1,28 @@ +# Release 3.15.0 (2025-12-19) + +* Based on [upstream Nix 2.33.0](../release-notes/rl-2.33.md). + +## `fetchTree` improvement + +`builtins.fetchTree` now implicitly treats the fetched tree as "final" when a `narHash` is supplied, meaning that it will not return attributes like `lastModified` or `revCount` unless they were specified by the caller. This makes it possible to substitute the tree from a binary cache, which is often more efficient. 
Furthermore, for Git inputs, it allows Nix to perform a shallow fetch, which is much faster. + +This is primarily useful for users of `flake-compat`, since it uses `builtins.fetchTree` internally. + +PR: [DeterminateSystems/nix-src#297](https://github.com/DeterminateSystems/nix-src/pull/297) + +## New builtin function `builtins.filterAttrs` + +Nixpkgs heavily relies on this function to select attributes from an attribute set: + +```nix +filterAttrs = pred: set: removeAttrs set (filter (name: !pred name set.${name}) (attrNames set)); +``` + +Determinate Nix now has this function built-in, which makes it much faster. + +PR: [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +## New Contributors +* @not-ronjinger made their first contribution in [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +**Full Changelog**: [v3.14.0...v3.15.0](https://github.com/DeterminateSystems/nix-src/compare/v3.14.0...v3.15.0) diff --git a/doc/manual/source/release-notes-determinate/v3.15.1.md b/doc/manual/source/release-notes-determinate/v3.15.1.md new file mode 100644 index 00000000000..9243962cf4b --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.1.md @@ -0,0 +1,15 @@ +# Release 3.15.1 (2025-12-24) + +* Based on [upstream Nix 2.33.0](../release-notes/rl-2.33.md). + +## What's Changed +Users reported the v3.15.0 tarball could not be fetched in a fixed-output derivation due to current stdenv paths present in the documentation. This release eliminated those paths. + +PR: [DeterminateSystems/nix-src#306](https://github.com/DeterminateSystems/nix-src/pull/306) + +Additionally, this change re-enables CodeRabbit's code review on our changes. CodeRabbit was disabled by the upstream project, and we inadvertently included that change. 
+ +PR: [DeterminateSystems/nix-src#305](https://github.com/DeterminateSystems/nix-src/pull/305) + + +**Full Changelog**: [v3.15.0...v3.15.1](https://github.com/DeterminateSystems/nix-src/compare/v3.15.0...v3.15.1) diff --git a/doc/manual/source/release-notes-determinate/v3.8.6.md b/doc/manual/source/release-notes-determinate/v3.8.6.md new file mode 100644 index 00000000000..8f917f2362f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.8.6.md @@ -0,0 +1,14 @@ +# Release 3.8.6 (2025-08-19) + +* Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +## What's Changed +* Auto update release notes by @grahamc in [DeterminateSystems/nix-src#170](https://github.com/DeterminateSystems/nix-src/pull/170) +* Use WAL mode for SQLite cache databases (2nd attempt) by @edolstra in [DeterminateSystems/nix-src#167](https://github.com/DeterminateSystems/nix-src/pull/167) +* Enable parallel marking in boehm-gc by @edolstra in [DeterminateSystems/nix-src#168](https://github.com/DeterminateSystems/nix-src/pull/168) +* BasicClientConnection::queryPathInfo(): Don't throw exception for invalid paths by @edolstra in [DeterminateSystems/nix-src#172](https://github.com/DeterminateSystems/nix-src/pull/172) +* Fix queryPathInfo() negative caching by @edolstra in [DeterminateSystems/nix-src#173](https://github.com/DeterminateSystems/nix-src/pull/173) +* forceDerivation(): Wait for async path write after forcing value by @edolstra in [DeterminateSystems/nix-src#176](https://github.com/DeterminateSystems/nix-src/pull/176) + + +**Full Changelog**: [v3.8.5...v3.8.6](https://github.com/DeterminateSystems/nix-src/compare/v3.8.5...v3.8.6) diff --git a/doc/manual/source/release-notes-determinate/v3.9.0.md b/doc/manual/source/release-notes-determinate/v3.9.0.md new file mode 100644 index 00000000000..66deb69b619 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.0.md @@ -0,0 +1,45 @@ +# Release 3.9.0 (2025-08-26) + +* Based on [upstream Nix 
2.30.2](../release-notes/rl-2.30.md). + +## What's Changed + +### Build-time flake inputs + +Some of our users have hundreds or thousands of flake inputs. +In those cases, it is painfully slow for Nix to fetch all the inputs during evaluation of the flake. + +Determinate Nix has an experimental feature for deferring the fetching to build time of the dependent derivations. + +This is currently in developer preview. +If you would like to try it, add the experimental feature to your `/etc/nix/nix.custom.conf`: + +```ini +extra-experimental-features = build-time-fetch-tree +``` + +Then, mark an input to be fetched at build time: + +```nix +inputs.example = { + type = "github"; + owner = "DeterminateSystems"; + repo = "example"; + flake = false; # <-- currently required + buildTime = true; +}; +``` + +Let us know what you think! + +PR: [DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + +### Corrected inconsistent behavior of `nix flake check` + +Users reported that `nix flake check` would not consistently validate the entire flake. + +We've fixed this issue and improved our testing around `nix flake check`. + +PR: [DeterminateSystems/nix-src#182](https://github.com/DeterminateSystems/nix-src/pull/182) + +**Full Changelog**: [v3.8.6...v3.9.0](https://github.com/DeterminateSystems/nix-src/compare/v3.8.6...v3.9.0) diff --git a/doc/manual/source/release-notes-determinate/v3.9.1.md b/doc/manual/source/release-notes-determinate/v3.9.1.md new file mode 100644 index 00000000000..38d17199c2c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.1.md @@ -0,0 +1,20 @@ +# Release 3.9.1 (2025-08-28) + +- Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +### A useful `nix flake init` template default + +Nix's default flake template is [extremely bare bones](https://github.com/NixOS/templates/blob/ad0e221dda33c4b564fad976281130ce34a20cb9/trivial/flake.nix), and not a useful starting point. 
+ +Determinate Nix now uses [a more fleshed out default template](https://github.com/DeterminateSystems/flake-templates/blob/8af99b99627da41f16897f60eb226db30c775e76/default/flake.nix), including targeting multiple systems. + +PR: [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180) + +### Build cancellation is repaired on macOS + +A recent macOS update changed how signals are handled by Nix and broke using Ctrl-C to stop a build. +Determinate Nix on macOS correctly handles these signals and stops the build. + +PR: [DeterminateSystems/nix-src#184](https://github.com/DeterminateSystems/nix-src/pull/184) + +**Full Changelog**: [v3.9.0...v3.9.1](https://github.com/DeterminateSystems/nix-src/compare/v3.9.0...v3.9.1) diff --git a/doc/manual/source/release-notes/rl-0.12.md b/doc/manual/source/release-notes/rl-0.12.md index 3a4aba07d69..3541b6487e7 100644 --- a/doc/manual/source/release-notes/rl-0.12.md +++ b/doc/manual/source/release-notes/rl-0.12.md @@ -80,7 +80,7 @@ ... the following paths will be downloaded/copied (30.02 MiB): /nix/store/4m8pvgy2dcjgppf5b4cj5l6wyshjhalj-samba-3.2.4 - /nix/store/7h1kwcj29ip8vk26rhmx6bfjraxp0g4l-libunwind-0.98.6 + /nix/store/spc1m987vlibchdx369qwa391s738s7l-libunwind-0.98.6 ... - Language features: diff --git a/doc/manual/source/release-notes/rl-0.8.md b/doc/manual/source/release-notes/rl-0.8.md index 5ba6e0e7217..2bc6352c354 100644 --- a/doc/manual/source/release-notes/rl-0.8.md +++ b/doc/manual/source/release-notes/rl-0.8.md @@ -63,7 +63,7 @@ Nix 0.8 has the following improvements: can query all paths that directly or indirectly use a certain Glibc: $ nix-store -q --referrers-closure \ - /nix/store/8lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4 + /nix/store/1a6mdrjz4wn7b9sfmcw5ggbk1mi281mh-glibc-2.3.4 - The concept of fixed-output derivations has been formalised. 
Previously, functions such as `fetchurl` in Nixpkgs used a hack diff --git a/doc/manual/source/release-notes/rl-2.0.md b/doc/manual/source/release-notes/rl-2.0.md index 25cc5e0a5f3..181940f616f 100644 --- a/doc/manual/source/release-notes/rl-2.0.md +++ b/doc/manual/source/release-notes/rl-2.0.md @@ -66,7 +66,7 @@ This release has the following new features: nix copy --to ssh://machine nixpkgs.hello - nix copy --to ssh://machine /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + nix copy --to ssh://machine /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 nix copy --to ssh://machine '(with import {}; hello)' @@ -187,7 +187,7 @@ This release has the following new features: former is primarily useful in conjunction with remote stores, e.g. - nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + nix ls-store --store https://cache.nixos.org/ -lR /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 lists the contents of path in a binary cache. diff --git a/doc/manual/source/release-notes/rl-2.13.md b/doc/manual/source/release-notes/rl-2.13.md index 168708113ea..6976f91501b 100644 --- a/doc/manual/source/release-notes/rl-2.13.md +++ b/doc/manual/source/release-notes/rl-2.13.md @@ -25,7 +25,7 @@ * Allow explicitly selecting outputs in a store derivation installable, just like we can do with other sorts of installables. 
For example, ```shell-session - # nix build /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev + # nix build /nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^dev ``` now works just as ```shell-session diff --git a/doc/manual/source/release-notes/rl-2.15.md b/doc/manual/source/release-notes/rl-2.15.md index e7e52631ba4..1d30c70a4c0 100644 --- a/doc/manual/source/release-notes/rl-2.15.md +++ b/doc/manual/source/release-notes/rl-2.15.md @@ -18,13 +18,13 @@ For example, ```shell-session - $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv + $ nix path-info /nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv ``` now gives info about the derivation itself, while ```shell-session - $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^* + $ nix path-info /nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^* ``` provides information about each of its outputs. diff --git a/doc/manual/source/release-notes/rl-2.19.md b/doc/manual/source/release-notes/rl-2.19.md index 04f8c9c28d2..0596ef90961 100644 --- a/doc/manual/source/release-notes/rl-2.19.md +++ b/doc/manual/source/release-notes/rl-2.19.md @@ -45,7 +45,7 @@ ```json5 [ { - "path": "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15", + "path": "/nix/store/fvqsvk65d38p8qqir371ii0hyqxvjcw6-bash-5.2-p15", "valid": true, // ... }, @@ -60,7 +60,7 @@ ```json5 { - "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15": { + "/nix/store/fvqsvk65d38p8qqir371ii0hyqxvjcw6-bash-5.2-p15": { // ... }, "/nix/store/wffw7l0alvs3iw94cbgi1gmmbmw99sqb-home-manager-path": null, @@ -69,7 +69,7 @@ This makes it match `nix derivation show`, which also maps store paths to information. 
-- When Nix is installed using the [binary installer](@docroot@/installation/installing-binary.md), in supported shells (Bash, Zsh, Fish) +- When Nix is installed using the binary installer, in supported shells (Bash, Zsh, Fish) [`XDG_DATA_DIRS`](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html#variables) is now populated with the path to the `/share` subdirectory of the current profile. This means that command completion scripts, `.desktop` files, and similar artifacts installed via [`nix-env`](@docroot@/command-ref/nix-env.md) or [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) (experimental) can be found by any program that follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). diff --git a/doc/manual/source/release-notes/rl-2.20.md b/doc/manual/source/release-notes/rl-2.20.md index eb724f600aa..d54646379c8 100644 --- a/doc/manual/source/release-notes/rl-2.20.md +++ b/doc/manual/source/release-notes/rl-2.20.md @@ -182,7 +182,7 @@ «partially applied primop map» nix-repl> builtins.trace lib.id "my-value" - trace: «lambda id @ /nix/store/8rrzq23h2zq7sv5l2vhw44kls5w0f654-source/lib/trivial.nix:26:5» + trace: «lambda id @ /nix/store/kgr5lnaiiv08wb7k324yv1i1npjmrvjc-source/lib/trivial.nix:26:5» "my-value" ``` diff --git a/doc/manual/source/release-notes/rl-2.24.md b/doc/manual/source/release-notes/rl-2.24.md index e9b46bb22b7..f608fb54f7d 100644 --- a/doc/manual/source/release-notes/rl-2.24.md +++ b/doc/manual/source/release-notes/rl-2.24.md @@ -268,6 +268,21 @@ be configured using the `warn-large-path-threshold` setting, e.g. `--warn-large-path-threshold 100M`. +- Wrap filesystem exceptions more correctly [#11378](https://github.com/NixOS/nix/pull/11378) + + With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions. 
+ + This led to no longer generating error traces, for example when listing a non-existing directory. + + This version catches these types of exception correctly and wraps them into Nix's own exeception type. + + Author: [**@Mic92**](https://github.com/Mic92) + +- `` uses TLS verification [#11585](https://github.com/NixOS/nix/pull/11585) + + Previously `` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `` will now fail if you're fetching from a HTTPS server that does not have a valid certificate. + + `` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue. ## Contributors diff --git a/doc/manual/source/store/store-path.md b/doc/manual/source/store/store-path.md index 4061f3653f6..08b024e4a84 100644 --- a/doc/manual/source/store/store-path.md +++ b/doc/manual/source/store/store-path.md @@ -2,7 +2,7 @@ > **Example** > -> `/nix/store/a040m110amc4h71lds2jmr8qrkj2jhxd-git-2.38.1` +> `/nix/store/jf6gn2dzna4nmsfbdxsd7kwhsk6gnnlr-git-2.38.1` > > A rendered store path @@ -22,7 +22,7 @@ Store paths are pairs of > **Example** > -> - Digest: `b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z` +> - Digest: `q06x3jll2yfzckz2bzqak089p43ixkkq` > - Name: `firefox-33.1` To make store objects accessible to operating system processes, stores have to expose store objects through the file system. 
@@ -38,7 +38,7 @@ A store path is rendered to a file system path as the concatenation of > **Example** > > ``` -> /nix/store/b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z-firefox-33.1 +> /nix/store/q06x3jll2yfzckz2bzqak089p43ixkkq-firefox-33.1 > |--------| |------------------------------| |----------| > store directory digest name > ``` diff --git a/doc/manual/source/store/types/index.md.in b/doc/manual/source/store/types/index.md.in index a35161ce8fa..b211ac98fe3 100644 --- a/doc/manual/source/store/types/index.md.in +++ b/doc/manual/source/store/types/index.md.in @@ -8,7 +8,7 @@ Stores are specified using a URL-like syntax. For example, the command ```console # nix path-info --store https://cache.nixos.org/ --json \ - /nix/store/a7gvj343m05j2s32xcnwr35v31ynlypr-coreutils-9.1 + /nix/store/1542dip9i7k4f24y6hqgd04hmvid9hr5-coreutils-9.1 ``` fetches information about a store path in the HTTP binary cache diff --git a/docker.nix b/docker.nix index 32205224b73..72c13663488 100644 --- a/docker.nix +++ b/docker.nix @@ -8,6 +8,7 @@ # Image configuration name ? "nix", tag ? "latest", + fromImage ? null, bundleNixpkgs ? true, channelName ? "nixpkgs", channelURL ? "https://channels.nixos.org/nixpkgs-unstable", @@ -27,6 +28,8 @@ "org.opencontainers.image.description" = "Nix container image"; }, Cmd ? [ (lib.getExe bashInteractive) ], + extraPrePaths ? [ ], + extraPostPaths ? [ ], # Default Packages nix ? pkgs.nix, bashInteractive ? 
pkgs.bashInteractive, @@ -336,7 +339,7 @@ let globalFlakeRegistryPath="$nixCacheDir/flake-registry.json" ln -s ${flake-registry-path} $out$globalFlakeRegistryPath mkdir -p $out/nix/var/nix/gcroots/auto - rootName=$(${lib.getExe' nix "nix"} --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) + rootName=$(${lib.getExe' nix "nix"} hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName '') ); @@ -352,6 +355,7 @@ dockerTools.buildLayeredImageWithNixDb { gid uname gname + fromImage ; contents = [ baseSystem ]; @@ -373,11 +377,15 @@ dockerTools.buildLayeredImageWithNixDb { Env = [ "USER=${uname}" "PATH=${ - lib.concatStringsSep ":" [ - "${userHome}/.nix-profile/bin" - "/nix/var/nix/profiles/default/bin" - "/nix/var/nix/profiles/default/sbin" - ] + lib.concatStringsSep ":" ( + extraPrePaths + ++ [ + "${userHome}/.nix-profile/bin" + "/nix/var/nix/profiles/default/bin" + "/nix/var/nix/profiles/default/sbin" + ] + ++ extraPostPaths + ) }" "MANPATH=${ lib.concatStringsSep ":" [ diff --git a/flake.lock b/flake.lock index 19f7b0c1c21..f56706ec761 100644 --- a/flake.lock +++ b/flake.lock @@ -3,11 +3,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", "owner": "edolstra", "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", "type": "github" }, "original": { @@ -23,55 +23,51 @@ ] }, "locked": { - "lastModified": 1733312601, - "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", - "type": "github" + "lastModified": 1748821116, + "narHash": 
"sha256-F82+gS044J1APL0n4hH50GYdPRv/5JWm34oCJYmVKdE=", + "rev": "49f0870db23e8c1ca0b5259734a02cd9e1e371a1", + "revCount": 377, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/hercules-ci/flake-parts/0.1.377%2Brev-49f0870db23e8c1ca0b5259734a02cd9e1e371a1/01972f28-554a-73f8-91f4-d488cc502f08/source.tar.gz" }, "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/hercules-ci/flake-parts/0.1" } }, "git-hooks-nix": { "inputs": { - "flake-compat": [], + "flake-compat": "flake-compat", "gitignore": [], "nixpkgs": [ "nixpkgs" - ], - "nixpkgs-stable": [ - "nixpkgs" ] }, "locked": { - "lastModified": 1734279981, - "narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785", - "type": "github" + "lastModified": 1747372754, + "narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=", + "rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46", + "revCount": 1026, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/cachix/git-hooks.nix/0.1.1026%2Brev-80479b6ec16fefd9c1db3ea13aeb038c60530f46/0196d79a-1b35-7b8e-a021-c894fb62163d/source.tar.gz" }, "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941" } }, "nixpkgs": { "locked": { - "lastModified": 1763948260, - "narHash": "sha256-zZk7fn2ARAqmLwaYTpxBJmj81KIdz11NiWt7ydHHD/M=", - "rev": "1c8ba8d3f7634acac4a2094eef7c32ad9106532c", + "lastModified": 1761597516, + "narHash": "sha256-wxX7u6D2rpkJLWkZ2E932SIvDJW8+ON/0Yy8+a5vsDU=", + "rev": "daf6dc47aa4b44791372d6139ab7b25269184d55", + "revCount": 811874, "type": "tarball", - "url": "https://releases.nixos.org/nixos/25.05/nixos-25.05.813095.1c8ba8d3f763/nixexprs.tar.xz" + "url": 
"https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2505.811874%2Brev-daf6dc47aa4b44791372d6139ab7b25269184d55/019a3494-3498-707e-9086-1fb81badc7fe/source.tar.gz" }, "original": { "type": "tarball", - "url": "https://channels.nixos.org/nixos-25.05/nixexprs.tar.xz" + "url": "https://flakehub.com/f/NixOS/nixpkgs/0.2505" } }, "nixpkgs-23-11": { @@ -108,7 +104,6 @@ }, "root": { "inputs": { - "flake-compat": "flake-compat", "flake-parts": "flake-parts", "git-hooks-nix": "git-hooks-nix", "nixpkgs": "nixpkgs", diff --git a/flake.nix b/flake.nix index d35363ab2e5..0d04d3b1825 100644 --- a/flake.nix +++ b/flake.nix @@ -1,24 +1,18 @@ { description = "The purely functional package manager"; - inputs.nixpkgs.url = "https://channels.nixos.org/nixos-25.05/nixexprs.tar.xz"; + inputs.nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.2505"; inputs.nixpkgs-regression.url = "github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2"; inputs.nixpkgs-23-11.url = "github:NixOS/nixpkgs/a62e6edd6d5e1fa0329b8653c801147986f8d446"; - inputs.flake-compat = { - url = "github:edolstra/flake-compat"; - flake = false; - }; # dev tooling - inputs.flake-parts.url = "github:hercules-ci/flake-parts"; - inputs.git-hooks-nix.url = "github:cachix/git-hooks.nix"; + inputs.flake-parts.url = "https://flakehub.com/f/hercules-ci/flake-parts/0.1"; + inputs.git-hooks-nix.url = "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941"; # work around https://github.com/NixOS/nix/issues/7730 inputs.flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs"; inputs.git-hooks-nix.inputs.nixpkgs.follows = "nixpkgs"; - inputs.git-hooks-nix.inputs.nixpkgs-stable.follows = "nixpkgs"; # work around 7730 and https://github.com/NixOS/nix/issues/7807 - inputs.git-hooks-nix.inputs.flake-compat.follows = ""; inputs.git-hooks-nix.inputs.gitignore.follows = ""; outputs = @@ -34,26 +28,24 @@ officialRelease = true; - linux32BitSystems = [ "i686-linux" ]; + linux32BitSystems = [ ]; linux64BitSystems = [ "x86_64-linux" "aarch64-linux" 
]; linuxSystems = linux32BitSystems ++ linux64BitSystems; darwinSystems = [ - "x86_64-darwin" "aarch64-darwin" ]; systems = linuxSystems ++ darwinSystems; crossSystems = [ - "armv6l-unknown-linux-gnueabihf" - "armv7l-unknown-linux-gnueabihf" - "riscv64-unknown-linux-gnu" + #"armv6l-unknown-linux-gnueabihf" + #"armv7l-unknown-linux-gnueabihf" + #"riscv64-unknown-linux-gnu" # Disabled because of https://github.com/NixOS/nixpkgs/issues/344423 # "x86_64-unknown-netbsd" - "x86_64-unknown-freebsd" - "x86_64-w64-mingw32" + #"x86_64-unknown-freebsd" ]; stdenvs = [ @@ -372,6 +364,40 @@ nix-manual-manpages-only = nixpkgsFor.${system}.native.nixComponents2.nix-manual-manpages-only; nix-internal-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-internal-api-docs; nix-external-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-external-api-docs; + + fallbackPathsNix = + let + pkgs = nixpkgsFor.${system}.native; + + closures = forAllSystems (system: self.packages.${system}.default.outPath); + + closures_json = + pkgs.runCommand "versions.json" + { + buildInputs = [ pkgs.jq ]; + passAsFile = [ "json" ]; + json = builtins.toJSON closures; + } + '' + cat "$jsonPath" | jq . > $out + ''; + + closures_nix = + pkgs.runCommand "versions.nix" + { + buildInputs = [ pkgs.jq ]; + passAsFile = [ "template" ]; + jsonPath = closures_json; + template = '' + builtins.fromJSON('''@closures@''') + ''; + } + '' + export closures=$(cat "$jsonPath"); + substituteAll "$templatePath" "$out" + ''; + in + closures_nix; } # We need to flatten recursive attribute sets of derivations to pass `flake check`. // @@ -434,8 +460,6 @@ { # These attributes go right into `packages.`. 
"${pkgName}" = nixpkgsFor.${system}.native.nixComponents2.${pkgName}; - "${pkgName}-static" = nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName}; - "${pkgName}-llvm" = nixpkgsFor.${system}.native.pkgsLLVM.nixComponents2.${pkgName}; } // lib.optionalAttrs supportsCross ( flatMapAttrs (lib.genAttrs crossSystems (_: { })) ( @@ -512,32 +536,6 @@ } ) ) - // lib.optionalAttrs (!nixpkgsFor.${system}.native.stdenv.isDarwin) ( - prefixAttrs "static" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsStatic; - } - ) - ) - // prefixAttrs "llvm" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsLLVM; - } - ) - ) - // prefixAttrs "cross" ( - forAllCrossSystems ( - crossSystem: - makeShell { - pkgs = nixpkgsFor.${system}.cross.${crossSystem}; - } - ) - ) - ) // { native = self.devShells.${system}.native-stdenv; default = self.devShells.${system}.native; diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 414e6c570ab..7f7447b19e4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -102,6 +102,7 @@ # Don't format vendored code ''^doc/manual/redirects\.js$'' ''^doc/manual/theme/highlight\.js$'' + ''^src/libfetchers/builtin-flake-registry\.json$'' ]; }; shellcheck = { diff --git a/maintainers/invalidate-store-paths.sh b/maintainers/invalidate-store-paths.sh new file mode 100755 index 00000000000..a075e262183 --- /dev/null +++ b/maintainers/invalidate-store-paths.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +set -euo pipefail +set -x + +git ls-files -z \ + | xargs -0 grep -o '[0123456789abcdfghijklmnpqrsvwxyz]\{32\}' 2> /dev/null \ + | rev \ + | cut -d: -f1 \ + | rev \ + | sort \ + | uniq \ + | while read -r oldhash; do + if ! 
curl --fail -I "https://cache.nixos.org/$oldhash.narinfo" > /dev/null 2>&1; then + continue + fi + + newhash=$( + nix eval --expr "builtins.toFile \"006c6ssvddri1sg34wnw65mzd05pcp3qliylxlhv49binldajba5\" \"$oldhash\"" \ + | cut -d- -f1 \ + | cut -d/ -f4 + ) + + msg=$(printf "bad: %s -> %s" "$oldhash" "$newhash") + echo "$msg" + git ls-files -z \ + | xargs -0 grep -a -l "$oldhash" 2> /dev/null \ + | while read -r file; do + [ -L "$file" ] && continue + perl -pi -e "s/$oldhash/$newhash/g" "$file" || true + done || true + git commit -am "$msg" + done diff --git a/maintainers/keys/158A6F530EA202E5F651611314FAEA63448E1DF9.asc b/maintainers/keys/158A6F530EA202E5F651611314FAEA63448E1DF9.asc new file mode 100644 index 00000000000..ea00c2c3d54 --- /dev/null +++ b/maintainers/keys/158A6F530EA202E5F651611314FAEA63448E1DF9.asc @@ -0,0 +1,110 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGPtMiwBEAC0sFZW2QW/OaDjKm5zGRpDvHXDsMIUtlHfoi5ce8pocC63W05o +FSXbUZjZ1VfYO8lT8DFANCzTkiXYaZx0cPRG2pVY4AOQZDNFt5XrAyvw496XCAIM +DTYGFLjCqgjPt9RUFEy4MyHPJTEpB0x3rXgT4ILNu9vsj9Q0vttps7SpbZ3Ldq5H +o/BBbLW77q/vNjpYzCbBIXF7ycUGpnNv9Go/WuiDnrBMcyxh+8kjjIHB5cxZSnjJ +DUv681+m83v+gLZQGX/jexQrrf5JpS0X9qEnhGLrNUDhtyv5ud3Je4EfamkjLVVC +RlNLofgflOCsl/tP80i+K7S1QdKhUALxuJ6H0prYUflGBDxDyC8XYuJ62TT0OUpa +vJvgwVlCq8/jq+ykYQXlbuBVOzi5wAuI4l3+HqreSQYPSiwe+6N590Zbafdv1fvN +WFtZKCTGMqfyaaAnppioH9/+NWkI2AQxaYVasYM/JEYvY9pJgA7alh51jHW4JglP +ErypKfBKPKJID0QENqYoa3bDDCihuNWhgQf9dxzPlj2ckd35Zb6w4DfuSmtjaa9D +o0jZVY1JbFuxBqP09+saVPrxLHgmPxjcdzPGQQtAqdO2vyJXNEGLFMoVEZPNaLo3 +QmcIJnT7oSck+4vGfOYtWUHXQynu/Tnwsv2XkA/uyw8HNe+RRMqv/apnzQARAQAB +tCdTZXJnZWkgWmltbWVybWFuIDxzZXJnZWlAemltbWVybWFuLmZvbz6JAlEEEwEK +ADsCGwEFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQQVim9TDqIC5fZRYRMU+upj +RI4d+QUCaUbsaAIZAQAKCRAU+upjRI4d+VdDD/492HRaJ/8V7R7VUzkafmb2Hb28 +SLf7oiB8Uq9I7SukiDEaIT1fUhquYWQ9KWpPRNR1TX6ApXnIeuJRMGFoDVIRnmnr +cKnYYXfqqc81VxIyKvaumB7KWbS7G4Nbor8AH1ouOOOMMS50OTJOWQA4A26inIuG 
+n+7L8MeS5aT+3uNKDoTKsidC47vnaxNMcke1taPfbfo7vn69PsRCM/g9/7TQYU8b +6xp+pM9Ao9nJneRk2YCpsGYRrWTpaik0DFKnfpPKJM/yunhtGLF2IYAp3l1mvHPK +nnzo92zjpQuZwazEIK+23V1vRT4IjM2BewbJPAzf2/UuxEjjgNQm0tOtH2JhFNeB +VM0BVrGxWrrwrsmv6lWghTtBc6zRWyHrj/rpjtVQNmeKYrHWJeXwVz1rqgGPmB2N +k0MZD1UjHHhEs1Cntn7yLmxTPztRJCtR+euRu81Uo2NAvrMJ4xsDjaM0LeLTnzjV +9AsPjD188dOFyz7VExZum4+XaaEJ41FIPLEqU3U0GAa6stEy0ylSlIN4x9aiXXVW +xfzHHchS5jK6QAjuZxN8t01GRactNylINRf7uoTECFZTtXNqfeuk0HQxBH0LuKVE +0PxJbcNI4mVWw1KgTJ8PUVC1IXP3sEpqPdJOYiRXgnpcS26fWOBu0aQ4mhxiaJhr +/zBfrkLEqp20TdNDZLkCDQRj7TM1ARAAt73xO24curnHTTgXkkVMMRzcMLx3Mb1a +2FuddxC5hzTpEpw01L91UBrXVJEg9K2KAwP5CtCLgPCqXr47Tm7krvHxWwBksgY/ +6aHRsoPQfCFUZHc0aiO+C4NCzR+aEeGKn66Oc1Hq9oUTpDgiBWhsuEPiyA1OSGF0 +4L0jeTCqfm68kWp4PIK9yuugkdDsoyj6TonuMsb3V5ctHLqop9KH+eHSkUTPo+Lk ++bxaeAOJ1UfbohgbRbrYKAfsaghhOMDH3R1w2pvtUJz+sDbuQsiPFTqbxsXDTFws +H4N/AQCYnnvOhqEek2sOEZ19bJXt5UrAr10mX4PGmAkWqE1JWBxpOKG3BXSGOTu1 +3dFhQfPMK+PmvUrs0kcWQr53K/aRUdKKhIfTcMfkqYTGPK5HclHph24WjXj3QFFA +SjksQTdm6486ZmLZK4CTbAFOPfTF/aWg8gu9v4ihdq6lqHNNXxv2xBAChcd59H7p +D5zy9z8SpwWR9V5JDmlF6HWIIau1c6lSsQq1xHvYM8EuPe03vJvor+2u/cn5zYF1 +5ZxAuPI2i5vtavg1s8ZGAAogJ9dVcP36LdJfL9quXWvmovkd//qHIepBB+l/zQio +ZRDZlIcfV3Xycaqsb5OqHGARHE0097koipMt5y/iXlqG4Ruue6Idb8bW96EKpaWj +kKy/iNfQfQMAEQEAAYkEcgQYAQoAJgIbAhYhBBWKb1MOogLl9lFhExT66mNEjh35 +BQJpRuz3BQkJHCDCAkDBdCAEGQEKAB0WIQRK3hK0WyJ4BicGpcmpsLVXymMjJQUC +Y+0zNQAKCRCpsLVXymMjJbdSD/9+f1FOOeGDAJI6Duo5fsWnf4xJJdtQtDbz6d2A +SeDapxeJ3zWfKBD0wu5sISEa0uiWsYSmLtsa2SqVAKHlEaMGRR+tkBMPQ+rvgI4c +62YjGTgm+IPd+NFIn+ixFU1hpinTh+KhUEoeOwWCvKs9nZfSG9vkienfiG0bBxo2 +zrvBzXA50x5hbUL+ghKu/AVfN9qZDwh30O4KZTwk4g4cM9SeaQa4YvHYIS3IEhDZ +hGybwrrqV9cs92ln4IJw9WCy9QReBNrdeFgC4+3ziUp1QsG3RvqrtuMttwBVC1Z3 +bj5QjLLOREhhodfvk98t9yVkragObb4rGrLo1mWuF0c4mJGvXwnrqhCMvzv4M+0T +Zdrmw6YpGkGOaOPghVuwoTtqSAkl+zFWIJS89jidvkYG3EqKAkgLKog/TQReCq13 +HWrF8cMck+Rf2K8k26q/RNZaA9ZUKjLExzz8lsWmd2C7rvkGLrlxnzxz0gGyNR3Q +KK74vcPhqeABt2GSkHtEXZFFA9IVVzwlRWK3e0S+mVQnZVjNL+cBPn3/hZHMLesB 
+CucyYZv+DxvT+JkYXBkGSw4s3hpABqGym7gdPUIa0q4rbBFG6xP5sLLBG4yru8vV +2dyCMmFqRuxpT49uNfyQ6Vj+dobN6qHnP/9NwfzOixXYBHXR6LBqb/M+iCiJaaIn +uiRLHwkQFPrqY0SOHfkQpA//W51vj8meuz7snRO+vZFcjLneFFzqfh1Jdz8IqDpO +CkI5pBJmi8e0oSe6r68MkahiQLlYPwm7d+sjHvJhPWipNKWq/uwCgBs+Ac1lpPXR +MwLbrZukcLMYlLmb2MrCKmjcMt0BZsZKBNYL3a3X9nHgwXdeqFYS4WQDMCCc09lz +9YqfdoEsqRO4qN7D0hFqnwjOzb34ixZ6UO8a8ekY9QKxAgWc9fJWGMg6Pjdg4qsK +nqymOIAdGVOJdoRM46wKGVBvbsF2gNfQU4XyzgJo5vHGFwJm6EoSnODlL5e2wsQh +uN1oqBt/8ef/plloMEqVBweUBATqSqjRF6IhhYJvWVuQHQL1p1vnV9FebiVj34ir +Z8ID+o0AnTJcclbUcDwannGJ0cuDcPhk/v/ahVuoMERCi12qnMBo5B/e6Omyh1yB +4pbf4GATGGQipDQG75eC/kP2GQEqJP5WYN0Ar8Le/AA/2xyL7upW0yIByyXCwGEb +JRwEgU3+bPyu58bFt8Pftit6J7rA3oBVVMOPrYH5eZwRaj5m2RptwKGL6BfHnhNv +ZqmCq9EBGX6L1NI0xHMjEFfXJ8jU01XdfG8nCqkwqsHwslXLhqjJphfHcx89YwbV +/15GCuURAv1cKe/7277sOhcvP/QpQqSWgvYExHw8PeFJcTYtF2NrRgNwcQsWS1Rj +gXa5Ag0EY+0zcAEQANC5N6kSfezuucAgi+X3BD+MT37mxQyvICSggEJf1LDSmy0+ +bnvD7setL8CP9etTA2fcVNYKI1oboMyhoCnsRP2jDdv1iXOI/hZg4wSb/D1yUkae +fUpxv3Wuci2QKavH2MfraDD7BFMbsQeMcHtn4Rk216T6jndZHnzT1Ih7iX0XeQPb +li5fojOiZssgWAVT4HPXFCJB6lI35Hjp35oRYwrtMmu5INinZ79n9h1igGtt1ItZ +b7rQKNd772Jxcn4UU71ovORSL/xT5i5sxZ+evQOxkpqUAokMOFaoHcOXLmA1NsFv +yryXHK4Ioq9ap2jKlLTWkJWjua9JZ4AmKhbvT8X4ELxIKSCAdJKAWP8ZHbXNu5MD +aznyzZQLxSO7uFvu356De75mI5iohZNj5wB5Wju71pBiorTKVj4+iJ4e+xVIzFdG +hFC0DehNcl2t9w/y8qHwIQ1yUAjXHLXq0/2jsVeH6bU5q/MsgvUP1jcFe0eyOpxy +CDvyFdzZFbI57TnB/fvcZTRZ5ewXMFpH8gzuoFzAjUAP95UjYKgaGdrNPNIy28Ii +4zhvdghei2+n9jgiMfcGQg8lyfH5yF0vWWWynX0KcJsRwEZoL2EauVdwq4PcYOoU +pQFhpcreCjD4LdZ4yRU4InbhcUogXjrQ9Dz01TbPmQD5b5iso21bCEFBXrhzABEB +AAGJAjwEGAEKACYCGwwWIQQVim9TDqIC5fZRYRMU+upjRI4d+QUCaUbs9wUJCRwg +hwAKCRAU+upjRI4d+X/XD/sH5xvHPfTJq52v8weFmB52up+DzqG2lyhGdoUQ1Muw +dRDLTLXLJrFdfpoOo7/j4Scr0rdc7/dpCn0DLcPuCoPxu+SkjEnVehFmZrGSv7Ga +x9dHr3DBh42fdlX/U/EnDuyosY0JU1gNF2/6FIA+bTTOFE3RxfN906RjslYQDjMZ +UAlSeLYHOZofdltI0YIr32vrxgdWQGZXPxU4XusDUc0z163OO+TGg7iUNWFZP5Qj +ubM7e0YbDX0NPIshk8us99YJmrWnhaix1/W5ryO3DXiGaQ7XFi9u7QofRqvRIctg 
+QXavdepkzJow9V9qpMECAJePIuICq7rm+xy+njjbuF436W7390bfVBwRr+FPADsl +jgQP4KvY5rykss30kheom8wNEbveWkhH5oTfH9b7O4KXJfpfJzrlgOWp2BD9JL8t +/M4HvFXTr2a75H/QbHK5OFrZeGATuv9OTxv7EZvnrPXU+DYTFldpu7TrNNqKCoj3 +ZyXmc3Hhg5kskDhfHJppaeOayuhMOpT3ud1MFzROY5SLVIH8rBR12KUgsCUYQcGs +Iy0+0QvEGkjb4cAH1NK3VlbqVNsy1RmqRt2B28R2ueewDfTOoqkzt4MmzLqTdnAx +mTqmHmkEKhEf3K4MRNUPO2yieUg2COk5l6x9HhAnoxxeOZrTmcMsPY/UViG2HEPm +ybkCDQRj7TRDARAA9DZuKdfKq4Bs2+NwxC0aplljWOl8VIsEVg+Q8agD7/HU6/b6 +Dry0njtWybn2x6Axf/nUdeOC01Fi1lmht/fpj6mRkgAvd/V6P10xnsUoykPSDSTh +P25MFFGW3JAA82bwdJ4AJpEQvTZG2nTb3237vlBiI1qHQrac8GYkju2O4UfySRN6 +7cyi7bMf2pjWBBOEhaNy4b6CMDsb32P/N5J7sTE/TXgrS+u4ITIgjzSrkUkh5Z+B +8QVRa7xPIDZJdvZWTEXWu5fgRPZvxbr154GIkWJkFzlDoB1UcO56/uzRUuKhEV6o +HW3LMUuWdPMjpHpq8hrL0G2rDniJFUtbDFzHdZK1LUU3T2BJM8rjI3D/euph+IDT +27vl5qo72zCYE/iKzx4FMLZcQvx1kUAxkPX8l+dzZEwKeRIIpFDxQvatRtl+z0bM +jbkpDb+Yjv66sC4dYRpgTTGX6rok0PWHR3IxDNzyf2j8zQ4LFJ+rVBM1GjGSt6mG +j9TeL8CVeiSp4SuJ7I/FJVPHsKb50m+BDzeB31qTydNqh2kKr0DVAUa+TUsCr7e0 +OYr8WE2adJcRXIW0qw50xXF+W7/05GqSCVD0dpeOUdBTQTsSkQmM3/0hcj9aVo9e +UDCM9RF0WRqiDAoHzJFfg+ztamkQI5HO6CklC4Ok22qrHRf6HDNYSuT6QFkAEQEA +AYkCPQQYAQoAJwMbIAQWIQQVim9TDqIC5fZRYRMU+upjRI4d+QUCaUbs9wUJCRwf +tAAKCRAU+upjRI4d+Y+cD/9yllG6uo934pcHNsVppZBfREFwSc8ywlbosCuSVpay +PjSqgrWwDrnqrsk0F2kUdC6rR3BIcXbn+lA9KqylH+cCXAJCkh8EDq6TlQ7Lt5EV +w1U0MAMXOyxPwDymQ/BO+iDyjXWkRRYgbF5XiFhCfGeuKyhkhACisAgNZ1uA1P5k +0SJYc14YfEhQkB46Y20SpfVHRsQ46FyNB6GHbmTmfoO8La8VTh++7GBdh85HfvkG +VNQ3wpi5oXsOLN9+MJOezc0XsW2LQsKQj1/J7QKzGh+lxN5cemsA5aqPzh8dyxeT +0lYRFp4AHkimqGUomVpRkbegMIPxXqOE+ZAmsddErw0UtmrKxcmMptOJwNgYzEgu +++2vtqerL/NYp+wsdcWaBjCz2F3NiwHgNli7NSB/FPwucZZ5gN5C4SnmeFzrGdHg +Oy+tQUN6ayQKljHeBO7CjMlsFNo/dcVrEMa1ShxBMqlj/6ivoEhktLz0Nru4FwNU +xE5SJYDYfpjD7Ws8y4LoXgWXjFHrMO6N9GzqLN/e8LT7I+w4ps2MrgJ8QSrelmQ3 +rjkxp3uWp5v2lqy4rLfpi9iB6zIAeoN2eU1yOM9joxOYMxKYaYeYyP1Mm90wFol8 +LcTSaN+tVniPddBiL6zvsGBEMbCR9XN3EQ+mErbuw5ovWBOCrr+dvN3FxvD11y4J +7w== +=mXYP +-----END PGP PUBLIC KEY BLOCK----- diff --git 
a/maintainers/keys/B541D55301270E0BCF15CA5D8170B4726D7198DE.asc b/maintainers/keys/B541D55301270E0BCF15CA5D8170B4726D7198DE.asc new file mode 100644 index 00000000000..dee0aef4cc6 --- /dev/null +++ b/maintainers/keys/B541D55301270E0BCF15CA5D8170B4726D7198DE.asc @@ -0,0 +1,51 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFZu2zwBCADfatenjH3cvhlU6AeInvp4R0JmPBG942aghFj1Qh57smRcO5Bv +y9mqrX3UDdmVvu58V3k1k9/GzPnAG1t+c7ohdymv/AMuNY4pE2sfxx7bX+mncTHX +5wthipn8kTNm4WjREjCJM1Bm5sozzEZetED3+0/dWlnHl8b38evnLsD+WbSrDPVp +o6M6Eg9IfMwTfcXzdmLmSnGolBWDQ9i1a0x0r3o+sDW5UTnr7jVP+zILcnOZ1Ewl +Rn9OJ4Qg3ULM7WTMDYpKH4BO7RLR3aJgmsFAHp17vgUnzzFBZ10MCS3UOyUNoyph +xo3belf7Q9nrHcSNbqSeQuBnW/vafAZUreAlABEBAAG0IkVlbGNvIERvbHN0cmEg +PGVkb2xzdHJhQGdtYWlsLmNvbT6JATwEEwEIACYCGyMHCwkIBwMCAQYVCAIJCgsE +FgIDAQIeAQIXgAUCVm7etAIZAQAKCRCBcLRybXGY3q51B/96qt41tmcDSzrj/UTl +O6rErfW5zFvVsJTZ95Duwu87t/DVhw5lKBQcjALqVddufw1nMzyN/tSOMVDW8xe4 +wMEdcU4+QAMzNX80enuyinsw1glxfLcK0+VbTvqNIfw0sG3MjPqNs6cK2VRfMHK4 +paJjytBVICszNX9TfjLyIpKKoSSo1vqnT47LDZ5GIMy7l9Cs2sO/rqQHSPcR79yz +8m8tbHpDDEMZmJeklckKP2QoiqnHiIvlisDxLclYnUmNaPdaN/f++qZz5Yqvu1n+ +sNUBA5eLaZH64Uy2SwtABxO3JPJ8nQ2+SFZ7ocFm4Gcdv4aM+Ura9S6fvM91tEJp +yAQOiQE5BBMBCAAjBQJWbts8AhsjBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AA +CgkQgXC0cm1xmN6sIAgAielxO8zJREqEkA2xudg/o4e9ZlNZ3X1NvY8OzJH/qlB2 +SmwKqwifhtbC1K0uavXA7eaxdtd2zrI+Yq7IooUyv7juMjHTZhLcFbR5iVkQ4Mfp +JmeHXJ/ChYKxD5mMj/C3WbCZ91oCSNZ6Iyi5fvQj/691OC4q+y/2NEUcOI8D8cw8 +XKHbKtceFYc+nZmdOv3ZZrNTSN/kszGViNNLKgnpPdDVPtLp+vjXtbmitiFG2HL/ +WfbJ+3Gh2Yr1Vy3O9dWKH++e1AmIv7WWqmUjRFVpqC/wr7/BLaScWT8WKF5vkshU +gq8Ez1/cuizsgs3wQIZWgXKQK5njvwnbKg+Zmh/uGbQmRWVsY28gRG9sc3RyYSA8 +ZWVsY28uZG9sc3RyYUB0d2VhZy5pbz6JAU4EEwEIADgWIQS1QdVTAScOC88Vyl2B +cLRybXGY3gUCXELt4gIbIwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCBcLRy +bXGY3ujFCADfS5D1xHU8KH6TpqgssSggYVq62Wwn/Ga+4XPPetM+ajcXagyH6SwB +mxlHICcnv9xC93ryiTI10P1ADJl+aBsI66wEdHBU+ty4RTDy4JZNUPtmRCk9LhSc +mtUO3ry/wtWkRLdJxP49hg7BbQvWoU0M6WODp7SJjPKPWNX64mzHBeOuy+DqGCbM 
+lpGNCvW8ahU/ewbm7+xwWmzqLDoWzXjHsdF4QdzMVM/vkAgWEP4y0wEqFASzIYaR +GNEkBWU4OQVq5Bdm9+wWWAgsbM0FJAQl0GDqnz4QxWzxxCAAXdbh9F5ffafWYsA9 +bise4ZQLkvYo6iUnrcFm4dtZbT8iL3gptCtFZWxjbyBEb2xzdHJhIDxlZWxjby5k +b2xzdHJhQGxvZ2ljYmxveC5jb20+iQE5BBMBCAAjBQJWbt6nAhsjBwsJCAcDAgEG +FQgCCQoLBBYCAwECHgECF4AACgkQgXC0cm1xmN4b/wf8DApMV/jSPEpibekrUPQu +Ye3Z8cxBQuRm/nOPowtPEH/ShAevrCdRiob2nuEZWNoqZ2e5/+6ud07Hs9bslvco +cDv1jeY1dof1idxfKhH3kfSpuD2XJhuzQBxBqOrIlCS/rdnW+Y9wOGD7+bs9QpcA +IyAeQGLLkfggAxaGYQ2Aev8pS7i3a/+lOWbFhcTe02I49KemCOJqBorG5FfILLNr +DjO3EoutNGpuz6rZvc/BlymphWBoAdUmxgoObr7NYWgw9pI8WeE6C7bbSOO7p5aQ +spWXU7Hm17DkzsVDpaJlyClllqK+DdKza5oWlBMe/P02jD3Y+0P/2rCCyQQwmH3D +RbkBDQRWbts8AQgA0g556xc08dH5YNEjbCwEt1j+XoRnV4+GfbSJIXOl9joIgzRC +4IaijvL8+4biWvX7HiybfvBKto0XB1AWLZRC3jWKX5p74I77UAcrD+VQ/roWQqlJ +BKbiQMlRYEsj/5Xnf72G90IP4DAFKvNl+rLChe+jUySA91BCtrYoP75Sw1BE9Cyz +xEtm4WUzKAJdXI+ZTBttA2Nbqy+GSuzBs7fSKDwREJaZmVrosvmns+pQVG4WPWf4 +0l4mPguDQmZ9wSWZvBDkpG7AgHYDRYRGkMbAGsVfc6cScN2VsSTa6cbeeAEowKxM +qx9RbY3WOq6aKAm0qDvow1nl7WwXwe8K0wQxfQARAQABiQEfBBgBCAAJBQJWbts8 +AhsMAAoJEIFwtHJtcZjeuAAH/0YNz2Qe1IAEO5oqEZNFOccL4KxVPrBhWUen83/b +C6PjOnOqv6q5ztAcms88WIKxBlfzIfq+dzJcbKVS/H7TEXgcaC+7EYW8sJVEsipN +BtEZ3LQNJ5coDjm7WZygniah1lfXNuiritAXduK5FWNNndqGArEaeZ8Shzdo/Uyi +b9lOsBIL6xc2ZcnX5f+rTu02LCEtEb0FwCycZLEWYf8hG4k8uttIOZOC+CLk/k8d +kBmPikMwUVTTV0CdT1cemQKdTaoAaK+kurF6FYXwcnjhRlHrisSt/tVMEwTw4LUM +3MYf6qfjjvE4HlDwZal8th7ccoQp/flfJIuRv85xCcKK+PI= +=u5cX +-----END PGP PUBLIC KEY BLOCK----- diff --git a/maintainers/keys/README.md b/maintainers/keys/README.md new file mode 100644 index 00000000000..ef473eada79 --- /dev/null +++ b/maintainers/keys/README.md @@ -0,0 +1,13 @@ +# Maintainer GPG Keys + +Release tags are signed by members of the [Nix maintainer team](https://nixos.org/community/teams/nix/) as part of the [release process](../release-process.md). This directory contains the public GPG keys used for signing. 
+
+## Keys
+
+- **Eelco Dolstra**
+  GPG Fingerprint: `B541 D553 0127 0E0B CF15 CA5D 8170 B472 6D71 98DE`
+
+- **Sergei Zimmerman**
+  GPG Fingerprint: [`158A 6F53 0EA2 02E5 F651 6113 14FA EA63 448E 1DF9`](https://keys.openpgp.org/vks/v1/by-fingerprint/158A6F530EA202E5F651611314FAEA63448E1DF9)
+
+
diff --git a/maintainers/link-headers b/maintainers/link-headers
new file mode 100755
index 00000000000..2457a2dc829
--- /dev/null
+++ b/maintainers/link-headers
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+
+# This script must be run from the root of the Nix repository.
+#
+# For include path hygiene, we need to put headers in a separate
+# directory from sources. But during development, it is nice to have paths
+# that are similar for headers and source files, e.g.
+# `foo/bar/baz.{cc,hh}`, for less typing when opening one file, and
+# then opening the other file.
+#
+# This script symlinks the headers next to the source files to
+# facilitate such development workflows. It also updates
+# `.git/info/exclude` so that the symlinks are not committed to the
+# repository by mistake.
+
+from pathlib import Path
+import subprocess
+import os
+
+
+def main() -> None:
+    # Path to the source directory
+    GIT_TOPLEVEL = Path(
+        subprocess.run(
+            ["git", "rev-parse", "--show-toplevel"],
+            text=True,
+            stdout=subprocess.PIPE,
+            check=True,
+        ).stdout.strip()
+    )
+
+    # Get header files from git
+    result = subprocess.run(
+        ["git", "-C", str(GIT_TOPLEVEL), "ls-files", "*/include/nix/**.hh"],
+        text=True,
+        stdout=subprocess.PIPE,
+        check=True,
+    )
+    header_files = result.stdout.strip().split("\n")
+    header_files.sort()
+
+    links = []
+    for file_str in header_files:
+        project_str, header_str = file_str.split("/include/nix/", 1)
+        project = Path(project_str)
+        header = Path(header_str)
+
+        # Reconstruct the full path (relative to SRC_DIR) to the header file.
+        file = project / "include" / "nix" / header
+
+        # The symlink should be created at "project/header", i.e. next to the project's sources.
+        link = project / header
+
+        # Compute a relative path from the symlink's parent directory to the actual header file.
+        relative_source = os.path.relpath(
+            GIT_TOPLEVEL / file, GIT_TOPLEVEL / link.parent
+        )
+
+        # Create the symbolic link.
+        full_link_path = GIT_TOPLEVEL / link
+        full_link_path.parent.mkdir(parents=True, exist_ok=True)
+        if full_link_path.is_symlink():
+            full_link_path.unlink()
+        full_link_path.symlink_to(relative_source)
+        links.append(link)
+
+    # Generate the git exclude file (.git/info/exclude)
+    gitignore_path = GIT_TOPLEVEL / ".git" / "info" / "exclude"
+    gitignore_path.parent.mkdir(parents=True, exist_ok=True)
+    with gitignore_path.open("w") as gitignore:
+        gitignore.write("# DO NOT EDIT! Autogenerated\n")
+        gitignore.write(
+            "# Symlinks for headers to be next to sources for development\n"
+        )
+        gitignore.write('# Run "maintainers/link-headers" to regenerate\n\n')
+
+        for link in links:
+            gitignore.write(f"/{link}\n")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/maintainers/release-process.md b/maintainers/release-process.md
index 68de3b67778..f8b6b6bec57 100644
--- a/maintainers/release-process.md
+++ b/maintainers/release-process.md
@@ -5,11 +5,11 @@
 
 The release process is intended to create the following for each release:
 
-* A Git tag
+* A signed Git tag (public keys in `maintainers/keys/`)
 
 * Binary tarballs in https://releases.nixos.org/?prefix=nix/
 
-* Docker images
+* Docker images (arm64 and amd64 variants, uploaded to DockerHub and GHCR)
 
 * Closures in https://cache.nixos.org
 
@@ -104,21 +104,17 @@ release:
   evaluation ID (e.g. `1780832` in
   `https://hydra.nixos.org/eval/1780832`).
-* Tag the release and upload the release artifacts to - [`releases.nixos.org`](https://releases.nixos.org/) and [Docker Hub](https://hub.docker.com/): +* Tag the release: ```console - $ IS_LATEST=1 ./maintainers/upload-release.pl + $ IS_LATEST=1 ./maintainers/upload-release.pl --skip-docker --skip-s3 --project-root $PWD ``` Note: `IS_LATEST=1` causes the `latest-release` branch to be force-updated. This is used by the `nixos.org` website to get the [latest Nix manual](https://nixos.org/manual/nixpkgs/unstable/). - TODO: This script requires the right AWS credentials. Document. - - TODO: This script currently requires a - `/home/eelco/Dev/nix-pristine`. +* Trigger the [`upload-release.yml` workflow](https://github.com/NixOS/nix/actions/workflows/upload-release.yml) via `workflow_dispatch` trigger. At the top click `Run workflow` -> select the current release branch from `Use workflow from` -> fill in `Hydra evaluation ID` with `` value from previous steps -> click `Run workflow`. Wait for the run to be approved by `NixOS/nix-team` (or bypass checks if warranted). Wait for the workflow to succeed. TODO: trigger nixos.org netlify: https://docs.netlify.com/configure-builds/build-hooks/ @@ -181,16 +177,18 @@ release: * Wait for the desired evaluation of the maintenance jobset to finish building. -* Run +* Tag the release ```console - $ IS_LATEST=1 ./maintainers/upload-release.pl + $ IS_LATEST=1 ./maintainers/upload-release.pl --skip-docker --skip-s3 --project-root $PWD ``` Omit `IS_LATEST=1` when creating a point release that is not on the most recent stable branch. This prevents `nixos.org` to going back to an older release. +* Trigger the [`upload-release.yml` workflow](https://github.com/NixOS/nix/actions/workflows/upload-release.yml) via `workflow_dispatch` trigger. At the top click `Run workflow` -> select the current release branch from `Use workflow from` -> fill in `Hydra evaluation ID` with `` value from previous steps -> click `Run workflow`. 
Wait for the run to be approved by `NixOS/nix-team` (or bypass checks if warranted). Wait for the workflow to succeed. + * Bump the version number of the release branch as above (e.g. to `2.12.2`). diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index 31a9c71d543..f7678b7d1c0 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -1,7 +1,8 @@ #! /usr/bin/env nix-shell -#! nix-shell -i perl -p perl perlPackages.LWPUserAgent perlPackages.LWPProtocolHttps perlPackages.FileSlurp perlPackages.NetAmazonS3 gnupg1 +#! nix-shell -i perl -p awscli2 perl perlPackages.LWPUserAgent perlPackages.LWPProtocolHttps perlPackages.FileSlurp perlPackages.NetAmazonS3 perlPackages.GetoptLongDescriptive gnupg1 use strict; +use Getopt::Long::Descriptive; use Data::Dumper; use File::Basename; use File::Path; @@ -13,7 +14,30 @@ delete $ENV{'shell'}; # shut up a LWP::UserAgent.pm warning -my $evalId = $ARGV[0] or die "Usage: $0 EVAL-ID\n"; +my ($opt, $usage) = describe_options( + '%c %o ', + [ 'skip-docker', 'Skip Docker image upload' ], + [ 'skip-git', 'Skip Git tagging' ], + [ 'skip-s3', 'Skip S3 upload' ], + [ 'docker-owner=s', 'Docker image owner', { default => 'nixos/nix' } ], + [ 'project-root=s', 'Pristine git repository path' ], + [ 's3-endpoint=s', 'Custom S3 endpoint' ], + [ 's3-host=s', 'S3 host', { default => 's3-eu-west-1.amazonaws.com' } ], + [], + [ 'help|h', 'Show this help message', { shortcircuit => 1 } ], + [], + [ 'Environment variables:' ], + [ 'AWS_ACCESS_KEY_ID' ], + [ 'AWS_SECRET_ACCESS_KEY' ], + [ 'AWS_SESSION_TOKEN For OIDC' ], + [ 'IS_LATEST Set to "1" to mark as latest release' ], +); + +print($usage->text), exit if $opt->help; + +my $evalId = $ARGV[0] or do { print STDERR $usage->text; exit 1 }; + +die "--project-root is required unless --skip-git is specified\n" unless $opt->skip_git || $opt->project_root; my $releasesBucketName = "nix-releases"; my $channelsBucketName = "nix-channels"; @@ -62,25 +86,38 @@ sub 
fetch { my $binaryCache = "https://cache.nixos.org/?local-nar-cache=$narCache"; # S3 setup. -my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} or die "No AWS_ACCESS_KEY_ID given."; -my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'} or die "No AWS_SECRET_ACCESS_KEY given."; - -my $s3 = Net::Amazon::S3->new( - { aws_access_key_id => $aws_access_key_id, - aws_secret_access_key => $aws_secret_access_key, - retry => 1, - host => "s3-eu-west-1.amazonaws.com", - }); - -my $releasesBucket = $s3->bucket($releasesBucketName) or die; - -my $s3_us = Net::Amazon::S3->new( - { aws_access_key_id => $aws_access_key_id, - aws_secret_access_key => $aws_secret_access_key, - retry => 1, - }); - -my $channelsBucket = $s3_us->bucket($channelsBucketName) or die; +my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'}; +my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'}; +my $aws_session_token = $ENV{'AWS_SESSION_TOKEN'}; + +my ($s3, $releasesBucket, $s3_channels, $channelsBucket); + +unless ($opt->skip_s3) { + $aws_access_key_id or die "No AWS_ACCESS_KEY_ID given."; + $aws_secret_access_key or die "No AWS_SECRET_ACCESS_KEY given."; + + $s3 = Net::Amazon::S3->new( + { aws_access_key_id => $aws_access_key_id, + aws_secret_access_key => $aws_secret_access_key, + $aws_session_token ? (aws_session_token => $aws_session_token) : (), + retry => 1, + host => $opt->s3_host, + secure => ($opt->s3_endpoint && $opt->s3_endpoint =~ /^http:/) ? 0 : 1, + }); + + $releasesBucket = $s3->bucket($releasesBucketName) or die; + + $s3_channels = Net::Amazon::S3->new( + { aws_access_key_id => $aws_access_key_id, + aws_secret_access_key => $aws_secret_access_key, + $aws_session_token ? (aws_session_token => $aws_session_token) : (), + retry => 1, + $opt->s3_endpoint ? (host => $opt->s3_host) : (), + $opt->s3_endpoint ? (secure => ($opt->s3_endpoint =~ /^http:/) ? 
0 : 1) : (), + }); + + $channelsBucket = $s3_channels->bucket($channelsBucketName) or die; +} sub getStorePath { my ($jobName, $output) = @_; @@ -115,11 +152,12 @@ sub copyManual { File::Path::remove_tree("$tmpDir/manual.tmp", {safe => 1}); } - system("aws s3 sync '$tmpDir/manual' s3://$releasesBucketName/$releaseDir/manual") == 0 + my $awsEndpoint = $opt->s3_endpoint ? "--endpoint-url " . $opt->s3_endpoint : ""; + system("aws $awsEndpoint s3 sync '$tmpDir/manual' s3://$releasesBucketName/$releaseDir/manual") == 0 or die "syncing manual to S3\n"; } -copyManual; +copyManual unless $opt->skip_s3; sub downloadFile { my ($jobName, $productNr, $dstName) = @_; @@ -158,30 +196,12 @@ sub downloadFile { return $sha256_expected; } -downloadFile("binaryTarball.i686-linux", "1"); -downloadFile("binaryTarball.x86_64-linux", "1"); -downloadFile("binaryTarball.aarch64-linux", "1"); -downloadFile("binaryTarball.x86_64-darwin", "1"); -downloadFile("binaryTarball.aarch64-darwin", "1"); -eval { - downloadFile("binaryTarballCross.x86_64-linux.armv6l-unknown-linux-gnueabihf", "1"); -}; -warn "$@" if $@; -eval { - downloadFile("binaryTarballCross.x86_64-linux.armv7l-unknown-linux-gnueabihf", "1"); -}; -warn "$@" if $@; -eval { - downloadFile("binaryTarballCross.x86_64-linux.riscv64-unknown-linux-gnu", "1"); -}; -warn "$@" if $@; -downloadFile("installerScript", "1"); - -# Upload docker images to dockerhub. +# Upload docker images. my $dockerManifest = ""; my $dockerManifestLatest = ""; my $haveDocker = 0; +unless ($opt->skip_docker) { for my $platforms (["x86_64-linux", "amd64"], ["aarch64-linux", "arm64"]) { my $system = $platforms->[0]; my $dockerPlatform = $platforms->[1]; @@ -195,8 +215,8 @@ sub downloadFile { print STDERR "loading docker image for $dockerPlatform...\n"; system("docker load -i $tmpDir/$fn") == 0 or die; - my $tag = "nixos/nix:$version-$dockerPlatform"; - my $latestTag = "nixos/nix:latest-$dockerPlatform"; + my $tag = $opt->docker_owner . 
":$version-$dockerPlatform"; + my $latestTag = $opt->docker_owner . ":latest-$dockerPlatform"; print STDERR "tagging $version docker image for $dockerPlatform...\n"; system("docker tag nix:$version $tag") == 0 or die; @@ -219,68 +239,94 @@ sub downloadFile { } if ($haveDocker) { + my $dockerOwner = $opt->docker_owner; print STDERR "creating multi-platform docker manifest...\n"; - system("docker manifest rm nixos/nix:$version"); - system("docker manifest create nixos/nix:$version $dockerManifest") == 0 or die; + system("docker manifest rm $dockerOwner:$version"); + system("docker manifest create $dockerOwner:$version $dockerManifest") == 0 or die; if ($isLatest) { print STDERR "creating latest multi-platform docker manifest...\n"; - system("docker manifest rm nixos/nix:latest"); - system("docker manifest create nixos/nix:latest $dockerManifestLatest") == 0 or die; + system("docker manifest rm $dockerOwner:latest"); + system("docker manifest create $dockerOwner:latest $dockerManifestLatest") == 0 or die; } print STDERR "pushing multi-platform docker manifest...\n"; - system("docker manifest push nixos/nix:$version") == 0 or die; + system("docker manifest push $dockerOwner:$version") == 0 or die; if ($isLatest) { print STDERR "pushing latest multi-platform docker manifest...\n"; - system("docker manifest push nixos/nix:latest") == 0 or die; + system("docker manifest push $dockerOwner:latest") == 0 or die; } } +} -# Upload nix-fallback-paths.nix. -write_file("$tmpDir/fallback-paths.nix", - "{\n" . - " x86_64-linux = \"" . getStorePath("build.nix-everything.x86_64-linux") . "\";\n" . - " i686-linux = \"" . getStorePath("build.nix-everything.i686-linux") . "\";\n" . - " aarch64-linux = \"" . getStorePath("build.nix-everything.aarch64-linux") . "\";\n" . - " riscv64-linux = \"" . getStorePath("buildCross.nix-everything.riscv64-unknown-linux-gnu.x86_64-linux") . "\";\n" . - " x86_64-darwin = \"" . getStorePath("build.nix-everything.x86_64-darwin") . "\";\n" . 
- " aarch64-darwin = \"" . getStorePath("build.nix-everything.aarch64-darwin") . "\";\n" . - "}\n"); # Upload release files to S3. -for my $fn (glob "$tmpDir/*") { - my $name = basename($fn); - next if $name eq "manual"; - my $dstKey = "$releaseDir/" . $name; - unless (defined $releasesBucket->head_key($dstKey)) { - print STDERR "uploading $fn to s3://$releasesBucketName/$dstKey...\n"; - - my $configuration = (); - $configuration->{content_type} = "application/octet-stream"; - - if ($fn =~ /.sha256|install|\.nix$/) { - $configuration->{content_type} = "text/plain"; +unless ($opt->skip_s3) { + downloadFile("binaryTarball.i686-linux", "1"); + downloadFile("binaryTarball.x86_64-linux", "1"); + downloadFile("binaryTarball.aarch64-linux", "1"); + downloadFile("binaryTarball.x86_64-darwin", "1"); + downloadFile("binaryTarball.aarch64-darwin", "1"); + eval { + downloadFile("binaryTarballCross.x86_64-linux.armv6l-unknown-linux-gnueabihf", "1"); + }; + warn "$@" if $@; + eval { + downloadFile("binaryTarballCross.x86_64-linux.armv7l-unknown-linux-gnueabihf", "1"); + }; + warn "$@" if $@; + eval { + downloadFile("binaryTarballCross.x86_64-linux.riscv64-unknown-linux-gnu", "1"); + }; + warn "$@" if $@; + downloadFile("installerScript", "1"); + + # Upload nix-fallback-paths.nix. + write_file("$tmpDir/fallback-paths.nix", + "{\n" . + " x86_64-linux = \"" . getStorePath("build.nix-everything.x86_64-linux") . "\";\n" . + " i686-linux = \"" . getStorePath("build.nix-everything.i686-linux") . "\";\n" . + " aarch64-linux = \"" . getStorePath("build.nix-everything.aarch64-linux") . "\";\n" . + " riscv64-linux = \"" . getStorePath("buildCross.nix-everything.riscv64-unknown-linux-gnu.x86_64-linux") . "\";\n" . + " x86_64-darwin = \"" . getStorePath("build.nix-everything.x86_64-darwin") . "\";\n" . + " aarch64-darwin = \"" . getStorePath("build.nix-everything.aarch64-darwin") . "\";\n" . 
+ "}\n"); + + for my $fn (glob "$tmpDir/*") { + my $name = basename($fn); + next if $name eq "manual"; + my $dstKey = "$releaseDir/" . $name; + unless (defined $releasesBucket->head_key($dstKey)) { + print STDERR "uploading $fn to s3://$releasesBucketName/$dstKey...\n"; + + my $configuration = (); + $configuration->{content_type} = "application/octet-stream"; + + if ($fn =~ /.sha256|install|\.nix$/) { + $configuration->{content_type} = "text/plain"; + } + + $releasesBucket->add_key_filename($dstKey, $fn, $configuration) + or die $releasesBucket->err . ": " . $releasesBucket->errstr; } - - $releasesBucket->add_key_filename($dstKey, $fn, $configuration) - or die $releasesBucket->err . ": " . $releasesBucket->errstr; } -} -# Update the "latest" symlink. -$channelsBucket->add_key( - "nix-latest/install", "", - { "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" }) - or die $channelsBucket->err . ": " . $channelsBucket->errstr - if $isLatest; + # Update the "latest" symlink. + $channelsBucket->add_key( + "nix-latest/install", "", + { "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" }) + or die $channelsBucket->err . ": " . $channelsBucket->errstr + if $isLatest; +} # Tag the release in Git. -chdir("/home/eelco/Dev/nix-pristine") or die; -system("git remote update origin") == 0 or die; -system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die; -system("git push --tags") == 0 or die; -system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die if $isLatest; +unless ($opt->skip_git) { + chdir($opt->project_root) or die "Cannot chdir to " . $opt->project_root . 
": $!"; + system("git remote update origin") == 0 or die; + system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die; + system("git push origin refs/tags/$version") == 0 or die; + system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die if $isLatest; +} File::Path::remove_tree($narCache, {safe => 1}); File::Path::remove_tree($tmpDir, {safe => 1}); diff --git a/packaging/components.nix b/packaging/components.nix index dbf2180e894..6402e8b7b2f 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -27,7 +27,7 @@ let pkg-config ; - baseVersion = lib.fileContents ../.version; + baseVersion = lib.fileContents ../.version-determinate; versionSuffix = lib.optionalString (!officialRelease) "pre"; @@ -51,15 +51,6 @@ let exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn); setVersionLayer = finalAttrs: prevAttrs: { - preConfigure = - prevAttrs.preConfigure or "" - + - # Update the repo-global .version file. - # Symlink ./.version points there, but by default only workDir is writable. 
- '' - chmod u+w ./.version - echo ${finalAttrs.version} > ./.version - ''; }; localSourceLayer = diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 7b7ee0ecf4d..16ded9c97c4 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -30,17 +30,22 @@ scope: { NIX_CFLAGS_COMPILE = "-DINITIAL_MARK_STACK_SIZE=1048576"; }); - lowdown = pkgs.lowdown.overrideAttrs (prevAttrs: rec { - version = "2.0.2"; - src = pkgs.fetchurl { - url = "https://kristaps.bsd.lv/lowdown/snapshots/lowdown-${version}.tar.gz"; - hash = "sha512-cfzhuF4EnGmLJf5EGSIbWqJItY3npbRSALm+GarZ7SMU7Hr1xw0gtBFMpOdi5PBar4TgtvbnG4oRPh+COINGlA=="; - }; - nativeBuildInputs = prevAttrs.nativeBuildInputs ++ [ pkgs.buildPackages.bmake ]; - postInstall = - lib.replaceStrings [ "lowdown.so.1" "lowdown.1.dylib" ] [ "lowdown.so.2" "lowdown.2.dylib" ] - (prevAttrs.postInstall or ""); - }); + lowdown = + if lib.versionAtLeast pkgs.lowdown.version "2.0.2" then + pkgs.lowdown + else + pkgs.lowdown.overrideAttrs (prevAttrs: rec { + version = "2.0.2"; + src = pkgs.fetchurl { + url = "https://kristaps.bsd.lv/lowdown/snapshots/lowdown-${version}.tar.gz"; + hash = "sha512-cfzhuF4EnGmLJf5EGSIbWqJItY3npbRSALm+GarZ7SMU7Hr1xw0gtBFMpOdi5PBar4TgtvbnG4oRPh+COINGlA=="; + }; + patches = [ ]; + nativeBuildInputs = prevAttrs.nativeBuildInputs ++ [ pkgs.buildPackages.bmake ]; + postInstall = + lib.replaceStrings [ "lowdown.so.1" "lowdown.1.dylib" ] [ "lowdown.so.2" "lowdown.2.dylib" ] + (prevAttrs.postInstall or ""); + }); # TODO: Remove this when https://github.com/NixOS/nixpkgs/pull/442682 is included in a stable release toml11 = @@ -66,6 +71,7 @@ scope: { "--with-coroutine" "--with-iostreams" "--with-url" + "--with-thread" ]; enableIcu = false; }).overrideAttrs diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 95cbc4a3623..8f963f961fb 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -148,6 +148,15 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( isInternal 
= dep: internalDrvs ? ${builtins.unsafeDiscardStringContext dep.drvPath or "_non-existent_"}; + activeComponentNames = lib.listToAttrs ( + map (c: { + name = c.pname or c.name; + value = null; + }) activeComponents + ); + + isActiveComponent = name: activeComponentNames ? ${name}; + in { pname = "shell-for-nix"; @@ -190,31 +199,23 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( } ); - small = - (finalAttrs.finalPackage.withActiveComponents ( - c: - lib.intersectAttrs (lib.genAttrs [ - "nix-cli" - "nix-util-tests" - "nix-store-tests" - "nix-expr-tests" - "nix-fetchers-tests" - "nix-flake-tests" - "nix-functional-tests" - "nix-perl-bindings" - ] (_: null)) c - )).overrideAttrs - (o: { - mesonFlags = o.mesonFlags ++ [ - # TODO: infer from activeComponents or vice versa - "-Dkaitai-struct-checks=false" - "-Djson-schema-checks=false" - ]; - }); + small = finalAttrs.finalPackage.withActiveComponents ( + c: + lib.intersectAttrs (lib.genAttrs [ + "nix-cli" + "nix-util-tests" + "nix-store-tests" + "nix-expr-tests" + "nix-fetchers-tests" + "nix-flake-tests" + "nix-functional-tests" + "nix-perl-bindings" + ] (_: null)) c + ); }; # Remove the version suffix to avoid unnecessary attempts to substitute in nix develop - version = lib.fileContents ../.version; + version = lib.fileContents ../.version-determinate; name = finalAttrs.pname; installFlags = "sysconfdir=$(out)/etc"; @@ -258,10 +259,13 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( # We use this shell with the local checkout, not unpackPhase. src = null; + # Workaround https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html # Remove when gdb fix is rolled out everywhere. 
separateDebugInfo = false; + mesonBuildType = "debugoptimized"; + env = { # For `make format`, to work without installing pre-commit _NIX_PRE_COMMIT_HOOKS_CONFIG = "${(pkgs.formats.yaml { }).generate "pre-commit-config.yaml" @@ -275,21 +279,33 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( dontUseCmakeConfigure = true; - mesonFlags = - map (transformFlag "libutil") (ignoreCrossFile pkgs.nixComponents2.nix-util.mesonFlags) - ++ map (transformFlag "libstore") (ignoreCrossFile pkgs.nixComponents2.nix-store.mesonFlags) - ++ map (transformFlag "libfetchers") (ignoreCrossFile pkgs.nixComponents2.nix-fetchers.mesonFlags) - ++ lib.optionals havePerl ( - map (transformFlag "perl") (ignoreCrossFile pkgs.nixComponents2.nix-perl-bindings.mesonFlags) - ) - ++ map (transformFlag "libexpr") (ignoreCrossFile pkgs.nixComponents2.nix-expr.mesonFlags) - ++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags); + mesonFlags = [ + (lib.mesonBool "kaitai-struct-checks" (isActiveComponent "nix-kaitai-struct-checks")) + (lib.mesonBool "json-schema-checks" (isActiveComponent "nix-json-schema-checks")) + ] + ++ map (transformFlag "libutil") (ignoreCrossFile pkgs.nixComponents2.nix-util.mesonFlags) + ++ map (transformFlag "libstore") (ignoreCrossFile pkgs.nixComponents2.nix-store.mesonFlags) + ++ map (transformFlag "libfetchers") (ignoreCrossFile pkgs.nixComponents2.nix-fetchers.mesonFlags) + ++ lib.optionals havePerl ( + map (transformFlag "perl") (ignoreCrossFile pkgs.nixComponents2.nix-perl-bindings.mesonFlags) + ) + ++ map (transformFlag "libexpr") (ignoreCrossFile pkgs.nixComponents2.nix-expr.mesonFlags) + ++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags); nativeBuildInputs = let inputs = dedupByString (v: "${v}") ( - lib.filter (x: !isInternal x) (lib.lists.concatMap (c: c.nativeBuildInputs) activeComponents) + lib.filter (x: !isInternal x) ( + lib.lists.concatMap ( + # Nix manual has a build-time dependency on 
nix, but we
+            # don't want to do a native build just to enter the cross
+            # dev shell.
+            #
+            # TODO: think of a more principled fix for this.
+            c: lib.filter (f: f.pname or null != "nix") c.nativeBuildInputs
+          ) activeComponents
+        )
       )
       ++ lib.optional (
         !buildCanExecuteHost
@@ -305,8 +321,8 @@ pkgs.nixComponents2.nix-util.overrideAttrs (
           pkgs.buildPackages.nixfmt-rfc-style
           pkgs.buildPackages.shellcheck
           pkgs.buildPackages.include-what-you-use
-          pkgs.buildPackages.gdb
         ]
+        ++ lib.optional pkgs.hostPlatform.isUnix pkgs.buildPackages.gdb
         ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) (
           lib.hiPrio pkgs.buildPackages.clang-tools
         )
@@ -322,13 +338,13 @@ pkgs.nixComponents2.nix-util.overrideAttrs (
       )
     );
 
-    buildInputs = [
-      pkgs.gbenchmark
-    ]
-    ++ dedupByString (v: "${v}") (
-      lib.filter (x: !isInternal x) (lib.lists.concatMap (c: c.buildInputs) activeComponents)
-    )
-    ++ lib.optional havePerl pkgs.perl;
+    buildInputs =
+      # TODO change Nixpkgs to mark gbenchmark as building on Windows
+      lib.optional pkgs.hostPlatform.isUnix pkgs.gbenchmark
+      ++ dedupByString (v: "${v}") (
+        lib.filter (x: !isInternal x) (lib.lists.concatMap (c: c.buildInputs) activeComponents)
+      )
+      ++ lib.optional havePerl pkgs.perl;
 
     propagatedBuildInputs = dedupByString (v: "${v}") (
       lib.filter (x: !isInternal x) (lib.lists.concatMap (c: c.propagatedBuildInputs) activeComponents)
diff --git a/packaging/everything.nix b/packaging/everything.nix
index f6bdad4907b..3206b8ba423 100644
--- a/packaging/everything.nix
+++ b/packaging/everything.nix
@@ -75,7 +75,7 @@ let
   };
 
   devdoc = buildEnv {
-    name = "nix-${nix-cli.version}-devdoc";
+    name = "determinate-nix-${nix-cli.version}-devdoc";
     paths = [
       nix-internal-api-docs
       nix-external-api-docs
@@ -84,7 +84,7 @@ let
 in
 stdenv.mkDerivation (finalAttrs: {
-  pname = "nix";
+  pname = "determinate-nix";
   version = nix-cli.version;
 
   /**
diff --git a/packaging/hydra.nix b/packaging/hydra.nix
index 3a31314f709..9839dd62163 100644
--- 
a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -122,77 +122,6 @@ rec { system: self.devShells.${system}.default.inputDerivation )) [ "i686-linux" ]; - buildStatic = forAllPackages ( - pkgName: - lib.genAttrs linux64BitSystems ( - system: nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName} - ) - ); - - buildCross = forAllPackages ( - pkgName: - # Hack to avoid non-evaling package - ( - if pkgName == "nix-functional-tests" then - lib.flip builtins.removeAttrs [ "x86_64-w64-mingw32" ] - else - lib.id - ) - ( - forAllCrossSystems ( - crossSystem: - lib.genAttrs [ "x86_64-linux" ] ( - system: nixpkgsFor.${system}.cross.${crossSystem}.nixComponents2.${pkgName} - ) - ) - ) - ); - - # Builds with sanitizers already have GC disabled, so this buildNoGc can just - # point to buildWithSanitizers in order to reduce the load on hydra. - buildNoGc = buildWithSanitizers; - - buildWithSanitizers = - let - components = forAllSystems ( - system: - let - pkgs = nixpkgsFor.${system}.native; - in - pkgs.nixComponents2.overrideScope ( - self: super: { - # Boost coroutines fail with ASAN on darwin. - withASan = !pkgs.stdenv.buildPlatform.isDarwin; - withUBSan = true; - nix-expr = super.nix-expr.override { enableGC = false; }; - # Unclear how to make Perl bindings work with a dynamically linked ASAN. - nix-perl-bindings = null; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - - buildNoTests = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-cli); - - # Toggles some settings for better coverage. Windows needs these - # library combinations, and Debian build Nix with GNU readline too. 
- buildReadlineNoMarkdown = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents2.overrideScope ( - self: super: { - nix-cmd = super.nix-cmd.override { - enableMarkdown = false; - readlineFlavor = "readline"; - }; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - # Perl bindings for various platforms. perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-perl-bindings); @@ -203,30 +132,6 @@ rec { system: nixpkgsFor.${system}.native.callPackage ./binary-tarball.nix { } ); - binaryTarballCross = lib.genAttrs [ "x86_64-linux" ] ( - system: - forAllCrossSystems ( - crossSystem: nixpkgsFor.${system}.cross.${crossSystem}.callPackage ./binary-tarball.nix { } - ) - ); - - # The first half of the installation script. This is uploaded - # to https://nixos.org/nix/install. It downloads the binary - # tarball for the user's system and calls the second half of the - # installation script. 
- installerScript = installScriptFor [ - # Native - self.hydraJobs.binaryTarball."x86_64-linux" - self.hydraJobs.binaryTarball."i686-linux" - self.hydraJobs.binaryTarball."aarch64-linux" - self.hydraJobs.binaryTarball."x86_64-darwin" - self.hydraJobs.binaryTarball."aarch64-darwin" - # Cross - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" - ]; - installerScriptForGHA = forAllSystems ( system: nixpkgsFor.${system}.native.callPackage ./installer { @@ -294,6 +199,19 @@ rec { pkgs = nixpkgsFor.${system}.native; } ); + + nixpkgsLibTestsLazy = forAllSystems ( + system: + lib.overrideDerivation + (import (nixpkgs + "/lib/tests/test-with-nix.nix") { + lib = nixpkgsFor.${system}.native.lib; + nix = self.packages.${system}.nix-cli; + pkgs = nixpkgsFor.${system}.native; + }) + (_: { + "NIX_CONFIG" = "lazy-trees = true"; + }) + ); }; metrics.nixpkgs = import "${nixpkgs-regression}/pkgs/top-level/metrics.nix" { @@ -308,17 +226,12 @@ rec { in pkgs.runCommand "install-tests" { againstSelf = testNixVersions pkgs pkgs.nix; - againstCurrentLatest = - # FIXME: temporarily disable this on macOS because of #3605. - if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; + #againstCurrentLatest = + # # FIXME: temporarily disable this on macOS because of #3605. 
+ # if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; # Disabled because the latest stable version doesn't handle # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work # againstLatestStable = testNixVersions pkgs pkgs.nixStable; } "touch $out" ); - - installerTests = import ../tests/installer { - binaryTarballs = self.hydraJobs.binaryTarball; - inherit nixpkgsFor; - }; } diff --git a/packaging/installer/default.nix b/packaging/installer/default.nix index e171f36f99f..a8e344b496c 100644 --- a/packaging/installer/default.nix +++ b/packaging/installer/default.nix @@ -32,7 +32,7 @@ runCommand "installer-script" in '' \ - --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ + --replace '@tarballHash_${system}@' $(nix hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ --replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \ '' ) tarballs diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 5ff760a6143..683beca10fd 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -53,8 +53,8 @@ readonly PROFILE_NIX_FILE_FISH="$NIX_ROOT/var/nix/profiles/default/etc/profile.d readonly NIX_INSTALLED_NIX="@nix@" readonly NIX_INSTALLED_CACERT="@cacert@" -#readonly NIX_INSTALLED_NIX="/nix/store/j8dbv5w6jl34caywh2ygdy88knx1mdf7-nix-2.3.6" -#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2" +#readonly NIX_INSTALLED_NIX="/nix/store/byi37zv50wnfrpp4d81z3spswd5zva37-nix-2.3.6" +#readonly NIX_INSTALLED_CACERT="/nix/store/7pi45g541xa8ahwgpbpy7ggsl0xj1jj6-nss-cacert-3.49.2" EXTRACTED_NIX_PATH="$(dirname "$0")" readonly EXTRACTED_NIX_PATH diff --git a/shell.nix b/shell.nix deleted file mode 100644 index 918f4bbd9e9..00000000000 --- a/shell.nix +++ /dev/null @@ -1,3 +0,0 @@ -(import (fetchTarball 
"https://github.com/edolstra/flake-compat/archive/master.tar.gz") { - src = ./.; -}).shellNix diff --git a/src/external-api-docs/package.nix b/src/external-api-docs/package.nix index b194e16d460..28cde8c09e6 100644 --- a/src/external-api-docs/package.nix +++ b/src/external-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-external-api-docs"; + pname = "determinate-nix-external-api-docs"; inherit version; workDir = ./.; diff --git a/src/internal-api-docs/package.nix b/src/internal-api-docs/package.nix index 6c4f354aee5..636c19653ea 100644 --- a/src/internal-api-docs/package.nix +++ b/src/internal-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-internal-api-docs"; + pname = "determinate-nix-internal-api-docs"; inherit version; workDir = ./.; diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index 798ef072eb1..226b65f4a7c 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -138,6 +138,13 @@ ref EvalCommand::getEvalStore() ref EvalCommand::getEvalState() { if (!evalState) { + if (startReplOnEvalErrors && evalSettings.evalCores != 1U) { + // Disable parallel eval if the debugger is enabled, since + // they're incompatible at the moment. 
+ warn("using the debugger disables multi-threaded evaluation"); + evalSettings.evalCores = 1; + } + evalState = std::allocate_shared( traceable_allocator(), lookupPath, getEvalStore(), fetchSettings, evalSettings, getStore()); diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 30e76b2455d..865901febf4 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -19,17 +19,12 @@ namespace nix { -fetchers::Settings fetchSettings; - -static GlobalConfig::Register rFetchSettings(&fetchSettings); - EvalSettings evalSettings{ settings.readOnlyMode, { { "flake", [](EvalState & state, std::string_view rest) { - experimentalFeatureSettings.require(Xp::Flakes); // FIXME `parseFlakeRef` should take a `std::string_view`. auto flakeRef = parseFlakeRef(fetchSettings, std::string{rest}, {}, true, false); debug("fetching flake search path element '%s''", rest); @@ -186,7 +181,6 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const std::files } else if (hasPrefix(s, "flake:")) { - experimentalFeatureSettings.require(Xp::Flakes); auto flakeRef = parseFlakeRef(fetchSettings, std::string(s.substr(6)), {}, true, false); auto [accessor, lockedRef] = flakeRef.resolve(fetchSettings, *state.store).lazyFetch(fetchSettings, *state.store); diff --git a/src/libcmd/include/nix/cmd/command.hh b/src/libcmd/include/nix/cmd/command.hh index d1b528e2477..c860f019260 100644 --- a/src/libcmd/include/nix/cmd/command.hh +++ b/src/libcmd/include/nix/cmd/command.hh @@ -214,6 +214,8 @@ struct InstallableCommand : virtual Args, SourceExprCommand { InstallableCommand(); + virtual void preRun(ref store); + virtual void run(ref store, ref installable) = 0; void run(ref store) override; diff --git a/src/libcmd/include/nix/cmd/common-eval-args.hh b/src/libcmd/include/nix/cmd/common-eval-args.hh index 67cb0714827..4f9ebb83df5 100644 --- a/src/libcmd/include/nix/cmd/common-eval-args.hh +++ b/src/libcmd/include/nix/cmd/common-eval-args.hh @@ 
-25,9 +25,6 @@ namespace flake { struct Settings; } -/** - * @todo Get rid of global settings variables - */ extern fetchers::Settings fetchSettings; /** diff --git a/src/libcmd/include/nix/cmd/installables.hh b/src/libcmd/include/nix/cmd/installables.hh index 530334e037b..2ea35261c7f 100644 --- a/src/libcmd/include/nix/cmd/installables.hh +++ b/src/libcmd/include/nix/cmd/installables.hh @@ -96,6 +96,22 @@ typedef std::vector DerivedPathsWithInfo; struct Installable; +struct InstallableWithBuildResult +{ + ref installable; + + using Success = BuiltPathWithResult; + + using Failure = BuildResult; // must be a `BuildResult::Failure` + + std::variant result; + + /** + * Throw an exception if this represents a failure, otherwise returns a `BuiltPathWithResult`. + */ + const BuiltPathWithResult & getSuccess() const; +}; + /** * Shorthand, for less typing and helping us keep the choice of * collection in sync. @@ -160,13 +176,15 @@ struct Installable const Installables & installables, BuildMode bMode = bmNormal); - static std::vector, BuiltPathWithResult>> build2( + static std::vector build2( ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode = bmNormal); + static void throwBuildErrors(std::vector & buildResults, const Store & store); + static std::set toStorePathSet( ref evalStore, ref store, Realise mode, OperateOn operateOn, const Installables & installables); diff --git a/src/libcmd/installable-attr-path.cc b/src/libcmd/installable-attr-path.cc index 28c3db3fc79..3a80aa384de 100644 --- a/src/libcmd/installable-attr-path.cc +++ b/src/libcmd/installable-attr-path.cc @@ -89,7 +89,8 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() } DerivedPathsWithInfo res; - for (auto & [drvPath, outputs] : byDrvPath) + for (auto & [drvPath, outputs] : byDrvPath) { + state->waitForPath(drvPath); res.push_back({ .path = DerivedPath::Built{ @@ -102,6 +103,7 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() so we can fill in 
this info. */ }), }); + } return res; } diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 11bbdbf8429..70267a65c09 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -102,6 +102,7 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() } auto drvPath = attr->forceDerivation(); + state->waitForPath(drvPath); std::optional priority; diff --git a/src/libcmd/installable-value.cc b/src/libcmd/installable-value.cc index 3a167af3db4..ec53ee97c89 100644 --- a/src/libcmd/installable-value.cc +++ b/src/libcmd/installable-value.cc @@ -55,7 +55,7 @@ InstallableValue::trySinglePathToDerivedPaths(Value & v, const PosIdx pos, std:: else if (v.type() == nString) { return {{ - .path = DerivedPath::fromSingle(state->coerceToSingleDerivedPath(pos, v, errorCtx)), + .path = DerivedPath::fromSingle(state->devirtualize(state->coerceToSingleDerivedPath(pos, v, errorCtx))), .info = make_ref(), }}; } diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 7e3861e2f1d..be64a429297 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -21,6 +21,7 @@ #include "nix/util/url.hh" #include "nix/fetchers/registry.hh" #include "nix/store/build-result.hh" +#include "nix/util/exit.hh" #include #include @@ -398,9 +399,6 @@ void completeFlakeRefWithFragment( void completeFlakeRef(AddCompletions & completions, ref store, std::string_view prefix) { - if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - return; - if (prefix == "") completions.add("."); @@ -554,46 +552,69 @@ static SingleBuiltPath getBuiltPath(ref evalStore, ref store, cons b.raw()); } -std::vector Installable::build( - ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) +const BuiltPathWithResult & InstallableWithBuildResult::getSuccess() const { - std::vector res; - for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode)) - 
res.push_back(builtPathWithResult); - return res; + if (auto * failure = std::get_if(&result)) { + auto failure2 = failure->tryGetFailure(); + assert(failure2); + failure2->rethrow(); + } else + return *std::get_if(&result); } -static void throwBuildErrors(std::vector & buildResults, const Store & store) +void Installable::throwBuildErrors(std::vector & buildResults, const Store & store) { - std::vector> failed; for (auto & buildResult : buildResults) { - if (auto * failure = buildResult.tryGetFailure()) { - failed.push_back({&buildResult, failure}); - } - } + if (std::get_if(&buildResult.result)) { + // Report success first. + for (auto & buildResult : buildResults) { + if (std::get_if(&buildResult.result)) + notice("✅ " ANSI_BOLD "%s" ANSI_NORMAL, buildResult.installable->what()); + } - auto failedResult = failed.begin(); - if (failedResult != failed.end()) { - if (failed.size() == 1) { - failedResult->second->rethrow(); - } else { - StringSet failedPaths; - for (; failedResult != failed.end(); failedResult++) { - if (!failedResult->second->errorMsg.empty()) { - logError( - ErrorInfo{ - .level = lvlError, - .msg = failedResult->second->errorMsg, - }); + // Then cancelled builds. + for (auto & buildResult : buildResults) { + if (auto failure = std::get_if(&buildResult.result)) { + if (failure->isCancelled()) + notice( + "❓ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_FAINT " (cancelled)", + buildResult.installable->what()); + } + } + + // Then failures. 
+ for (auto & buildResult : buildResults) { + if (auto failure = std::get_if(&buildResult.result)) { + if (failure->isCancelled()) + continue; + auto failure2 = failure->tryGetFailure(); + assert(failure2); + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, buildResult.installable->what()); + try { + failure2->rethrow(); + } catch (Error & e) { + logError(e.info()); + } } - failedPaths.insert(failedResult->first->path.to_string(store)); } - throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths))); + + throw Exit(1); } } } -std::vector, BuiltPathWithResult>> Installable::build2( +std::vector Installable::build( + ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) +{ + auto results = build2(evalStore, store, mode, installables, bMode); + throwBuildErrors(results, *store); + std::vector res; + for (auto & b : results) + res.push_back(b.getSuccess()); + return res; +} + +std::vector Installable::build2( ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) { if (mode == Realise::Nothing) @@ -615,7 +636,7 @@ std::vector, BuiltPathWithResult>> Installable::build } } - std::vector, BuiltPathWithResult>> res; + std::vector res; switch (mode) { @@ -630,17 +651,21 @@ std::vector, BuiltPathWithResult>> Installable::build [&](const DerivedPath::Built & bfd) { auto outputs = resolveDerivedPath(*store, bfd, &*evalStore); res.push_back( - {aux.installable, - {.path = - BuiltPath::Built{ - .drvPath = - make_ref(getBuiltPath(evalStore, store, *bfd.drvPath)), - .outputs = outputs, - }, - .info = aux.info}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = + BuiltPath::Built{ + .drvPath = make_ref( + getBuiltPath(evalStore, store, *bfd.drvPath)), + .outputs = outputs, + }, + .info = aux.info}}); }, [&](const DerivedPath::Opaque & bo) { - res.push_back({aux.installable, {.path = BuiltPath::Opaque{bo.path}, .info = aux.info}}); + 
res.push_back( + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = BuiltPath::Opaque{bo.path}, .info = aux.info}}); }, }, path.raw()); @@ -654,9 +679,13 @@ std::vector, BuiltPathWithResult>> Installable::build printMissing(store, pathsToBuild, lvlInfo); auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore); - throwBuildErrors(buildResults, *store); for (auto & buildResult : buildResults) { - // If we didn't throw, they must all be sucesses + if (buildResult.tryGetFailure()) { + for (auto & aux : backmap[buildResult.path]) { + res.push_back({.installable = aux.installable, .result = buildResult}); + } + continue; + } auto & success = std::get(buildResult.inner); for (auto & aux : backmap[buildResult.path]) { std::visit( @@ -666,20 +695,22 @@ std::vector, BuiltPathWithResult>> Installable::build for (auto & [outputName, realisation] : success.builtOutputs) outputs.emplace(outputName, realisation.outPath); res.push_back( - {aux.installable, - {.path = - BuiltPath::Built{ - .drvPath = - make_ref(getBuiltPath(evalStore, store, *bfd.drvPath)), - .outputs = outputs, - }, - .info = aux.info, - .result = buildResult}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = + BuiltPath::Built{ + .drvPath = make_ref( + getBuiltPath(evalStore, store, *bfd.drvPath)), + .outputs = outputs, + }, + .info = aux.info, + .result = buildResult}}); }, [&](const DerivedPath::Opaque & bo) { res.push_back( - {aux.installable, - {.path = BuiltPath::Opaque{bo.path}, .info = aux.info, .result = buildResult}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = BuiltPath::Opaque{bo.path}, .info = aux.info, .result = buildResult}}); }, }, buildResult.path.raw()); @@ -840,8 +871,11 @@ InstallableCommand::InstallableCommand() }); } +void InstallableCommand::preRun(ref store) {} + void InstallableCommand::run(ref store) { + preRun(store); auto 
installable = parseInstallable(store, _installable); run(store, std::move(installable)); } diff --git a/src/libcmd/package.nix b/src/libcmd/package.nix index c382f0e5760..21d7586a321 100644 --- a/src/libcmd/package.nix +++ b/src/libcmd/package.nix @@ -35,7 +35,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-cmd"; + pname = "determinate-nix-cmd"; inherit version; workDir = ./.; diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 8fbb54dd30d..d8e61b5b520 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -177,7 +177,7 @@ ReplExitStatus NixRepl::mainLoop() if (state->debugRepl) { debuggerNotice = " debugger"; } - notice("Nix %1%%2%\nType :? for help.", nixVersion, debuggerNotice); + notice("Nix %s\nType :? for help.", version(), debuggerNotice); } isFirstRepl = false; @@ -332,6 +332,7 @@ StorePath NixRepl::getDerivationPath(Value & v) auto drvPath = packageInfo->queryDrvPath(); if (!drvPath) throw Error("expression did not evaluate to a valid derivation (no 'drvPath' attribute)"); + state->waitForPath(*drvPath); if (!state->store->isValidPath(*drvPath)) throw Error("expression evaluated to invalid derivation '%s'", state->store->printStorePath(*drvPath)); return *drvPath; diff --git a/src/libexpr-c/nix_api_expr.cc b/src/libexpr-c/nix_api_expr.cc index 0dd9fa0a51d..bfbd0a9c361 100644 --- a/src/libexpr-c/nix_api_expr.cc +++ b/src/libexpr-c/nix_api_expr.cc @@ -71,6 +71,7 @@ nix_err nix_expr_eval_from_string( nix::Expr * parsedExpr = state->state.parseExprFromString(expr, state->state.rootPath(nix::CanonPath(path))); state->state.eval(parsedExpr, *value->value); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -82,6 +83,7 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, n try { state->state.callFunction(*fn->value, *arg->value, *value->value, nix::noPos); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } 
NIXC_CATCH_ERRS } @@ -100,6 +102,7 @@ nix_err nix_value_call_multi( try { state->state.callFunction(*fn->value, {internal_args.data(), nargs}, *value->value, nix::noPos); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -110,6 +113,7 @@ nix_err nix_value_force(nix_c_context * context, EvalState * state, nix_value * context->last_err_code = NIX_OK; try { state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -120,6 +124,7 @@ nix_err nix_value_force_deep(nix_c_context * context, EvalState * state, nix_val context->last_err_code = NIX_OK; try { state->state.forceValueDeep(*value->value); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index 7fd8233adec..b6a838284ef 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -194,6 +194,8 @@ ValueType nix_get_type(nix_c_context * context, const nix_value * value) switch (v.type()) { case nThunk: return NIX_TYPE_THUNK; + case nFailed: + return NIX_TYPE_FAILED; case nInt: return NIX_TYPE_INT; case nFloat: @@ -386,6 +388,7 @@ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value auto attr = v.attrs()->get(s); if (attr) { state->state.forceValue(*attr->value, nix::noPos); + state->state.waitForAllPaths(); return new_nix_value(attr->value, state->state.mem); } nix_set_err_msg(context, NIX_ERR_KEY, "missing attribute"); diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index 5bd45da9059..a01bfb28059 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -100,7 +100,10 @@ typedef enum { /** @brief External value from C++ plugins or C API * @see Externals */ - NIX_TYPE_EXTERNAL + NIX_TYPE_EXTERNAL, + /** @brief Failed value. Contains an exception that can be rethrown. 
+ */ + NIX_TYPE_FAILED, } ValueType; // forward declarations diff --git a/src/libexpr-c/package.nix b/src/libexpr-c/package.nix index 694fbc1fe78..ec92ecce105 100644 --- a/src/libexpr-c/package.nix +++ b/src/libexpr-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr-c"; + pname = "determinate-nix-expr-c"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh index 68a0b8dea7d..2311f3941c1 100644 --- a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh +++ b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh @@ -26,6 +26,12 @@ struct Arbitrary static Gen arbitrary(); }; +template<> +struct Arbitrary +{ + static Gen arbitrary(); +}; + template<> struct Arbitrary { diff --git a/src/libexpr-test-support/package.nix b/src/libexpr-test-support/package.nix index 5cb4adaa8c4..1879a571608 100644 --- a/src/libexpr-test-support/package.nix +++ b/src/libexpr-test-support/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/tests/value/context.cc b/src/libexpr-test-support/tests/value/context.cc index d6036601a94..8ce84fb51f5 100644 --- a/src/libexpr-test-support/tests/value/context.cc +++ b/src/libexpr-test-support/tests/value/context.cc @@ -16,6 +16,15 @@ Gen Arbitrary::arb }); } +Gen Arbitrary::arbitrary() +{ + return gen::map(gen::arbitrary(), [](StorePath storePath) { + return NixStringContextElem::Path{ + .storePath = storePath, + }; + }); +} + Gen Arbitrary::arbitrary() { return gen::mapcat( @@ -31,6 +40,8 @@ Gen Arbitrary::arbitrary() case 2: return gen::map( gen::arbitrary(), [](NixStringContextElem a) { return a; }); + case 3: + return gen::map(gen::arbitrary(), [](NixStringContextElem a) { return a; }); default: 
assert(false); } diff --git a/src/libexpr-tests/value/value.cc b/src/libexpr-tests/value/value.cc index 420db0f31b1..bd8f0da7121 100644 --- a/src/libexpr-tests/value/value.cc +++ b/src/libexpr-tests/value/value.cc @@ -13,7 +13,6 @@ TEST_F(ValueTest, unsetValue) { Value unsetValue; ASSERT_EQ(false, unsetValue.isValid()); - ASSERT_EQ(nThunk, unsetValue.type(true)); ASSERT_DEATH(unsetValue.type(), ""); } diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index 43f10da6eac..0f892f9c320 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -554,16 +554,17 @@ string_t AttrCursor::getStringWithContext() if (auto s = std::get_if(&cachedValue->second)) { bool valid = true; for (auto & c : s->second) { - const StorePath & path = std::visit( + const StorePath * path = std::visit( overloaded{ - [&](const NixStringContextElem::DrvDeep & d) -> const StorePath & { return d.drvPath; }, - [&](const NixStringContextElem::Built & b) -> const StorePath & { - return b.drvPath->getBaseStorePath(); + [&](const NixStringContextElem::DrvDeep & d) -> const StorePath * { return &d.drvPath; }, + [&](const NixStringContextElem::Built & b) -> const StorePath * { + return &b.drvPath->getBaseStorePath(); }, - [&](const NixStringContextElem::Opaque & o) -> const StorePath & { return o.path; }, + [&](const NixStringContextElem::Opaque & o) -> const StorePath * { return &o.path; }, + [&](const NixStringContextElem::Path & p) -> const StorePath * { return nullptr; }, }, c.raw); - if (!root->state.store->isValidPath(path)) { + if (!path || !root->state.store->isValidPath(*path)) { valid = false; break; } @@ -711,6 +712,7 @@ StorePath AttrCursor::forceDerivation() /* The eval cache contains 'drvPath', but the actual path has been garbage-collected. So force it to be regenerated. 
*/ aDrvPath->forceValue(); + root->state.waitForPath(drvPath); if (!root->state.store->isValidPath(drvPath)) throw Error( "don't know how to recreate store derivation '%s'!", root->state.store->printStorePath(drvPath)); diff --git a/src/libexpr/eval-gc.cc b/src/libexpr/eval-gc.cc index 0d25f38f64d..c1e974e053b 100644 --- a/src/libexpr/eval-gc.cc +++ b/src/libexpr/eval-gc.cc @@ -46,6 +46,88 @@ static void * oomHandler(size_t requested) throw std::bad_alloc(); } +static size_t getFreeMem() +{ + /* On Linux, use the `MemAvailable` or `MemFree` fields from + /proc/cpuinfo. */ +# ifdef __linux__ + { + std::unordered_map fields; + for (auto & line : + tokenizeString>(readFile(std::filesystem::path("/proc/meminfo")), "\n")) { + auto colon = line.find(':'); + if (colon == line.npos) + continue; + fields.emplace(line.substr(0, colon), trim(line.substr(colon + 1))); + } + + auto i = fields.find("MemAvailable"); + if (i == fields.end()) + i = fields.find("MemFree"); + if (i != fields.end()) { + auto kb = tokenizeString>(i->second, " "); + if (kb.size() == 2 && kb[1] == "kB") + return string2Int(kb[0]).value_or(0) * 1024; + } + } +# endif + + /* On non-Linux systems, conservatively assume that 25% of memory is free. */ + long pageSize = sysconf(_SC_PAGESIZE); + long pages = sysconf(_SC_PHYS_PAGES); + if (pageSize > 0 && pages > 0) + return (static_cast(pageSize) * static_cast(pages)) / 4; + return 0; +} + +/** + * When a thread goes into a coroutine, we lose its original sp until + * control flow returns to the thread. This causes Boehm GC to crash + * since it will scan memory between the coroutine's sp and the + * original stack base of the thread. Therefore, we detect when the + * current sp is outside of the original thread stack and push the + * entire thread stack instead, as an approximation. + * + * This is not optimal, because it causes the stack below sp to be + * scanned. 
However, we usually we don't have active coroutines during + * evaluation, so this is acceptable. + * + * Note that we don't scan coroutine stacks. It's currently assumed + * that we don't have GC roots in coroutines. + */ +void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id) +{ + void *& sp = *sp_ptr; + auto pthread_id = reinterpret_cast(_pthread_id); + size_t osStackSize; + char * osStackHi; + char * osStackLo; + +# ifdef __APPLE__ + osStackSize = pthread_get_stacksize_np(pthread_id); + osStackHi = (char *) pthread_get_stackaddr_np(pthread_id); + osStackLo = osStackHi - osStackSize; +# else + pthread_attr_t pattr; + if (pthread_attr_init(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_init failed"); +# ifdef HAVE_PTHREAD_GETATTR_NP + if (pthread_getattr_np(pthread_id, &pattr)) + throw Error("fixupBoehmStackPointer: pthread_getattr_np failed"); +# else +# error "Need `pthread_attr_get_np`" +# endif + if (pthread_attr_getstack(&pattr, (void **) &osStackLo, &osStackSize)) + throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed"); + if (pthread_attr_destroy(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed"); + osStackHi = osStackLo + osStackSize; +# endif + + if (sp >= osStackHi || sp < osStackLo) // sp is outside the os stack + sp = osStackLo; +} + static inline void initGCReal() { /* Initialise the Boehm garbage collector. */ @@ -76,8 +158,11 @@ static inline void initGCReal() GC_set_oom_fn(oomHandler); - /* Set the initial heap size to something fairly big (25% of - physical RAM, up to a maximum of 384 MiB) so that in most cases + GC_set_sp_corrector(&fixupBoehmStackPointer); + assert(GC_get_sp_corrector()); + + /* Set the initial heap size to something fairly big (80% of + free RAM, up to a maximum of 4 GiB) so that in most cases we don't need to garbage collect at all. (Collection has a fairly significant overhead.) 
The heap size can be overridden through libgc's GC_INITIAL_HEAP_SIZE environment variable. We @@ -88,15 +173,10 @@ static inline void initGCReal() if (!getEnv("GC_INITIAL_HEAP_SIZE")) { size_t size = 32 * 1024 * 1024; # if HAVE_SYSCONF && defined(_SC_PAGESIZE) && defined(_SC_PHYS_PAGES) - size_t maxSize = 384 * 1024 * 1024; - long pageSize = sysconf(_SC_PAGESIZE); - long pages = sysconf(_SC_PHYS_PAGES); - if (pageSize != -1) - size = (pageSize * pages) / 4; // 25% of RAM - if (size > maxSize) - size = maxSize; + size_t maxSize = 4ULL * 1024 * 1024 * 1024; + auto free = getFreeMem(); + size = std::max(size, std::min((size_t) (free * 0.5), maxSize)); # endif - debug("setting initial heap size to %1% bytes", size); GC_expand_hp(size); } } diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 04c6193885e..27205864b8b 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -91,9 +91,19 @@ bool EvalSettings::isPseudoUrl(std::string_view s) std::string EvalSettings::resolvePseudoUrl(std::string_view url) { - if (hasPrefix(url, "channel:")) - return "https://channels.nixos.org/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; - else + if (hasPrefix(url, "channel:")) { + auto realUrl = "https://channels.nixos.org/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; + static bool haveWarned = false; + warnOnce( + haveWarned, + "Channels are deprecated in favor of flakes in Determinate Nix. " + "Instead of '%s', use '%s'. " + "See https://zero-to-nix.com for a guide to Nix flakes. 
" + "For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34.", + url, + realUrl); + return realUrl; + } else return std::string(url); } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index ab3f7b3ff5d..46393b79c5e 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -25,6 +25,8 @@ #include "nix/fetchers/tarball.hh" #include "nix/fetchers/input-cache.hh" #include "nix/util/current-process.hh" +#include "nix/store/async-path-writer.hh" +#include "nix/expr/parallel-eval.hh" #include "parser-tab.hh" @@ -44,6 +46,11 @@ #include #include #include +#include + +#ifndef _WIN32 // TODO use portable implementation +# include +#endif #include "nix/util/strings-inline.hh" @@ -155,6 +162,8 @@ std::string_view showType(ValueType type, bool withArticle) return WA("a", "float"); case nThunk: return WA("a", "thunk"); + case nFailed: + return WA("a", "failure"); } unreachable(); } @@ -193,20 +202,36 @@ PosIdx Value::determinePos(const PosIdx pos) const return attrs()->pos; case tLambda: return lambda().fun->pos; +#if 0 + // FIXME: disabled because reading from an app is racy. 
case tApp: return app().left->determinePos(pos); +#endif default: return pos; } #pragma GCC diagnostic pop } -bool Value::isTrivial() const +template<> +bool ValueStorage::isTrivial() const { - return !isa() - && (!isa() - || (dynamic_cast(thunk().expr) && ((ExprAttrs *) thunk().expr)->dynamicAttrs->empty()) - || dynamic_cast(thunk().expr) || dynamic_cast(thunk().expr)); + auto p1_ = p1; // must acquire before reading p0, since thunks can change + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) { + bool isApp = p1_ & discriminatorMask; + if (isApp) + return false; + auto expr = untagPointer(p1_); + return (dynamic_cast(expr) && ((ExprAttrs *) expr)->dynamicAttrs->empty()) + || dynamic_cast(expr) || dynamic_cast(expr); + } + + else + return true; } static Symbol getName(const AttrName & name, EvalState & state, Env & env) @@ -300,6 +325,7 @@ EvalState::EvalState( , debugRepl(nullptr) , debugStop(false) , trylevel(0) + , asyncPathWriter(AsyncPathWriter::make(store)) , srcToStore(make_ref()) , importResolutionCache(make_ref()) , fileEvalCache(make_ref()) @@ -311,6 +337,7 @@ EvalState::EvalState( , baseEnv(mem.allocEnv(BASE_ENV_SIZE)) #endif , staticBaseEnv{std::make_shared(nullptr, nullptr)} + , executor{make_ref(settings)} { corepkgsFS->setPathDisplay(""); internalFS->setPathDisplay("«nix-internal»", ""); @@ -451,7 +478,8 @@ void EvalState::checkURI(const std::string & uri) Value * EvalState::addConstant(const std::string & name, Value & v, Constant info) { Value * v2 = allocValue(); - *v2 = v; + // Do a raw copy since `operator =` barfs on thunks. + memcpy((char *) v2, (char *) &v, sizeof(Value)); addConstant(name, v2, info); return v2; } @@ -467,8 +495,10 @@ void EvalState::addConstant(const std::string & name, Value * v, Constant info) We might know the type of a thunk in advance, so be allowed to just write it down in that case. 
*/ - if (auto gotType = v->type(true); gotType != nThunk) - assert(info.type == gotType); + if (v->isFinished()) { + if (auto gotType = v->type(); gotType != nThunk) + assert(info.type == gotType); + } /* Install value the base environment. */ staticBaseEnv->vars.emplace_back(symbols.create(name), baseEnvDispl); @@ -654,7 +684,7 @@ void printStaticEnvBindings(const SymbolTable & st, const StaticEnv & se) // just for the current level of Env, not the whole chain. void printWithBindings(const SymbolTable & st, const Env & env) { - if (!env.values[0]->isThunk()) { + if (env.values[0]->isFinished()) { std::cout << "with: "; std::cout << ANSI_MAGENTA; auto j = env.values[0]->attrs()->begin(); @@ -709,7 +739,7 @@ void mapStaticEnvBindings(const SymbolTable & st, const StaticEnv & se, const En if (env.up && se.up) { mapStaticEnvBindings(st, *se.up, *env.up, vm); - if (se.isWith && !env.values[0]->isThunk()) { + if (se.isWith && env.values[0]->isFinished()) { // add 'with' bindings. for (auto & j : *env.values[0]->attrs()) vm.insert_or_assign(std::string(st[j.name]), j.value); @@ -943,7 +973,14 @@ void EvalState::mkPos(Value & v, PosIdx p) auto origin = positions.originOf(p); if (auto path = std::get_if(&origin)) { auto attrs = buildBindings(3); - attrs.alloc(s.file).mkString(path->path.abs(), mem); + if (path->accessor == rootFS && store->isInStore(path->path.abs())) + // FIXME: only do this for virtual store paths? 
+ attrs.alloc(s.file).mkString( + path->path.abs(), + {NixStringContextElem::Path{.storePath = store->toStorePath(path->path.abs()).first}}, + mem); + else + attrs.alloc(s.file).mkString(path->path.abs(), mem); makePositionThunks(*this, p, attrs.alloc(s.line), attrs.alloc(s.column)); v.mkAttrs(attrs); } else @@ -991,6 +1028,7 @@ std::string EvalState::mkSingleDerivedPathStringRaw(const SingleDerivedPath & p) auto optStaticOutputPath = std::visit( overloaded{ [&](const SingleDerivedPath::Opaque & o) { + waitForPath(o.path); auto drv = store->readDerivation(o.path); auto i = drv.outputs.find(b.output); if (i == drv.outputs.end()) @@ -1066,10 +1104,9 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) * from a thunk, ensuring that every file is parsed/evaluated only * once (via the thunk stored in `EvalState::fileEvalCache`). */ -struct ExprParseFile : Expr, gc +struct ExprParseFile : Expr { - // FIXME: make this a reference (see below). - SourcePath path; + SourcePath & path; bool mustBeTrivial; ExprParseFile(SourcePath & path, bool mustBeTrivial) @@ -1120,18 +1157,14 @@ void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) } Value * vExpr; - // FIXME: put ExprParseFile on the stack instead of the heap once - // https://github.com/NixOS/nix/pull/13930 is merged. That will ensure - // the post-condition that `expr` is unreachable after - // `forceValue()` returns. - auto expr = new ExprParseFile{*resolvedPath, mustBeTrivial}; + ExprParseFile expr{*resolvedPath, mustBeTrivial}; fileEvalCache->try_emplace_and_cvisit( *resolvedPath, nullptr, [&](auto & i) { vExpr = allocValue(); - vExpr->mkThunk(&baseEnv, expr); + vExpr->mkThunk(&baseEnv, &expr); i.second = vExpr; }, [&](auto & i) { vExpr = i.second; }); @@ -1437,7 +1470,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) state.attrSelects[pos2]++; } - state.forceValue(*vAttrs, (pos2 ? pos2 : this->pos)); + state.forceValue(*vAttrs, pos2 ? 
pos2 : this->pos); } catch (Error & e) { if (pos2) { @@ -1496,6 +1529,8 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v) v.mkLambda(&env, this); } +thread_local size_t EvalState::callDepth = 0; + void EvalState::callFunction(Value & fun, std::span args, Value & vRes, const PosIdx pos) { auto _level = addCallDepth(pos); @@ -1511,15 +1546,16 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, forceValue(fun, pos); - Value vCur(fun); + Value vCur = fun; auto makeAppChain = [&]() { - vRes = vCur; for (auto arg : args) { auto fun2 = allocValue(); - *fun2 = vRes; - vRes.mkPrimOpApp(fun2, arg); + *fun2 = vCur; + vCur.reset(); + vCur.mkPrimOpApp(fun2, arg); } + vRes = vCur; }; const Attr * functor; @@ -1615,6 +1651,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, lambda.name ? concatStrings("'", symbols[lambda.name], "'") : "anonymous lambda") : nullptr; + vCur.reset(); lambda.body->eval(*this, env2, vCur); } catch (Error & e) { if (loggerSettings.showTrace.get()) { @@ -1649,7 +1686,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, primOpCalls[fn->name]++; try { - fn->fun(*this, vCur.determinePos(noPos), args.data(), vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->fun(*this, pos, args.data(), vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1671,6 +1710,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, assert(primOp->isPrimOp()); auto arity = primOp->primOp()->arity; auto argsLeft = arity - argsDone; + assert(argsLeft); if (args.size() < argsLeft) { /* We still don't have enough arguments, so extend the tPrimOpApp chain. */ @@ -1699,7 +1739,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, // 2. Create a fake env (arg1, arg2, etc.) 
and a fake expr (arg1: arg2: etc: builtins.name arg1 arg2 // etc) // so the debugger allows to inspect the wrong parameters passed to the builtin. - fn->fun(*this, vCur.determinePos(noPos), vArgs, vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->fun(*this, pos, vArgs, vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1716,6 +1758,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, heap-allocate a copy and use that instead. */ Value * args2[] = {allocValue(), args[0]}; *args2[0] = vCur; + vCur.reset(); try { callFunction(*functor->value, args2, vCur, functor->pos); } catch (Error & e) { @@ -1903,8 +1946,12 @@ void ExprOpImpl::eval(EvalState & state, Env & env, Value & v) || state.evalBool(env, e2, pos, "in the right operand of the IMPL (->) operator")); } -void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2) +void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) { + Value v1, v2; + state.evalAttrs(env, e1, v1, pos, "in the left operand of the update (//) operator"); + state.evalAttrs(env, e2, v2, pos, "in the right operand of the update (//) operator"); + state.nrOpUpdates++; const Bindings & bindings1 = *v1.attrs(); @@ -1978,38 +2025,6 @@ void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2) state.nrOpUpdateValuesCopied += v.attrs()->size(); } -void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) -{ - UpdateQueue q; - evalForUpdate(state, env, q); - - v.mkAttrs(&Bindings::emptyBindings); - for (auto & rhs : std::views::reverse(q)) { - /* Remember that queue is sorted rightmost attrset first. 
*/ - eval(state, /*v=*/v, /*v1=*/v, /*v2=*/rhs); - } -} - -void Expr::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) -{ - Value v; - state.evalAttrs(env, this, v, getPos(), errorCtx); - q.push_back(v); -} - -void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q) -{ - /* Output rightmost attrset first to the merge queue as the one - with the most priority. */ - e2->evalForUpdate(state, env, q, "in the right operand of the update (//) operator"); - e1->evalForUpdate(state, env, q, "in the left operand of the update (//) operator"); -} - -void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) -{ - evalForUpdate(state, env, q); -} - void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v) { Value v1; @@ -2129,7 +2144,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) } else if (firstType == nFloat) { v.mkFloat(nf); } else if (firstType == nPath) { - if (!context.empty()) + if (hasContext(context)) state.error("a string that refers to a store path cannot be appended to a path") .atPos(pos) .withFrame(env, *this) @@ -2157,16 +2172,6 @@ void ExprPos::eval(EvalState & state, Env & env, Value & v) state.mkPos(v, pos); } -void ExprBlackHole::eval(EvalState & state, [[maybe_unused]] Env & env, Value & v) -{ - throwInfiniteRecursionError(state, v); -} - -[[gnu::noinline]] [[noreturn]] void ExprBlackHole::throwInfiniteRecursionError(EvalState & state, Value & v) -{ - state.error("infinite recursion encountered").atPos(v.determinePos(noPos)).debugThrow(); -} - // always force this to be separate, otherwise forceValue may inline it and take // a massive perf hit [[gnu::noinline]] @@ -2199,6 +2204,7 @@ void EvalState::forceValueDeep(Value & v) for (auto & i : *v.attrs()) try { // If the value is a thunk, we're evaling. Otherwise no trace necessary. 
+ // FIXME: race, thunk might be updated by another thread auto dts = state.debugRepl && i.value->isThunk() ? makeDebugTraceStacker( state, *i.value->thunk().expr, @@ -2351,12 +2357,15 @@ std::string_view EvalState::forceStringNoCtx(Value & v, const PosIdx pos, std::s { auto s = forceString(v, pos, errorCtx); if (v.context()) { - error( - "the string '%1%' is not allowed to refer to a store path (such as '%2%')", - v.string_view(), - (*v.context()->begin())->view()) - .withTrace(pos, errorCtx) - .debugThrow(); + NixStringContext context; + copyContext(v, context); + if (hasContext(context)) + error( + "the string '%1%' is not allowed to refer to a store path (such as '%2%')", + v.string_view(), + (*v.context()->begin())->view()) + .withTrace(pos, errorCtx) + .debugThrow(); } return s; } @@ -2411,14 +2420,21 @@ BackedStringView EvalState::coerceToString( } if (v.type() == nPath) { + // FIXME: instead of copying the path to the store, we could + // return a virtual store path that lazily copies the path to + // the store in devirtualize(). if (!canonicalizePath && !copyToStore) { // FIXME: hack to preserve path literals that end in a // slash, as in /foo/${x}. 
return v.pathStrView(); } else if (copyToStore) { - return store->printStorePath(copyPathToStore(context, v.path())); + return store->printStorePath(copyPathToStore(context, v.path(), v.determinePos(pos))); } else { - return std::string{v.path().path.abs()}; + auto path = v.path(); + if (path.accessor == rootFS && store->isInStore(path.path.abs())) { + context.insert(NixStringContextElem::Path{.storePath = store->toStorePath(path.path.abs()).first}); + } + return std::string(path.path.abs()); } } @@ -2490,7 +2506,7 @@ BackedStringView EvalState::coerceToString( .debugThrow(); } -StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path) +StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos) { if (nix::isDerivation(path.path.abs())) error("file names are not allowed to end in '%1%'", drvExtension).debugThrow(); @@ -2503,7 +2519,7 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat *store, path.resolveSymlinks(SymlinkResolution::Ancestors), settings.readOnlyMode ? 
FetchMode::DryRun : FetchMode::Copy, - path.baseName(), + computeBaseName(path, pos), ContentAddressMethod::Raw::NixArchive, nullptr, repair); @@ -2555,7 +2571,9 @@ EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & con auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (auto storePath = store->maybeParseStorePath(path)) return *storePath; - error("path '%1%' is not in the Nix store", path).withTrace(pos, errorCtx).debugThrow(); + error("cannot coerce '%s' to a store path because it is not a subpath of the Nix store", path) + .withTrace(pos, errorCtx) + .debugThrow(); } std::pair EvalState::coerceToSingleDerivedPathUnchecked( @@ -2579,6 +2597,9 @@ std::pair EvalState::coerceToSingleDerivedP .debugThrow(); }, [&](NixStringContextElem::Built && b) -> SingleDerivedPath { return std::move(b); }, + [&](NixStringContextElem::Path && p) -> SingleDerivedPath { + error("string '%s' has no context", s).withTrace(pos, errorCtx).debugThrow(); + }, }, ((NixStringContextElem &&) *context.begin()).raw); return { @@ -2813,8 +2834,11 @@ void EvalState::assertEqValues(Value & v1, Value & v2, const PosIdx pos, std::st } return; - case nThunk: // Must not be left by forceValue - assert(false); + // Cannot be returned by forceValue(). + case nThunk: + case nFailed: + unreachable(); + default: // Note that we pass compiler flags that should make `default:` unreachable. // Also note that this probably ran after `eqValues`, which implements // the same logic more efficiently (without having to unwind stacks), @@ -2906,8 +2930,11 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v // !!! return v1.fpoint() == v2.fpoint(); - case nThunk: // Must not be left by forceValue - assert(false); + // Cannot be returned by forceValue(). + case nThunk: + case nFailed: + unreachable(); + default: // Note that we pass compiler flags that should make `default:` unreachable. 
error("eqValues: cannot compare %1% with %2%", showType(v1), showType(v2)) .withTrace(pos, errorCtx) @@ -3013,6 +3040,11 @@ void EvalState::printStatistics() topObj["nrOpUpdates"] = nrOpUpdates.load(); topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied.load(); topObj["nrThunks"] = nrThunks.load(); + topObj["nrThunksAwaited"] = nrThunksAwaited.load(); + topObj["nrThunksAwaitedSlow"] = nrThunksAwaitedSlow.load(); + topObj["nrSpuriousWakeups"] = nrSpuriousWakeups.load(); + topObj["maxWaiting"] = maxWaiting.load(); + topObj["waitingTime"] = microsecondsWaiting / (double) 1000000; topObj["nrAvoided"] = nrAvoided.load(); topObj["nrLookups"] = nrLookups.load(); topObj["nrPrimOpCalls"] = nrPrimOpCalls.load(); @@ -3064,10 +3096,10 @@ void EvalState::printStatistics() } if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") { + auto list = json::array(); + symbols.dump([&](std::string_view s) { list.emplace_back(std::string(s)); }); // XXX: overrides earlier assignment - topObj["symbols"] = json::array(); - auto & list = topObj["symbols"]; - symbols.dump([&](std::string_view s) { list.emplace_back(s); }); + topObj["symbols"] = std::move(list); } if (outPath == "-") { std::cerr << topObj.dump(2) << std::endl; @@ -3254,11 +3286,11 @@ Expr * EvalState::parse( const std::shared_ptr & staticEnv) { DocCommentMap tmpDocComments; // Only used when not origin is not a SourcePath - DocCommentMap * docComments = &tmpDocComments; + auto * docComments = &tmpDocComments; if (auto sourcePath = std::get_if(&origin)) { - auto [it, _] = positionToDocComment.try_emplace(*sourcePath); - docComments = &it->second; + auto [it, _] = positionToDocComment.lock()->try_emplace(*sourcePath, make_ref()); + docComments = &*it->second; } auto result = @@ -3276,12 +3308,14 @@ DocComment EvalState::getDocCommentForPos(PosIdx pos) if (!path) return {}; - auto table = positionToDocComment.find(*path); - if (table == positionToDocComment.end()) + auto positionToDocComment_ = 
positionToDocComment.readLock(); + + auto table = positionToDocComment_->find(*path); + if (table == positionToDocComment_->end()) return {}; - auto it = table->second.find(pos); - if (it == table->second.end()) + auto it = table->second->find(pos); + if (it == table->second->end()) return {}; return it->second; } @@ -3315,4 +3349,24 @@ void forceNoNullByte(std::string_view s, std::function pos) } } +void EvalState::waitForPath(const StorePath & path) +{ + asyncPathWriter->waitForPath(path); +} + +void EvalState::waitForPath(const SingleDerivedPath & path) +{ + std::visit( + overloaded{ + [&](const DerivedPathOpaque & p) { waitForPath(p.path); }, + [&](const SingleDerivedPathBuilt & p) { waitForPath(*p.drvPath); }, + }, + path.raw()); +} + +void EvalState::waitForAllPaths() +{ + asyncPathWriter->waitForAllPaths(); +} + } // namespace nix diff --git a/src/libexpr/include/nix/expr/eval-inline.hh b/src/libexpr/include/nix/expr/eval-inline.hh index e8aa380fdb0..35b54926157 100644 --- a/src/libexpr/include/nix/expr/eval-inline.hh +++ b/src/libexpr/include/nix/expr/eval-inline.hh @@ -33,6 +33,9 @@ Value * EvalMemory::allocValue() GC_malloc_many returns a linked list of objects of the given size, where the first word of each object is also the pointer to the next object in the list. This also means that we have to explicitly clear the first word of every object we take. */ + thread_local static std::shared_ptr valueAllocCache{ + std::allocate_shared(traceable_allocator(), nullptr)}; + if (!*valueAllocCache) { *valueAllocCache = GC_malloc_many(sizeof(Value)); if (!*valueAllocCache) @@ -63,6 +66,9 @@ Env & EvalMemory::allocEnv(size_t size) #if NIX_USE_BOEHMGC if (size == 1) { /* see allocValue for explanations. 
*/ + thread_local static std::shared_ptr env1AllocCache{ + std::allocate_shared(traceable_allocator(), nullptr)}; + if (!*env1AllocCache) { *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *)); if (!*env1AllocCache) @@ -82,27 +88,68 @@ Env & EvalMemory::allocEnv(size_t size) return *env; } -[[gnu::always_inline]] -void EvalState::forceValue(Value & v, const PosIdx pos) +/** + * An identifier of the current thread for deadlock detection, stored + * in p0 of pending/awaited thunks. We're not using std::thread::id + * because it's not guaranteed to fit. + */ +extern thread_local uint32_t myEvalThreadId; + +template +void ValueStorage>>::force( + EvalState & state, PosIdx pos) { - if (v.isThunk()) { - Env * env = v.thunk().env; - assert(env || v.isBlackhole()); - Expr * expr = v.thunk().expr; + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk) { try { - v.mkBlackhole(); - // checkInterrupt(); - if (env) [[likely]] - expr->eval(*this, *env, v); - else - ExprBlackHole::throwInfiniteRecursionError(*this, v); + // The value we get here is only valid if we can set the + // thunk to pending. + auto p1_ = p1; + + // Atomically set the thunk to "pending". + if (!p0.compare_exchange_strong( + p0_, + pdPending | (myEvalThreadId << discriminatorBits), + std::memory_order_acquire, + std::memory_order_acquire)) { + pd = static_cast(p0_ & discriminatorMask); + if (pd == pdPending || pd == pdAwaited) { + // The thunk is already "pending" or "awaited", so + // we need to wait for it. + p0_ = waitOnThunk(state, p0_); + goto done; + } + assert(pd != pdThunk); + // Another thread finished this thunk, no need to wait. 
+ goto done; + } + + bool isApp = p1_ & discriminatorMask; + if (isApp) { + auto left = untagPointer(p0_); + auto right = untagPointer(p1_); + state.callFunction(*left, *right, (Value &) *this, pos); + } else { + auto env = untagPointer(p0_); + auto expr = untagPointer(p1_); + expr->eval(state, *env, (Value &) *this); + } } catch (...) { - v.mkThunk(env, expr); - tryFixupBlackHolePos(v, pos); + state.tryFixupBlackHolePos((Value &) *this, pos); + setStorage(new Value::Failed{.ex = std::current_exception()}); throw; } - } else if (v.isApp()) - callFunction(*v.app().left, *v.app().right, v, pos); + } + + else if (pd == pdPending || pd == pdAwaited) + p0_ = waitOnThunk(state, p0_); + +done: + if (InternalType(p0_ & 0xff) == tFailed) + std::rethrow_exception((std::bit_cast(p1))->ex); } [[gnu::always_inline]] diff --git a/src/libexpr/include/nix/expr/eval-settings.hh b/src/libexpr/include/nix/expr/eval-settings.hh index 5dbef9272b9..f367541ec2f 100644 --- a/src/libexpr/include/nix/expr/eval-settings.hh +++ b/src/libexpr/include/nix/expr/eval-settings.hh @@ -91,7 +91,7 @@ struct EvalSettings : Config - `$HOME/.nix-defexpr/channels` - The [user channel link](@docroot@/command-ref/files/default-nix-expression.md#user-channel-link), pointing to the current state of [channels](@docroot@/command-ref/files/channels.md) for the current user. + The user channel link pointing to the current state of channels for the current user. - `nixpkgs=$NIX_STATE_DIR/profiles/per-user/root/channels/nixpkgs` @@ -101,7 +101,7 @@ struct EvalSettings : Config The current state of all channels for the `root` user. - These files are set up by the [Nix installer](@docroot@/installation/installing-binary.md). + These files are set up by the Nix installer. See [`NIX_STATE_DIR`](@docroot@/command-ref/env-common.md#env-NIX_STATE_DIR) for details on the environment variable. 
> **Note** @@ -142,7 +142,7 @@ struct EvalSettings : Config R"( If set to `true`, the Nix evaluator doesn't allow access to any files outside of - [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath), + [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath) or to URIs outside of [`allowed-uris`](@docroot@/command-ref/conf-file.md#conf-allowed-uris). )"}; @@ -271,7 +271,7 @@ struct EvalSettings : Config "ignore-try", R"( If set to true, ignore exceptions inside 'tryEval' calls when evaluating Nix expressions in - debug mode (using the --debugger flag). By default the debugger pauses on all exceptions. + debug mode (using the --debugger flag). By default, the debugger pauses on all exceptions. )"}; Setting traceVerbose{ @@ -289,7 +289,7 @@ struct EvalSettings : Config "debugger-on-trace", R"( If set to true and the `--debugger` flag is given, the following functions - enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break): + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). * [`builtins.trace`](@docroot@/language/builtins.md#builtins-trace) * [`builtins.traceVerbose`](@docroot@/language/builtins.md#builtins-traceVerbose) @@ -305,7 +305,7 @@ struct EvalSettings : Config "debugger-on-warn", R"( If set to true and the `--debugger` flag is given, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) - will enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). This is useful for debugging warnings in third-party Nix code. @@ -319,7 +319,7 @@ struct EvalSettings : Config R"( If set to true, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) throws an error when logging a warning. - This will give you a stack trace that leads to the location of the warning. 
+ This gives you a stack trace that leads to the location of the warning. This is useful for finding information about warnings in third-party Nix code when you can not start the interactive debugger, such as when Nix is called from a non-interactive script. See [`debugger-on-warn`](#conf-debugger-on-warn). @@ -361,6 +361,44 @@ struct EvalSettings : Config The default value is chosen to balance performance and memory usage. On 32 bit systems where memory is scarce, the default is a large value to reduce the amount of allocations. )"}; + + Setting lazyTrees{ + this, + false, + "lazy-trees", + R"( + If set to true, flakes and trees fetched by [`builtins.fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) are only copied to the Nix store when they're used as a dependency of a derivation. This avoids copying (potentially large) source trees unnecessarily. + )"}; + + // FIXME: this setting should really be in libflake, but it's + // currently needed in mountInput(). + Setting lazyLocks{ + this, + false, + "lazy-locks", + R"( + If enabled, Nix only includes NAR hashes in lock file entries if they're necessary to lock the input (i.e. when there is no other attribute that allows the content to be verified, like a Git revision). + This is not backward compatible with older versions of Nix. + If disabled, lock file entries always contain a NAR hash. + )"}; + + Setting evalCores{ + this, + 1, + "eval-cores", + R"( + The number of threads used to evaluate Nix expressions. This currently affects the following commands: + + * `nix search` + * `nix flake check` + * `nix flake show` + * `nix eval --json` + * Any evaluation that uses `builtins.parallel` + + The value `0` causes Nix to use all available CPU cores in the system. + + Note that enabling the debugger (`--debugger`) disables multi-threaded evaluation. + )"}; }; /** @@ -368,4 +406,10 @@ struct EvalSettings : Config */ std::filesystem::path getNixDefExpr(); +/** + * Stack size for evaluator threads. 
This used to be 64 MiB, but macOS as deployed on GitHub Actions has a + * hard limit slightly under that, so we round it down a bit. + */ +constexpr size_t evalStackSize = 60 * 1024 * 1024; + } // namespace nix diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index b70c9db789d..c9cfb1a573b 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -51,10 +51,12 @@ struct SingleDerivedPath; enum RepairFlag : bool; struct MemorySourceAccessor; struct MountedSourceAccessor; +struct AsyncPathWriter; namespace eval_cache { class EvalCache; } +struct Executor; /** * Increments a count on construction and decrements on destruction. @@ -432,6 +434,8 @@ public: std::list debugTraces; boost::unordered_flat_map> exprEnvs; + ref asyncPathWriter; + const std::shared_ptr getStaticEnv(const Expr & expr) const { auto i = exprEnvs.find(&expr); @@ -496,10 +500,11 @@ private: * Associate source positions of certain AST nodes with their preceding doc comment, if they have one. * Grouped by file. */ - boost::unordered_flat_map positionToDocComment; + SharedSync>> positionToDocComment; LookupPath lookupPath; + // FIXME: make thread-safe. boost::unordered_flat_map, StringViewHash, std::equal_to<>> lookupPathResolved; @@ -588,7 +593,12 @@ public: /** * Mount an input on the Nix store. */ - StorePath mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor); + StorePath mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash = false); /** * Parse a Nix expression from the specified file. @@ -650,7 +660,10 @@ public: * application, call the function and overwrite `v` with the * result. Otherwise, this is a no-op. 
*/ - inline void forceValue(Value & v, const PosIdx pos); + inline void forceValue(Value & v, const PosIdx pos) + { + v.force(*this, pos); + } void tryFixupBlackHolePos(Value & v, PosIdx pos); @@ -708,6 +721,12 @@ public: std::optional tryAttrsToString( const PosIdx pos, Value & v, NixStringContext & context, bool coerceMore = false, bool copyToStore = true); + StorePath devirtualize(const StorePath & path, StringMap * rewrites = nullptr); + + SingleDerivedPath devirtualize(const SingleDerivedPath & path, StringMap * rewrites = nullptr); + + std::string devirtualize(std::string_view s, const NixStringContext & context); + /** * String coercion. * @@ -725,7 +744,19 @@ public: bool copyToStore = true, bool canonicalizePath = true); - StorePath copyPathToStore(NixStringContext & context, const SourcePath & path); + StorePath copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos); + + /** + * Compute the base name for a `SourcePath`. For non-store paths, + * this is just `SourcePath::baseName()`. But for store paths, for + * backwards compatibility, it needs to be `-source`, + * i.e. as if the path were copied to the Nix store. This results + * in a "double-copied" store path like + * `/nix/store/--source`. We don't need to + * materialize /nix/store/-source though. Still, this + * requires reading/hashing the path twice. + */ + std::string computeBaseName(const SourcePath & path, PosIdx pos); /** * Path coercion. @@ -868,10 +899,11 @@ private: const std::shared_ptr & staticEnv); /** - * Current Nix call stack depth, used with `max-call-depth` setting to throw stack overflow hopefully before we run - * out of system stack. + * Current Nix call stack depth, used with `max-call-depth` + * setting to throw stack overflow hopefully before we run out of + * system stack. 
*/ - size_t callDepth = 0; + thread_local static size_t callDepth; public: @@ -1018,6 +1050,10 @@ public: DocComment getDocCommentForPos(PosIdx pos); + void waitForPath(const StorePath & path); + void waitForPath(const SingleDerivedPath & path); + void waitForAllPaths(); + private: /** @@ -1043,8 +1079,18 @@ private: Counter nrPrimOpCalls; Counter nrFunctionCalls; +public: + Counter nrThunksAwaited; + Counter nrThunksAwaitedSlow; + Counter microsecondsWaiting; + Counter currentlyWaiting; + Counter maxWaiting; + Counter nrSpuriousWakeups; + +private: bool countCalls; + // FIXME: make thread-safe. typedef boost::unordered_flat_map> PrimOpCalls; PrimOpCalls primOpCalls; @@ -1056,6 +1102,7 @@ private: void incrFunctionCall(ExprLambda * fun); + // FIXME: make thread-safe. typedef boost::unordered_flat_map> AttrSelects; AttrSelects attrSelects; @@ -1073,6 +1120,17 @@ private: friend struct Value; friend class ListBuilder; + +public: + /** + * Worker threads manager. + * + * Note: keep this last to ensure that it's destroyed first, so we + * don't have any background work items (e.g. from + * `builtins.parallel`) referring to a partially destroyed + * `EvalState`. 
+ */ + ref executor; }; struct DebugTraceStacker diff --git a/src/libexpr/include/nix/expr/meson.build b/src/libexpr/include/nix/expr/meson.build index 2b0fbc40603..9f676b230f1 100644 --- a/src/libexpr/include/nix/expr/meson.build +++ b/src/libexpr/include/nix/expr/meson.build @@ -24,6 +24,7 @@ headers = [ config_pub_h ] + files( 'get-drvs.hh', 'json-to-value.hh', 'nixexpr.hh', + 'parallel-eval.hh', 'parser-state.hh', 'primops.hh', 'print-ambiguous.hh', diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index df39ecdde91..9bce1a9b91a 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -8,7 +8,6 @@ #include #include -#include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" #include "nix/expr/symbol-table.hh" #include "nix/expr/eval-error.hh" @@ -91,8 +90,6 @@ typedef std::vector AttrSelectionPath; std::string showAttrSelectionPath(const SymbolTable & symbols, std::span attrPath); -using UpdateQueue = SmallTemporaryValueVector; - /* Abstract syntax of Nix expressions. */ struct Expr @@ -123,14 +120,6 @@ struct Expr * of thunks allocated. */ virtual Value * maybeThunk(EvalState & state, Env & env); - - /** - * Only called when performing an attrset update: `//` or similar. - * Instead of writing to a Value &, this function writes to an UpdateQueue. - * This allows the expression to perform multiple updates in a delayed manner, gathering up all the updates before - * applying them. 
- */ - virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx); virtual void setName(Symbol name); virtual void setDocComment(DocComment docComment) {}; @@ -738,7 +727,7 @@ struct ExprOpNot : Expr struct name : Expr \ { \ MakeBinOpMembers(name, s) \ - } + }; MakeBinOp(ExprOpEq, "=="); MakeBinOp(ExprOpNEq, "!="); @@ -749,14 +738,7 @@ MakeBinOp(ExprOpConcatLists, "++"); struct ExprOpUpdate : Expr { -private: - /** Special case for merging of two attrsets. */ - void eval(EvalState & state, Value & v, Value & v1, Value & v2); - void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q); - -public: - MakeBinOpMembers(ExprOpUpdate, "//"); - virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) override; + MakeBinOpMembers(ExprOpUpdate, "//") }; struct ExprConcatStrings : Expr @@ -811,23 +793,11 @@ struct ExprPos : Expr COMMON_METHODS }; -/* only used to mark thunks as black holes. */ -struct ExprBlackHole : Expr -{ - void show(const SymbolTable & symbols, std::ostream & str) const override {} - - void eval(EvalState & state, Env & env, Value & v) override; - - void bindVars(EvalState & es, const std::shared_ptr & env) override {} - - [[noreturn]] static void throwInfiniteRecursionError(EvalState & state, Value & v); -}; - -extern ExprBlackHole eBlackHole; - class Exprs { - std::pmr::monotonic_buffer_resource buffer; + // FIXME: use std::pmr::monotonic_buffer_resource when parallel + // eval is disabled? 
+ std::pmr::synchronized_pool_resource buffer; public: std::pmr::polymorphic_allocator alloc{&buffer}; diff --git a/src/libexpr/include/nix/expr/parallel-eval.hh b/src/libexpr/include/nix/expr/parallel-eval.hh new file mode 100644 index 00000000000..4ccb3cfb843 --- /dev/null +++ b/src/libexpr/include/nix/expr/parallel-eval.hh @@ -0,0 +1,90 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include "nix/util/sync.hh" +#include "nix/util/logging.hh" +#include "nix/util/environment-variables.hh" +#include "nix/util/util.hh" +#include "nix/util/signals.hh" + +#if NIX_USE_BOEHMGC +# include +#endif + +namespace nix { + +struct Executor +{ + using work_t = std::function; + + struct Item + { + std::promise promise; + work_t work; + }; + + struct State + { + std::multimap queue; + std::vector threads; + }; + + std::atomic_bool quit{false}; + + const unsigned int evalCores; + + const bool enabled; + + const std::unique_ptr interruptCallback; + + Sync state_; + + std::condition_variable wakeup; + + static unsigned int getEvalCores(const EvalSettings & evalSettings); + + Executor(const EvalSettings & evalSettings); + + ~Executor(); + + void createWorker(State & state); + + void worker(); + + std::vector> spawn(std::vector> && items); + + static thread_local bool amWorkerThread; +}; + +struct FutureVector +{ + Executor & executor; + + struct State + { + std::vector> futures; + }; + + Sync state_; + + ~FutureVector(); + + // FIXME: add a destructor that cancels/waits for all futures. 
+ + void spawn(std::vector> && work); + + void spawn(uint8_t prioPrefix, Executor::work_t && work) + { + spawn({{std::move(work), prioPrefix}}); + } + + void finishAll(); +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/print-ambiguous.hh b/src/libexpr/include/nix/expr/print-ambiguous.hh index c0d811d4b93..e64f7f9bf8d 100644 --- a/src/libexpr/include/nix/expr/print-ambiguous.hh +++ b/src/libexpr/include/nix/expr/print-ambiguous.hh @@ -15,7 +15,6 @@ namespace nix { * * See: https://github.com/NixOS/nix/issues/9730 */ -void printAmbiguous( - Value & v, const SymbolTable & symbols, std::ostream & str, std::set * seen, int depth); +void printAmbiguous(EvalState & state, Value & v, std::ostream & str, std::set * seen, int depth); } // namespace nix diff --git a/src/libexpr/include/nix/expr/symbol-table.hh b/src/libexpr/include/nix/expr/symbol-table.hh index f0220376c53..23151082933 100644 --- a/src/libexpr/include/nix/expr/symbol-table.hh +++ b/src/libexpr/include/nix/expr/symbol-table.hh @@ -2,13 +2,14 @@ ///@file #include + #include "nix/expr/value.hh" -#include "nix/expr/static-string-data.hh" -#include "nix/util/chunked-vector.hh" #include "nix/util/error.hh" +#include "nix/util/sync.hh" +#include "nix/util/alignment.hh" #include -#include +#include namespace nix { @@ -17,17 +18,27 @@ class SymbolValue : protected Value friend class SymbolStr; friend class SymbolTable; - uint32_t idx; - - SymbolValue() = default; - -public: operator std::string_view() const noexcept { return string_view(); } }; +struct ContiguousArena +{ + const char * data; + const size_t maxSize; + + // Put this in a separate cache line to ensure that a thread + // adding a symbol doesn't slow down threads dereferencing symbols + // by invalidating the read-only `data` field. 
+ alignas(64) std::atomic size{0}; + + ContiguousArena(size_t maxSize); + + size_t allocate(size_t bytes); +}; + class StaticSymbolTable; /** @@ -42,6 +53,7 @@ class Symbol friend class StaticSymbolTable; private: + /// The offset of the symbol in `SymbolTable::arena`. uint32_t id; explicit constexpr Symbol(uint32_t id) noexcept @@ -73,6 +85,8 @@ public: constexpr auto operator<=>(const Symbol & other) const noexcept = default; friend class std::hash; + + constexpr static size_t alignment = alignof(SymbolValue); }; /** @@ -84,25 +98,20 @@ class SymbolStr { friend class SymbolTable; - constexpr static size_t chunkSize{8192}; - using SymbolValueStore = ChunkedVector; - const SymbolValue * s; struct Key { using HashType = boost::hash; - SymbolValueStore & store; std::string_view s; std::size_t hash; - std::pmr::memory_resource & resource; + ContiguousArena & arena; - Key(SymbolValueStore & store, std::string_view s, std::pmr::memory_resource & stringMemory) - : store(store) - , s(s) + Key(std::string_view s, ContiguousArena & arena) + : s(s) , hash(HashType{}(s)) - , resource(stringMemory) + , arena(arena) { } }; @@ -113,22 +122,7 @@ public: { } - SymbolStr(const Key & key) - { - auto size = key.s.size(); - if (size >= std::numeric_limits::max()) { - throw Error("Size of symbol exceeds 4GiB and cannot be stored"); - } - // for multi-threaded implementations: lock store and allocator here - const auto & [v, idx] = key.store.add(SymbolValue{}); - if (size == 0) { - v.mkStringNoCopy(""_sds, nullptr); - } else { - v.mkStringNoCopy(StringData::make(key.resource, key.s)); - } - v.idx = idx; - this->s = &v; - } + SymbolStr(const Key & key); bool operator==(std::string_view s2) const noexcept { @@ -157,11 +151,7 @@ public: [[gnu::always_inline]] bool empty() const noexcept { - auto * p = &s->string_data(); - // Save a dereference in the sentinel value case - if (p == &""_sds) - return true; - return p->size() == 0; + return !s->string_data().size(); } [[gnu::always_inline]] 
@@ -176,11 +166,6 @@ public: return s; } - explicit operator Symbol() const noexcept - { - return Symbol{s->idx + 1}; - } - struct Hash { using is_transparent = void; @@ -218,6 +203,11 @@ public: return operator()(b, a); } }; + + constexpr static size_t computeSize(std::string_view s) + { + return alignUp(sizeof(Value) + sizeof(StringData) + s.size() + 1, Symbol::alignment); + } }; class SymbolTable; @@ -237,6 +227,7 @@ class StaticSymbolTable std::array symbols; std::size_t size = 0; + std::size_t nextId = alignof(SymbolValue); public: constexpr StaticSymbolTable() = default; @@ -245,8 +236,9 @@ public: { /* No need to check bounds because out of bounds access is a compilation error. */ - auto sym = Symbol(size + 1); //< +1 because Symbol with id = 0 is reserved + auto sym = Symbol(nextId); symbols[size++] = {str, sym}; + nextId += SymbolStr::computeSize(str); return sym; } @@ -264,61 +256,67 @@ private: * SymbolTable is an append only data structure. * During its lifetime the monotonic buffer holds all strings and nodes, if the symbol set is node based. */ - std::pmr::monotonic_buffer_resource buffer; - SymbolStr::SymbolValueStore store{16}; + ContiguousArena arena; /** - * Transparent lookup of string view for a pointer to a ChunkedVector entry -> return offset into the store. - * ChunkedVector references are never invalidated. + * Transparent lookup of string view for a pointer to a + * SymbolValue in the arena. */ - boost::unordered_flat_set symbols{SymbolStr::chunkSize}; + boost::concurrent_flat_set symbols; public: SymbolTable(const StaticSymbolTable & staticSymtab) + : arena(1 << 30) { + // Reserve symbol ID 0 and ensure alignment of the first allocation. + arena.allocate(Symbol::alignment); + staticSymtab.copyIntoSymbolTable(*this); } /** * Converts a string into a symbol. */ - Symbol create(std::string_view s) - { - // Most symbols are looked up more than once, so we trade off insertion performance - // for lookup performance. 
- // FIXME: make this thread-safe. - return Symbol(*symbols.insert(SymbolStr::Key{store, s, buffer}).first); - } + Symbol create(std::string_view s); std::vector resolve(const std::span & symbols) const { std::vector result; result.reserve(symbols.size()); - for (auto sym : symbols) + for (auto & sym : symbols) result.push_back((*this)[sym]); return result; } SymbolStr operator[](Symbol s) const { - uint32_t idx = s.id - uint32_t(1); - if (idx >= store.size()) - unreachable(); - return store[idx]; + assert(s.id); + // Note: we don't check arena.size here to avoid a dependency + // on other threads creating new symbols. + return SymbolStr(*reinterpret_cast(arena.data + s.id)); } - [[gnu::always_inline]] size_t size() const noexcept { - return store.size(); + return symbols.size(); } - size_t totalSize() const; + size_t totalSize() const + { + return arena.size; + } template void dump(T callback) const { - store.forEach(callback); + std::string_view left{arena.data, arena.size}; + left = left.substr(Symbol::alignment); + while (!left.empty()) { + auto v = reinterpret_cast(left.data()); + callback(v->string_view()); + left = left.substr( + alignUp(sizeof(SymbolValue) + sizeof(StringData) + v->string_view().size() + 1, Symbol::alignment)); + } } }; diff --git a/src/libexpr/include/nix/expr/value.hh b/src/libexpr/include/nix/expr/value.hh index 004dcc43f0f..10893347bd6 100644 --- a/src/libexpr/include/nix/expr/value.hh +++ b/src/libexpr/include/nix/expr/value.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include #include #include #include @@ -26,6 +27,19 @@ namespace nix { struct Value; class BindingsBuilder; +static constexpr int discriminatorBits = 3; + +enum PrimaryDiscriminator : int { + pdSingleDWord = 0, + pdThunk = 1, + pdPending = 2, + pdAwaited = 3, + pdPairOfPointers = 4, + pdListN = 5, // FIXME: get rid of this by putting the size in the first word + pdString = 6, + pdPath = 7, // FIXME: get rid of this by ditching the `accessor` field +}; + /** * Internal type 
discriminator, which is more detailed than `ValueType`, as * it specifies the exact representation used (for types that have multiple @@ -36,27 +50,50 @@ class BindingsBuilder; * This also restricts the number of internal types represented with distinct memory layouts. */ typedef enum { - tUninitialized = 0, - /* layout: Single/zero field payload */ - tInt = 1, - tBool, - tNull, - tFloat, - tExternal, - tPrimOp, - tAttrs, - /* layout: Pair of pointers payload */ - tListSmall, - tPrimOpApp, - tApp, - tThunk, - tLambda, - /* layout: Single untaggable field */ - tListN, - tString, - tPath, + /* Values that have more type bits in the first word, and the + payload (a single word) in the second word. */ + tUninitialized = PrimaryDiscriminator::pdSingleDWord | (0 << discriminatorBits), + tInt = PrimaryDiscriminator::pdSingleDWord | (1 << discriminatorBits), + tFloat = PrimaryDiscriminator::pdSingleDWord | (2 << discriminatorBits), + tBool = PrimaryDiscriminator::pdSingleDWord | (3 << discriminatorBits), + tNull = PrimaryDiscriminator::pdSingleDWord | (4 << discriminatorBits), + tAttrs = PrimaryDiscriminator::pdSingleDWord | (5 << discriminatorBits), + tPrimOp = PrimaryDiscriminator::pdSingleDWord | (6 << discriminatorBits), + tFailed = PrimaryDiscriminator::pdSingleDWord | (7 << discriminatorBits), + tExternal = PrimaryDiscriminator::pdSingleDWord | (8 << discriminatorBits), + + /* Thunks. */ + tThunk = PrimaryDiscriminator::pdThunk | (0 << discriminatorBits), + tApp = PrimaryDiscriminator::pdThunk | (1 << discriminatorBits), + + tPending = PrimaryDiscriminator::pdPending, + tAwaited = PrimaryDiscriminator::pdAwaited, + + /* Values that consist of two pointers. The second word contains + more type bits in its alignment niche. 
*/ + tListSmall = PrimaryDiscriminator::pdPairOfPointers | (0 << discriminatorBits), + tPrimOpApp = PrimaryDiscriminator::pdPairOfPointers | (1 << discriminatorBits), + tLambda = PrimaryDiscriminator::pdPairOfPointers | (2 << discriminatorBits), + + /* Special values. */ + tListN = PrimaryDiscriminator::pdListN, + tString = PrimaryDiscriminator::pdString, + tPath = PrimaryDiscriminator::pdPath, } InternalType; +/** + * Return true if `type` denotes a "finished" value, i.e. a weak-head + * normal form. + * + * Note that tPrimOpApp is considered "finished" because it represents + * a primop call with an incomplete number of arguments, and therefore + * cannot be evaluated further. + */ +inline bool isFinished(InternalType t) +{ + return t != tUninitialized && t != tThunk && t != tApp && t != tPending && t != tAwaited; +} + /** * This type abstracts over all actual value types in the language, * grouping together implementation details like tList*, different function @@ -64,6 +101,7 @@ typedef enum { */ typedef enum { nThunk, + nFailed, nInt, nFloat, nBool, @@ -80,7 +118,6 @@ class Bindings; struct Env; struct Expr; struct ExprLambda; -struct ExprBlackHole; struct PrimOp; class Symbol; class SymbolStr; @@ -282,7 +319,7 @@ namespace detail { /** * Implementation mixin class for defining the public types - * In can be inherited from by the actual ValueStorage implementations + * In can be inherited by the actual ValueStorage implementations * for free due to Empty Base Class Optimization (EBCO). 
*/ struct ValueBase @@ -417,6 +454,11 @@ struct ValueBase size_t size; Value * const * elems; }; + + struct Failed : gc + { + std::exception_ptr ex; + }; }; template @@ -443,6 +485,7 @@ struct PayloadTypeToInternalType MACRO(PrimOp *, primOp, tPrimOp) \ MACRO(ValueBase::PrimOpApplicationThunk, primOpApp, tPrimOpApp) \ MACRO(ExternalValueBase *, external, tExternal) \ + MACRO(ValueBase::Failed *, failed, tFailed) \ MACRO(NixFloat, fpoint, tFloat) #define NIX_VALUE_PAYLOAD_TYPE(T, FIELD_NAME, DISCRIMINATOR) \ @@ -546,12 +589,44 @@ class alignas(16) ValueStorage::type; - using Payload = std::array; - Payload payload = {}; - static constexpr int discriminatorBits = 3; + /** + * For multithreaded evaluation, we have to make sure that thunks/apps + * (the only mutable types of values) are updated in a safe way. A + * value can have the following states (see `force()`): + * + * * "thunk"/"app". When forced, this value transitions to + * "pending". The current thread will evaluate the + * thunk/app. When done, it will override the value with the + * result. If the value is at that point in the "awaited" state, + * the thread will wake up any waiting threads. + * + * * "pending". This means it's currently being evaluated. If + * another thread forces this value, it transitions to "awaited" + * and the thread will wait for the value to be updated (see + * `waitOnThunk()`). + * + * * "awaited". Like pending, only it means that there already are + * one or more threads waiting for this thunk. + * + * To ensure race-free access, the non-atomic word `p1` must + * always be updated before `p0`. Writes to `p0` should use + * *release* semantics (so that `p1` and any referenced values become + * visible to threads that read `p0`), and reads from `p0` should + * use `*acquire* semantics. + * + * Note: at some point, we may want to switch to 128-bit atomics + * so that `p0` and `p1` can be updated together + * atomically. 
However, 128-bit atomics are a bit problematic at + * present on x86_64 (see + * e.g. https://ibraheem.ca/posts/128-bit-atomics/). + */ + std::atomic p0{0}; + PackedPointer p1{0}; + static constexpr PackedPointer discriminatorMask = (PackedPointer(1) << discriminatorBits) - 1; + // FIXME: move/update /** * The value is stored as a pair of 8-byte double words. All pointers are assumed * to be 8-byte aligned. This gives us at most 6 bits of discriminator bits @@ -581,15 +656,6 @@ class alignas(16) ValueStorage requires std::is_pointer_v @@ -600,7 +666,7 @@ class alignas(16) ValueStorage(payload[0] & discriminatorMask); + return static_cast(p0 & discriminatorMask); } static void assertAligned(PackedPointer val) noexcept @@ -608,13 +674,30 @@ class alignas(16) ValueStorage(p0_ & discriminatorMask); + if (pd == pdPending) + // Nothing to do; no thread is waiting on this thunk. + ; + else if (pd == pdAwaited) + // Slow path: wake up the threads that are waiting on this + // thunk. + notifyWaiters(); + else if (pd == pdThunk) + unreachable(); + } + template void setSingleDWordPayload(PackedPointer untaggedVal) noexcept { - /* There's plenty of free upper bits in the first dword, which is - used only for the discriminator. */ - payload[0] = static_cast(pdSingleDWord) | (static_cast(type) << discriminatorBits); - payload[1] = untaggedVal; + /* There's plenty of free upper bits in the first byte, which + is used only for the discriminator. 
*/ + finish(static_cast(type), untaggedVal); } template @@ -623,32 +706,42 @@ class alignas(16) ValueStorage= pdListN && discriminator <= pdPath); auto firstFieldPayload = std::bit_cast(firstPtrField); assertAligned(firstFieldPayload); - payload[0] = static_cast(discriminator) | firstFieldPayload; - payload[1] = std::bit_cast(untaggableField); + finish(static_cast(discriminator) | firstFieldPayload, std::bit_cast(untaggableField)); } template void setPairOfPointersPayload(T * firstPtrField, U * secondPtrField) noexcept { static_assert(type >= tListSmall && type <= tLambda); - { - auto firstFieldPayload = std::bit_cast(firstPtrField); - assertAligned(firstFieldPayload); - payload[0] = static_cast(pdPairOfPointers) | firstFieldPayload; - } - { - auto secondFieldPayload = std::bit_cast(secondPtrField); - assertAligned(secondFieldPayload); - payload[1] = (type - tListSmall) | secondFieldPayload; - } + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + auto secondFieldPayload = std::bit_cast(secondPtrField); + assertAligned(secondFieldPayload); + finish( + static_cast(pdPairOfPointers) | firstFieldPayload, + ((type - tListSmall) >> discriminatorBits) | secondFieldPayload); + } + + template + void setThunkPayload(T * firstPtrField, U * secondPtrField) noexcept + { + static_assert(type >= tThunk && type <= tApp); + auto secondFieldPayload = std::bit_cast(secondPtrField); + assertAligned(secondFieldPayload); + p1 = ((type - tThunk) >> discriminatorBits) | secondFieldPayload; + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + // Note: awaited values can never become a thunk, so no need + // to check for waiters. 
+ p0.store(static_cast(pdThunk) | firstFieldPayload, std::memory_order_release); } template requires std::is_pointer_v && std::is_pointer_v void getPairOfPointersPayload(T & firstPtrField, U & secondPtrField) const noexcept { - firstPtrField = untagPointer(payload[0]); - secondPtrField = untagPointer(payload[1]); + firstPtrField = untagPointer(p0); + secondPtrField = untagPointer(p1); } protected: @@ -656,42 +749,45 @@ protected: InternalType getInternalType() const noexcept { switch (auto pd = getPrimaryDiscriminator()) { - case pdUninitialized: - /* Discriminator value of zero is used to distinguish uninitialized values. */ - return tUninitialized; case pdSingleDWord: - /* Payloads that only use up a single double word store the InternalType - in the upper bits of the first double word. */ - return InternalType(payload[0] >> discriminatorBits); + /* Payloads that only use up a single double word store + the full InternalType in the first byte. */ + return InternalType(p0 & 0xff); + case pdThunk: + return static_cast(tThunk + ((p1 & discriminatorMask) << discriminatorBits)); + case pdPending: + return tPending; + case pdAwaited: + return tAwaited; + case pdPairOfPointers: + return static_cast(tListSmall + ((p1 & discriminatorMask) << discriminatorBits)); /* The order must match that of the enumerations defined in InternalType. 
*/ case pdListN: case pdString: case pdPath: return static_cast(tListN + (pd - pdListN)); - case pdPairOfPointers: - return static_cast(tListSmall + (payload[1] & discriminatorMask)); [[unlikely]] default: unreachable(); } } -#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, MEMBER_A, MEMBER_B) \ - \ - void getStorage(TYPE & val) const noexcept \ - { \ - getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ - } \ - \ - void setStorage(TYPE val) noexcept \ - { \ - setPairOfPointersPayload>(val MEMBER_A, val MEMBER_B); \ +#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, SET, MEMBER_A, MEMBER_B) \ + \ + void getStorage(TYPE & val) const noexcept \ + { \ + getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ + } \ + \ + void setStorage(TYPE val) noexcept \ + { \ + SET>(val MEMBER_A, val MEMBER_B); \ } - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, [0], [1]) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, .env, .expr) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, setPairOfPointersPayload, [0], [1]) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, setPairOfPointersPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, setPairOfPointersPayload, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, setThunkPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, setThunkPayload, .env, .expr) #undef NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS @@ -699,52 +795,57 @@ protected: { /* PackedPointerType -> int64_t here is well-formed, since the standard requires this conversion to follow 2's complement rules. This is just a no-op. 
*/ - integer = NixInt(payload[1]); + integer = NixInt(p1); } void getStorage(bool & boolean) const noexcept { - boolean = payload[1]; + boolean = p1; } void getStorage(Null & null) const noexcept {} void getStorage(NixFloat & fpoint) const noexcept { - fpoint = std::bit_cast(payload[1]); + fpoint = std::bit_cast(p1); } void getStorage(ExternalValueBase *& external) const noexcept { - external = std::bit_cast(payload[1]); + external = std::bit_cast(p1); } void getStorage(PrimOp *& primOp) const noexcept { - primOp = std::bit_cast(payload[1]); + primOp = std::bit_cast(p1); } void getStorage(Bindings *& attrs) const noexcept { - attrs = std::bit_cast(payload[1]); + attrs = std::bit_cast(p1); } void getStorage(List & list) const noexcept { - list.elems = untagPointer(payload[0]); - list.size = payload[1]; + list.elems = untagPointer(p0); + list.size = p1; } void getStorage(StringWithContext & string) const noexcept { - string.context = untagPointer(payload[0]); - string.str = std::bit_cast(payload[1]); + string.context = untagPointer(p0); + string.str = std::bit_cast(p1); } void getStorage(Path & path) const noexcept { - path.accessor = untagPointer(payload[0]); - path.path = std::bit_cast(payload[1]); + path.accessor = untagPointer(p0); + path.path = std::bit_cast(p1); + } + + void getStorage(Failed *& failed) const noexcept + { + failed = std::bit_cast(p1); } void setStorage(NixInt integer) noexcept @@ -796,8 +897,85 @@ protected: { setUntaggablePayload(path.accessor, path.path); } + + void setStorage(Failed * failed) noexcept + { + setSingleDWordPayload(std::bit_cast(failed)); + } + + ValueStorage() {} + + ValueStorage(const ValueStorage & v) + { + *this = v; + } + + /** + * Copy a value. This is not allowed to be a thunk to avoid + * accidental work duplication. 
+ */ + ValueStorage & operator=(const ValueStorage & v) + { + auto p0_ = v.p0.load(std::memory_order_acquire); + auto p1_ = v.p1; // must be loaded after p0 + auto pd = static_cast(p0_ & discriminatorMask); + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) + unreachable(); + finish(p0_, p1_); + return *this; + } + +public: + + /** + * Check whether forcing this value requires a trivial amount of + * computation. A value is trivial if it's finished or if it's a + * thunk whose expression is an attrset with no dynamic + * attributes, a lambda or a list. Note that it's up to the caller + * to check whether the members of those attrsets or lists must be + * trivial. + */ + bool isTrivial() const; + + inline void reset() + { + p1 = 0; + p0.store(0, std::memory_order_relaxed); + } + + /// Only used for testing. + inline void mkBlackhole() + { + p0.store(pdPending, std::memory_order_relaxed); + } + + void force(EvalState & state, PosIdx pos); + +private: + + /** + * Given a thunk that was observed to be in the pending or awaited + * state, wait for it to finish. Returns the first word of the + * value. + */ + PackedPointer waitOnThunk(EvalState & state, PackedPointer p0); + + /** + * Wake up any threads that are waiting on this value. + */ + void notifyWaiters(); }; +template<> +void ValueStorage::notifyWaiters(); + +template<> +ValueStorage::PackedPointer +ValueStorage::waitOnThunk(EvalState & state, PackedPointer p0); + +template<> +bool ValueStorage::isTrivial() const; + /** * View into a list of Value * that is itself immutable. 
* @@ -1039,47 +1217,58 @@ public: void print(EvalState & state, std::ostream & str, PrintOptions options = PrintOptions{}); + // FIXME: optimize, only look at first word + inline bool isFinished() const + { + return nix::isFinished(getInternalType()); + } + // Functions needed to distinguish the type // These should be removed eventually, by putting the functionality that's // needed by callers into methods of this type - // type() == nThunk inline bool isThunk() const { return isa(); - }; + } inline bool isApp() const { return isa(); - }; + } - inline bool isBlackhole() const; + inline bool isBlackhole() const + { + auto t = getInternalType(); + return t == tPending || t == tAwaited; + } // type() == nFunction inline bool isLambda() const { return isa(); - }; + } inline bool isPrimOp() const { return isa(); - }; + } inline bool isPrimOpApp() const { return isa(); - }; + } + + inline bool isFailed() const + { + return isa(); + } /** * Returns the normal type of a Value. This only returns nThunk if * the Value hasn't been forceValue'd - * - * @param invalidIsThunk Instead of aborting an an invalid (probably - * 0, so uninitialized) internal type, return `nThunk`. 
*/ - inline ValueType type(bool invalidIsThunk = false) const + inline ValueType type() const { switch (getInternalType()) { case tUninitialized: @@ -1107,14 +1296,15 @@ public: return nExternal; case tFloat: return nFloat; + case tFailed: + return nFailed; case tThunk: case tApp: + case tPending: + case tAwaited: return nThunk; } - if (invalidIsThunk) - return nThunk; - else - unreachable(); + unreachable(); } /** @@ -1205,8 +1395,6 @@ public: setStorage(Lambda{.env = e, .fun = f}); } - inline void mkBlackhole(); - void mkPrimOp(PrimOp * p); inline void mkPrimOpApp(Value * l, Value * r) noexcept @@ -1229,6 +1417,11 @@ public: setStorage(n); } + inline void mkFailed() noexcept + { + setStorage(new Value::Failed{.ex = std::current_exception()}); + } + bool isList() const noexcept { return isa(); @@ -1246,13 +1439,6 @@ public: PosIdx determinePos(const PosIdx pos) const; - /** - * Check whether forcing this value requires a trivial amount of - * computation. In particular, function applications are - * non-trivial. - */ - bool isTrivial() const; - SourcePath path() const { return SourcePath( @@ -1314,6 +1500,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. ClosureThunk thunk() const noexcept { return getStorage(); @@ -1324,6 +1511,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. 
FunctionApplicationThunk app() const noexcept { return getStorage(); @@ -1343,19 +1531,12 @@ public: { return getStorage().accessor; } -}; -extern ExprBlackHole eBlackHole; - -bool Value::isBlackhole() const -{ - return isThunk() && thunk().expr == (Expr *) &eBlackHole; -} - -void Value::mkBlackhole() -{ - mkThunk(nullptr, (Expr *) &eBlackHole); -} + Failed * failed() const noexcept + { + return getStorage(); + } +}; typedef std::vector> ValueVector; typedef boost::unordered_flat_map< diff --git a/src/libexpr/include/nix/expr/value/context.hh b/src/libexpr/include/nix/expr/value/context.hh index 054516bc268..fa3d4e87c0f 100644 --- a/src/libexpr/include/nix/expr/value/context.hh +++ b/src/libexpr/include/nix/expr/value/context.hh @@ -64,7 +64,31 @@ struct NixStringContextElem */ using Built = SingleDerivedPath::Built; - using Raw = std::variant; + /** + * A store path that will not result in a store reference when + * used in a derivation or toFile. + * + * When you apply `builtins.toString` to a path value representing + * a path in the Nix store (as is the case with flake inputs), + * historically you got a string without context + * (e.g. `/nix/store/...-source`). This is broken, since it allows + * you to pass a store path to a derivation/toFile without a + * proper store reference. This is especially a problem with lazy + * trees, since the store path is a virtual path that doesn't + * exist. + * + * For backwards compatibility, and to warn users about this + * unsafe use of `toString`, we keep track of such strings as a + * special type of context. + */ + struct Path + { + StorePath storePath; + + GENERATE_CMP(Path, me->storePath); + }; + + using Raw = std::variant; Raw raw; @@ -92,4 +116,10 @@ struct NixStringContextElem */ typedef std::set NixStringContext; +/** + * Returns false if `context` has no elements other than + * `NixStringContextElem::Path`. 
+ */ +bool hasContext(const NixStringContext & context); + } // namespace nix diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index 18c4c7fa32c..3724db9e2cb 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -43,6 +43,7 @@ boost = dependency( modules : [ 'container', 'context', + 'thread', ], include_type : 'system', ) @@ -62,7 +63,6 @@ bdw_gc = dependency('bdw-gc', required : bdw_gc_required) if bdw_gc.found() deps_public += bdw_gc foreach funcspec : [ - 'pthread_attr_get_np', 'pthread_getattr_np', ] define_name = 'HAVE_' + funcspec.underscorify().to_upper() @@ -165,11 +165,13 @@ sources = files( 'json-to-value.cc', 'lexer-helpers.cc', 'nixexpr.cc', + 'parallel-eval.cc', 'paths.cc', 'primops.cc', 'print-ambiguous.cc', 'print.cc', 'search-path.cc', + 'symbol-table.cc', 'value-to-json.cc', 'value-to-xml.cc', 'value.cc', diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 4a2f71a11b8..b52370816f5 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -13,8 +13,6 @@ namespace nix { Counter Expr::nrExprs; -ExprBlackHole eBlackHole; - // FIXME: remove, because *symbols* are abstract and do not have a single // textual representation; see printIdentifier() std::ostream & operator<<(std::ostream & str, const SymbolStr & symbol) @@ -626,15 +624,6 @@ void ExprLambda::setDocComment(DocComment docComment) // belongs in the same conditional. body->setDocComment(docComment); } -}; - -/* Symbol table. 
*/ - -size_t SymbolTable::totalSize() const -{ - size_t n = 0; - dump([&](SymbolStr s) { n += s.size(); }); - return n; } std::string DocComment::getInnerText(const PosTable & positions) const diff --git a/src/libexpr/package.nix b/src/libexpr/package.nix index d0aef34e95d..c82e56de8ff 100644 --- a/src/libexpr/package.nix +++ b/src/libexpr/package.nix @@ -36,7 +36,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr"; + pname = "determinate-nix-expr"; inherit version; workDir = ./.; diff --git a/src/libexpr/parallel-eval.cc b/src/libexpr/parallel-eval.cc new file mode 100644 index 00000000000..d63e931845e --- /dev/null +++ b/src/libexpr/parallel-eval.cc @@ -0,0 +1,299 @@ +#include "nix/expr/eval.hh" +#include "nix/expr/parallel-eval.hh" +#include "nix/store/globals.hh" +#include "nix/expr/primops.hh" + +namespace nix { + +// cache line alignment to prevent false sharing +struct alignas(64) WaiterDomain +{ + std::condition_variable cv; +}; + +static std::array, 128> waiterDomains; + +thread_local bool Executor::amWorkerThread{false}; + +unsigned int Executor::getEvalCores(const EvalSettings & evalSettings) +{ + return evalSettings.evalCores == 0UL ? 
Settings::getDefaultCores() : evalSettings.evalCores; +} + +Executor::Executor(const EvalSettings & evalSettings) + : evalCores(getEvalCores(evalSettings)) + , enabled(evalCores > 1) + , interruptCallback(createInterruptCallback([&]() { + for (auto & domain : waiterDomains) + domain.lock()->cv.notify_all(); + })) +{ + debug("executor using %d threads", evalCores); + auto state(state_.lock()); + for (size_t n = 0; n < evalCores; ++n) + createWorker(*state); +} + +Executor::~Executor() +{ + std::vector threads; + { + auto state(state_.lock()); + quit = true; + std::swap(threads, state->threads); + debug("executor shutting down with %d items left", state->queue.size()); + } + + wakeup.notify_all(); + + for (auto & thr : threads) + thr.join(); +} + +void Executor::createWorker(State & state) +{ + boost::thread::attributes attrs; + attrs.set_stack_size(evalStackSize); + state.threads.push_back(boost::thread(attrs, [&]() { +#if NIX_USE_BOEHMGC + GC_stack_base sb; + GC_get_stack_base(&sb); + GC_register_my_thread(&sb); +#endif + worker(); +#if NIX_USE_BOEHMGC + GC_unregister_my_thread(); +#endif + })); +} + +void Executor::worker() +{ + ReceiveInterrupts receiveInterrupts; + + unix::interruptCheck = [&]() { return (bool) quit; }; + + amWorkerThread = true; + + while (true) { + Item item; + + while (true) { + auto state(state_.lock()); + if (quit) { + // Set an `Interrupted` exception on all promises so + // we get a nicer error than "std::future_error: + // Broken promise". 
+ auto ex = std::make_exception_ptr(Interrupted("interrupted by the user")); + for (auto & item : state->queue) + item.second.promise.set_exception(ex); + state->queue.clear(); + return; + } + if (!state->queue.empty()) { + item = std::move(state->queue.begin()->second); + state->queue.erase(state->queue.begin()); + break; + } + state.wait(wakeup); + } + + try { + item.work(); + item.promise.set_value(); + } catch (const Interrupted &) { + quit = true; + item.promise.set_exception(std::current_exception()); + } catch (...) { + item.promise.set_exception(std::current_exception()); + } + } +} + +std::vector> Executor::spawn(std::vector> && items) +{ + if (items.empty()) + return {}; + + std::vector> futures; + + { + auto state(state_.lock()); + for (auto & item : items) { + std::promise promise; + futures.push_back(promise.get_future()); + thread_local std::random_device rd; + thread_local std::uniform_int_distribution dist(0, 1ULL << 48); + auto key = (uint64_t(item.second) << 48) | dist(rd); + state->queue.emplace(key, Item{.promise = std::move(promise), .work = std::move(item.first)}); + } + } + + if (items.size() == 1) + wakeup.notify_one(); + else + wakeup.notify_all(); + + return futures; +} + +FutureVector::~FutureVector() +{ + try { + finishAll(); + } catch (...) { + ignoreExceptionInDestructor(); + } +} + +void FutureVector::spawn(std::vector> && work) +{ + auto futures = executor.spawn(std::move(work)); + auto state(state_.lock()); + for (auto & future : futures) + state->futures.push_back(std::move(future)); +} + +void FutureVector::finishAll() +{ + std::exception_ptr ex; + while (true) { + std::vector> futures; + { + auto state(state_.lock()); + std::swap(futures, state->futures); + } + debug("got %d futures", futures.size()); + if (futures.empty()) + break; + for (auto & future : futures) + try { + future.get(); + } catch (...) 
{ + if (ex) { + if (!getInterrupted()) + ignoreExceptionExceptInterrupt(); + } else + ex = std::current_exception(); + } + } + if (ex) + std::rethrow_exception(ex); +} + +static Sync & getWaiterDomain(detail::ValueBase & v) +{ + auto domain = (((size_t) &v) >> 5) % waiterDomains.size(); + return waiterDomains[domain]; +} + +static std::atomic nextEvalThreadId{1}; +thread_local uint32_t myEvalThreadId(nextEvalThreadId++); + +template<> +ValueStorage::PackedPointer +ValueStorage::waitOnThunk(EvalState & state, PackedPointer expectedP0) +{ + state.nrThunksAwaited++; + + auto domain = getWaiterDomain(*this).lock(); + + auto threadId = expectedP0 >> discriminatorBits; + + if (static_cast(expectedP0 & discriminatorMask) == pdAwaited) { + /* Make sure that the value is still awaited, now that we're + holding the domain lock. */ + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + + /* If the value has been finalized in the meantime (i.e. is no + longer pending), we're done. */ + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + } else { + /* Mark this value as being waited on. */ + PackedPointer p0_ = expectedP0; + if (!p0.compare_exchange_strong( + p0_, + pdAwaited | (threadId << discriminatorBits), + std::memory_order_acquire, + std::memory_order_acquire)) { + /* If the value has been finalized in the meantime (i.e. is + no longer pending), we're done. */ + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + /* The value was already in the "waited on" state, so we're + not the only thread waiting on it. */ + } + } + + /* Wait for another thread to finish this value. 
*/ + if (threadId == myEvalThreadId) + state.error("infinite recursion encountered") + .atPos(((Value &) *this).determinePos(noPos)) + .debugThrow(); + + state.nrThunksAwaitedSlow++; + state.currentlyWaiting++; + state.maxWaiting = std::max(state.maxWaiting, state.currentlyWaiting); + + auto now1 = std::chrono::steady_clock::now(); + + while (true) { + domain.wait(domain->cv); + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + auto now2 = std::chrono::steady_clock::now(); + state.microsecondsWaiting += std::chrono::duration_cast(now2 - now1).count(); + state.currentlyWaiting--; + return p0_; + } + state.nrSpuriousWakeups++; + checkInterrupt(); + } +} + +template<> +void ValueStorage::notifyWaiters() +{ + auto domain = getWaiterDomain(*this).lock(); + + domain->cv.notify_all(); +} + +static void prim_parallel(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceList(*args[0], pos, "while evaluating the first argument passed to builtins.parallel"); + + if (state.executor->evalCores > 1) { + std::vector> work; + for (auto value : args[0]->listView()) + if (!value->isFinished()) + work.emplace_back([value(allocRootValue(value)), &state, pos]() { state.forceValue(**value, pos); }, 0); + state.executor->spawn(std::move(work)); + } + + state.forceValue(*args[1], pos); + v = *args[1]; +} + +// FIXME: gate this behind an experimental feature. +static RegisterPrimOp r_parallel({ + .name = "__parallel", + .args = {"xs", "x"}, + .arity = 2, + .doc = R"( + Start evaluation of the values `xs` in the background and return `x`. 
+ )", + .fun = prim_parallel, + .experimentalFeature = Xp::ParallelEval, +}); + +} // namespace nix diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index 8622ab20885..17189d507ca 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -20,24 +20,96 @@ SourcePath EvalState::storePath(const StorePath & path) return {rootFS, CanonPath{store->printStorePath(path)}}; } -StorePath -EvalState::mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) +StorePath EvalState::devirtualize(const StorePath & path, StringMap * rewrites) { - auto storePath = fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); + if (auto mount = storeFS->getMount(CanonPath(store->printStorePath(path)))) { + auto storePath = fetchToStore( + fetchSettings, + *store, + SourcePath{ref(mount)}, + settings.readOnlyMode ? FetchMode::DryRun : FetchMode::Copy, + path.name()); + assert(storePath.name() == path.name()); + if (rewrites) + rewrites->emplace(path.hashPart(), storePath.hashPart()); + return storePath; + } else + return path; +} + +SingleDerivedPath EvalState::devirtualize(const SingleDerivedPath & path, StringMap * rewrites) +{ + if (auto o = std::get_if(&path.raw())) + return SingleDerivedPath::Opaque{devirtualize(o->path, rewrites)}; + else + return path; +} + +std::string EvalState::devirtualize(std::string_view s, const NixStringContext & context) +{ + StringMap rewrites; + + for (auto & c : context) + if (auto o = std::get_if(&c.raw)) + devirtualize(o->path, &rewrites); + + return rewriteStrings(std::string(s), rewrites); +} + +std::string EvalState::computeBaseName(const SourcePath & path, PosIdx pos) +{ + if (path.accessor == rootFS) { + if (auto storePath = store->maybeParseStorePath(path.path.abs())) { + debug( + "Copying '%s' to the store again.\n" + "You can make Nix evaluate faster and copy fewer files by replacing `./.` with the `self` flake input, " + "or `builtins.path { path = ./.; name = \"source\"; 
}`.\n", + path); + return std::string( + fetchToStore(fetchSettings, *store, path, FetchMode::DryRun, storePath->name()).to_string()); + } + } + return std::string(path.baseName()); +} + +StorePath EvalState::mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash) +{ + auto storePath = settings.lazyTrees + ? StorePath::random(input.getName()) + : fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); allowPath(storePath); // FIXME: should just whitelist the entire virtual store + std::optional _narHash; + + auto getNarHash = [&]() { + if (!_narHash) { + if (store->isValidPath(storePath)) + _narHash = store->queryPathInfo(storePath)->narHash; + else + _narHash = fetchToStore2(fetchSettings, *store, accessor, FetchMode::DryRun, input.getName()).second; + } + return _narHash; + }; + storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor); - auto narHash = store->queryPathInfo(storePath)->narHash; - input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + if (forceNarHash + || (requireLockable && (!settings.lazyTrees || !settings.lazyLocks || !input.isLocked(fetchSettings)) + && !input.getNarHash())) + input.attrs.insert_or_assign("narHash", getNarHash()->to_string(HashFormat::SRI, true)); - if (originalInput.getNarHash() && narHash != *originalInput.getNarHash()) + if (originalInput.getNarHash() && *getNarHash() != *originalInput.getNarHash()) throw Error( (unsigned int) 102, "NAR hash mismatch in input '%s', expected '%s' but got '%s'", originalInput.to_string(), - narHash.to_string(HashFormat::SRI, true), + getNarHash()->to_string(HashFormat::SRI, true), originalInput.getNarHash()->to_string(HashFormat::SRI, true)); return storePath; diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 759b33ac6fd..a8b5e87c304 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -17,6 +17,7 @@ #include 
"nix/expr/primops.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/sort.hh" +#include "nix/util/mounted-source-accessor.hh" #include #include @@ -73,6 +74,7 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS for (auto & c : context) { auto ensureValid = [&](const StorePath & p) { + waitForPath(p); if (!store->isValidPath(p)) error(store->printStorePath(p)).debugThrow(); }; @@ -87,7 +89,10 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS ensureValid(b.drvPath->getBaseStorePath()); }, [&](const NixStringContextElem::Opaque & o) { - ensureValid(o.path); + // We consider virtual store paths valid here. They'll + // be devirtualized if needed elsewhere. + if (!storeFS->getMount(CanonPath(store->printStorePath(o.path)))) + ensureValid(o.path); if (maybePathsOut) maybePathsOut->emplace(o.path); }, @@ -97,6 +102,9 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS if (maybePathsOut) maybePathsOut->emplace(d.drvPath); }, + [&](const NixStringContextElem::Path & p) { + // FIXME: do something? + }, }, c.raw); } @@ -302,6 +310,7 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v if (!state.store->isStorePath(path2)) return std::nullopt; auto storePath = state.store->parseStorePath(path2); + state.waitForPath(storePath); if (!(state.store->isValidPath(storePath) && isDerivation(path2))) return std::nullopt; return storePath; @@ -566,6 +575,7 @@ static void prim_typeOf(EvalState & state, const PosIdx pos, Value ** args, Valu v.mkStringNoCopy("float"_sds); break; case nThunk: + case nFailed: unreachable(); } } @@ -1150,7 +1160,7 @@ static RegisterPrimOp primop_floor({ a NixInt and if `*number* < -9007199254740992` or `*number* > 9007199254740992`. If the datatype of *number* is neither a NixInt (signed 64-bit integer) nor a NixFloat - (IEEE-754 double-precision floating-point number), an evaluation error will be thrown. 
+ (IEEE-754 double-precision floating-point number), an evaluation error is thrown. )", .fun = prim_floor, }); @@ -1197,7 +1207,7 @@ static RegisterPrimOp primop_tryEval({ `false` if an error was thrown) and `value`, equalling *e* if successful and `false` otherwise. `tryEval` only prevents errors created by `throw` or `assert` from being thrown. - Errors `tryEval` doesn't catch are, for example, those created + Errors that `tryEval` doesn't catch are, for example, those created by `abort` and type errors generated by builtins. Also note that this doesn't evaluate *e* deeply, so `let e = { x = throw ""; }; in (builtins.tryEval e).success` is `true`. Using @@ -1349,7 +1359,7 @@ static RegisterPrimOp primop_warn({ [`debugger-on-trace`](@docroot@/command-ref/conf-file.md#conf-debugger-on-trace) or [`debugger-on-warn`](@docroot@/command-ref/conf-file.md#conf-debugger-on-warn) option is set to `true` and the `--debugger` flag is given, the - interactive debugger will be started when `warn` is called (like + interactive debugger is started when `warn` is called (like [`break`](@docroot@/language/builtins.md#builtins-break)). If the @@ -1705,6 +1715,10 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName /* Everything in the context of the strings in the derivation attributes should be added as dependencies of the resulting derivation. */ + StringMap rewrites; + + std::optional drvS; + for (auto & c : context) { std::visit( overloaded{ @@ -1716,6 +1730,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::DrvDeep & d) { /* !!! This doesn't work if readOnlyMode is set. */ StorePathSet refs; + // FIXME: don't need to wait, we only need the references. 
+ state.waitForPath(d.drvPath); state.store->computeFSClosure(d.drvPath, refs); for (auto & j : refs) { drv.inputSrcs.insert(j); @@ -1727,11 +1743,27 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::Built & b) { drv.inputDrvs.ensureSlot(*b.drvPath).value.insert(b.output); }, - [&](const NixStringContextElem::Opaque & o) { drv.inputSrcs.insert(o.path); }, + [&](const NixStringContextElem::Opaque & o) { + drv.inputSrcs.insert(state.devirtualize(o.path, &rewrites)); + }, + [&](const NixStringContextElem::Path & p) { + if (!drvS) + drvS = drv.unparse(*state.store, true); + if (drvS->find(p.storePath.to_string()) != drvS->npos) { + auto devirtualized = state.devirtualize(p.storePath, &rewrites); + warn( + "Using 'builtins.derivation' to create a derivation named '%s' that references the store path '%s' without a proper context. " + "The resulting derivation will not have a correct store reference, so this is unreliable and may stop working in the future.", + drvName, + state.store->printStorePath(devirtualized)); + } + }, }, c.raw); } + drv.applyRewrites(rewrites); + /* Do we have all required attributes? */ if (drv.builder == "") state.error("required attribute 'builder' missing").atPos(v).debugThrow(); @@ -1819,7 +1851,7 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName } /* Write the resulting term into the Nix store directory. 
*/ - auto drvPath = writeDerivation(*state.store, drv, state.repair); + auto drvPath = writeDerivation(*state.store, *state.asyncPathWriter, drv, state.repair); auto drvPathS = state.store->printStorePath(drvPath); printMsg(lvlChatty, "instantiated '%1%' -> '%2%'", drvName, drvPathS); @@ -2086,14 +2118,17 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value ** args, Va .debugThrow(); StorePathSet refs; if (state.store->isInStore(path.path.abs())) { - try { - refs = state.store->queryPathInfo(state.store->toStorePath(path.path.abs()).first)->references; - } catch (Error &) { // FIXME: should be InvalidPathError + auto storePath = state.store->toStorePath(path.path.abs()).first; + // Skip virtual paths since they don't have references and + // don't exist anyway. + if (!state.storeFS->getMount(CanonPath(state.store->printStorePath(storePath)))) { + if (auto info = state.store->maybeQueryPathInfo(state.store->toStorePath(path.path.abs()).first)) { + // Re-scan references to filter down to just the ones that actually occur in the file. + auto refsSink = PathRefScanSink::fromPaths(info->references); + refsSink << s; + refs = refsSink.getResultPaths(); + } } - // Re-scan references to filter down to just the ones that actually occur in the file. 
- auto refsSink = PathRefScanSink::fromPaths(refs); - refsSink << s; - refs = refsSink.getResultPaths(); } NixStringContext context; for (auto && p : std::move(refs)) { @@ -2656,15 +2691,25 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu { NixStringContext context; auto name = state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile"); - auto contents = - state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile"); + std::string contents( + state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile")); StorePathSet refs; + StringMap rewrites; for (auto c : context) { if (auto p = std::get_if(&c.raw)) refs.insert(p->path); - else + else if (auto p = std::get_if(&c.raw)) { + if (contents.find(p->storePath.to_string()) != contents.npos) { + auto devirtualized = state.devirtualize(p->storePath, &rewrites); + warn( + "Using 'builtins.toFile' to create a file named '%s' that references the store path '%s' without a proper context. " + "The resulting file will not have a correct store reference, so this is unreliable and may stop working in the future.", + name, + state.store->printStorePath(devirtualized)); + } + } else state .error( "files created by %1% may not reference derivations, but %2% references %3%", @@ -2675,6 +2720,8 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu .debugThrow(); } + contents = rewriteStrings(contents, rewrites); + auto storePath = settings.readOnlyMode ? state.store->makeFixedOutputPathFromCA( name, TextInfo{ @@ -2834,6 +2881,7 @@ static void addPath( name, ContentAddressWithReferences::fromParts(method, *expectedHash, {refs})); if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) { + // FIXME: make this lazy? // FIXME: support refs in fetchToStore()? auto dstPath = refs.empty() ? 
fetchToStore( state.fetchSettings, @@ -2876,7 +2924,15 @@ static void prim_filterSource(EvalState & state, const PosIdx pos, Value ** args state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterSource"); addPath( - state, pos, path.baseName(), path, args[0], ContentAddressMethod::Raw::NixArchive, std::nullopt, v, context); + state, + pos, + state.computeBaseName(path, pos), + path, + args[0], + ContentAddressMethod::Raw::NixArchive, + std::nullopt, + v, + context); } static RegisterPrimOp primop_filterSource({ @@ -3529,6 +3585,49 @@ static RegisterPrimOp primop_mapAttrs({ .fun = prim_mapAttrs, }); +static void prim_filterAttrs(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceAttrs(*args[1], pos, "while evaluating the second argument passed to builtins.filterAttrs"); + + if (args[1]->attrs()->empty()) { + v = *args[1]; + return; + } + + state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterAttrs"); + + auto attrs = state.buildBindings(args[1]->attrs()->size()); + + for (auto & i : *args[1]->attrs()) { + Value * vName = Value::toPtr(state.symbols[i.name]); + Value * callArgs[] = {vName, i.value}; + Value res; + state.callFunction(*args[0], callArgs, res, noPos); + if (state.forceBool( + res, pos, "while evaluating the return value of the filtering function passed to builtins.filterAttrs")) + attrs.insert(i.name, i.value); + } + + v.mkAttrs(attrs.alreadySorted()); +} + +static RegisterPrimOp primop_filterAttrs({ + .name = "__filterAttrs", + .args = {"f", "attrset"}, + .doc = R"( + Return an attribute set consisting of the attributes in *attrset* for which + the function *f* returns `true`. The function *f* is called with two arguments: + the name of the attribute and the value of the attribute. For example, + + ```nix + builtins.filterAttrs (name: value: name == "foo") { foo = 1; bar = 2; } + ``` + + evaluates to `{ foo = 1; }`. 
+ )", + .fun = prim_filterAttrs, +}); + static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value ** args, Value & v) { // we will first count how many values are present for each given key. @@ -3909,8 +4008,8 @@ static void anyOrAll(bool any, EvalState & state, const PosIdx pos, Value ** arg std::string_view errorCtx = any ? "while evaluating the return value of the function passed to builtins.any" : "while evaluating the return value of the function passed to builtins.all"; - Value vTmp; for (auto elem : args[1]->listView()) { + Value vTmp; state.callFunction(*args[0], *elem, vTmp, pos); bool res = state.forceBool(vTmp, pos, errorCtx); if (res == any) { @@ -5199,9 +5298,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) )", }); - if (!settings.pureEval) { - v.mkInt(time(0)); - } + v.mkInt(time(0)); addConstant( "__currentTime", v, @@ -5229,8 +5326,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) .impureOnly = true, }); - if (!settings.pureEval) - v.mkString(settings.getCurrentSystem(), mem); + v.mkString(settings.getCurrentSystem(), mem); addConstant( "__currentSystem", v, diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc index 70c13e2985b..d4824d9b9e5 100644 --- a/src/libexpr/primops/context.cc +++ b/src/libexpr/primops/context.cc @@ -8,10 +8,16 @@ namespace nix { static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - NixStringContext context; + NixStringContext context, filtered; + auto s = state.coerceToString( pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardStringContext"); - v.mkString(*s, state.mem); + + for (auto & c : context) + if (auto * p = std::get_if(&c.raw)) + filtered.insert(*p); + + v.mkString(*s, filtered, state.mem); } static RegisterPrimOp primop_unsafeDiscardStringContext({ @@ -23,11 +29,19 @@ static RegisterPrimOp primop_unsafeDiscardStringContext({ .fun = 
prim_unsafeDiscardStringContext, }); +bool hasContext(const NixStringContext & context) +{ + for (auto & c : context) + if (!std::get_if(&c.raw)) + return true; + return false; +} + static void prim_hasContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { NixStringContext context; state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.hasContext"); - v.mkBool(!context.empty()); + v.mkBool(hasContext(context)); } static RegisterPrimOp primop_hasContext( @@ -62,6 +76,7 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx p NixStringContext context2; for (auto && c : context) { if (auto * ptr = std::get_if(&c.raw)) { + state.waitForPath(ptr->drvPath); // FIXME: why? context2.emplace(NixStringContextElem::Opaque{.path = ptr->drvPath}); } else { /* Can reuse original item */ @@ -133,6 +148,11 @@ static void prim_addDrvOutputDependencies(EvalState & state, const PosIdx pos, V above does not make much sense. */ return std::move(c); }, + [&](const NixStringContextElem::Path & p) -> NixStringContextElem::DrvDeep { + state.error("`addDrvOutputDependencies` does not work on a string without context") + .atPos(pos) + .debugThrow(); + }, }, context.begin()->raw)}), }; @@ -201,6 +221,7 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value ** args, contextInfos[std::move(drvPath)].outputs.emplace_back(std::move(b.output)); }, [&](NixStringContextElem::Opaque && o) { contextInfos[std::move(o.path)].path = true; }, + [&](NixStringContextElem::Path && p) {}, }, ((NixStringContextElem &&) i).raw); } diff --git a/src/libexpr/primops/fetchClosure.cc b/src/libexpr/primops/fetchClosure.cc index 6e1389814fc..f849d0debb8 100644 --- a/src/libexpr/primops/fetchClosure.cc +++ b/src/libexpr/primops/fetchClosure.cc @@ -136,7 +136,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value ** args std::optional inputAddressedMaybe; for (auto & attr : *args[0]->attrs()) { - 
const auto & attrName = state.symbols[attr.name]; + std::string_view attrName = state.symbols[attr.name]; auto attrHint = [&]() -> std::string { return fmt("while evaluating the attribute '%s' passed to builtins.fetchClosure", attrName); }; @@ -243,7 +243,7 @@ static RegisterPrimOp primop_fetchClosure({ ```nix builtins.fetchClosure { fromStore = "https://cache.nixos.org"; - fromPath = /nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1; + fromPath = /nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1; toPath = /nix/store/ldbhlwhh39wha58rm61bkiiwm6j7211j-git-2.33.1; } ``` @@ -258,8 +258,8 @@ static RegisterPrimOp primop_fetchClosure({ use [`nix store make-content-addressed`](@docroot@/command-ref/new-cli/nix3-store-make-content-addressed.md): ```console - # nix store make-content-addressed --from https://cache.nixos.org /nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1 - rewrote '/nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1' to '/nix/store/ldbhlwhh39wha58rm61bkiiwm6j7211j-git-2.33.1' + # nix store make-content-addressed --from https://cache.nixos.org /nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1 + rewrote '/nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1' to '/nix/store/ldbhlwhh39wha58rm61bkiiwm6j7211j-git-2.33.1' ``` Alternatively, set `toPath = ""` and find the correct `toPath` in the error message. 
@@ -271,7 +271,7 @@ static RegisterPrimOp primop_fetchClosure({ ```nix builtins.fetchClosure { fromStore = "https://cache.nixos.org"; - fromPath = /nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1; + fromPath = /nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1; inputAddressed = true; } ``` diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index cc42931a61e..4ab060f7807 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -81,7 +81,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value ** ar attrs.insert_or_assign("rev", rev->gitRev()); auto input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); - auto [storePath, input2] = input.fetchToStore(state.fetchSettings, *state.store); + auto [storePath, accessor, input2] = input.fetchToStore(state.fetchSettings, *state.store); auto attrs2 = state.buildBindings(8); state.mkStorePathString(storePath, attrs2.alloc(state.s.outPath)); diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 1614fcc595d..dda81b9d328 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -77,7 +77,6 @@ struct FetchTreeParams bool emptyRevFallback = false; bool allowNameArgument = false; bool isFetchGit = false; - bool isFinal = false; }; static void fetchTree( @@ -151,11 +150,6 @@ static void fetchTree( attrs.emplace("exportIgnore", Explicit{true}); } - // fetchTree should fetch git repos with shallow = true by default - if (type == "git" && !params.isFetchGit && !attrs.contains("shallow")) { - attrs.emplace("shallow", Explicit{true}); - } - if (!params.allowNameArgument) if (auto nameIter = attrs.find("name"); nameIter != attrs.end()) state.error("argument 'name' isn’t supported in call to '%s'", fetcher) @@ -184,17 +178,11 @@ static void fetchTree( } input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); } else { - if 
(!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - state - .error( - "passing a string argument to '%s' requires the 'flakes' experimental feature", fetcher) - .atPos(pos) - .debugThrow(); input = fetchers::Input::fromURL(state.fetchSettings, url); } } - if (!state.settings.pureEval && !input.isDirect() && experimentalFeatureSettings.isEnabled(Xp::Flakes)) + if (!state.settings.pureEval && !input.isDirect()) input = lookupInRegistries(state.fetchSettings, *state.store, input, fetchers::UseRegistries::Limited).first; if (state.settings.pureEval && !input.isLocked(state.fetchSettings)) { @@ -213,17 +201,13 @@ static void fetchTree( state.checkURI(input.toURLString()); - if (params.isFinal) { + if (input.getNarHash()) input.attrs.insert_or_assign("__final", Explicit(true)); - } else { - if (input.isFinal()) - throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string()); - } auto cachedInput = state.inputCache->getAccessor(state.fetchSettings, *state.store, input, fetchers::UseRegistries::No); - auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor); + auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor, true); emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false); } @@ -318,7 +302,6 @@ static RegisterPrimOp primop_fetchTree({ - `"mercurial"` *input* can also be a [URL-like reference](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references). - The additional input types and the URL-like syntax requires the [`flakes` experimental feature](@docroot@/development/experimental-features.md#xp-feature-flakes) to be enabled. 
> **Example** > @@ -358,19 +341,6 @@ static RegisterPrimOp primop_fetchTree({ return doc; }(), .fun = prim_fetchTree, - .experimentalFeature = Xp::FetchTree, -}); - -void prim_fetchFinalTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) -{ - fetchTree(state, pos, args, v, {.isFinal = true}); -} - -static RegisterPrimOp primop_fetchFinalTree({ - .name = "fetchFinalTree", - .args = {"input"}, - .fun = prim_fetchFinalTree, - .internal = true, }); static void fetch( @@ -719,7 +689,7 @@ static RegisterPrimOp primop_fetchGit({ name in the `ref` attribute. However, if the revision you're looking for is in a future - branch for the non-default branch you will need to specify the + branch for the non-default branch you need to specify the the `ref` attribute as well. ```nix diff --git a/src/libexpr/print-ambiguous.cc b/src/libexpr/print-ambiguous.cc index 8b80e2a6634..f80ef2b044b 100644 --- a/src/libexpr/print-ambiguous.cc +++ b/src/libexpr/print-ambiguous.cc @@ -6,8 +6,7 @@ namespace nix { // See: https://github.com/NixOS/nix/issues/9730 -void printAmbiguous( - Value & v, const SymbolTable & symbols, std::ostream & str, std::set * seen, int depth) +void printAmbiguous(EvalState & state, Value & v, std::ostream & str, std::set * seen, int depth) { checkInterrupt(); @@ -22,9 +21,13 @@ void printAmbiguous( case nBool: printLiteralBool(str, v.boolean()); break; - case nString: - printLiteralString(str, v.string_view()); + case nString: { + NixStringContext context; + copyContext(v, context); + // FIXME: make devirtualization configurable? + printLiteralString(str, state.devirtualize(v.string_view(), context)); break; + } case nPath: str << v.path().to_string(); // !!! escaping? 
break; @@ -36,9 +39,9 @@ void printAmbiguous( str << "«repeated»"; else { str << "{ "; - for (auto & i : v.attrs()->lexicographicOrder(symbols)) { - str << symbols[i->name] << " = "; - printAmbiguous(*i->value, symbols, str, seen, depth - 1); + for (auto & i : v.attrs()->lexicographicOrder(state.symbols)) { + str << state.symbols[i->name] << " = "; + printAmbiguous(state, *i->value, str, seen, depth - 1); str << "; "; } str << "}"; @@ -54,7 +57,7 @@ void printAmbiguous( str << "[ "; for (auto v2 : v.listView()) { if (v2) - printAmbiguous(*v2, symbols, str, seen, depth - 1); + printAmbiguous(state, *v2, str, seen, depth - 1); else str << "(nullptr)"; str << " "; @@ -75,6 +78,9 @@ void printAmbiguous( str << "«potential infinite recursion»"; } break; + case nFailed: + str << "«failed»"; + break; case nFunction: if (v.isLambda()) { str << ""; diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index 4776be03385..bf856db45d5 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -249,7 +249,11 @@ class Printer void printString(Value & v) { - printLiteralString(output, v.string_view(), options.maxStringLength, options.ansiColors); + NixStringContext context; + copyContext(v, context); + std::ostringstream s; + printLiteralString(s, v.string_view(), options.maxStringLength, options.ansiColors); + output << state.devirtualize(s.str(), context); } void printPath(Value & v) @@ -498,7 +502,7 @@ class Printer output << "«potential infinite recursion»"; if (options.ansiColors) output << ANSI_NORMAL; - } else if (v.isThunk() || v.isApp()) { + } else if (!v.isFinished()) { if (options.ansiColors) output << ANSI_MAGENTA; output << "«thunk»"; @@ -509,6 +513,11 @@ class Printer } } + void printFailed(Value & v) + { + output << "«failed»"; + } + void printExternal(Value & v) { v.external()->print(output); @@ -584,6 +593,10 @@ class Printer printThunk(v); break; + case nFailed: + printFailed(v); + break; + case nExternal: printExternal(v); break; diff --git 
a/src/libexpr/symbol-table.cc b/src/libexpr/symbol-table.cc new file mode 100644 index 00000000000..052c7257037 --- /dev/null +++ b/src/libexpr/symbol-table.cc @@ -0,0 +1,63 @@ +#include "nix/expr/symbol-table.hh" +#include "nix/util/logging.hh" + +#include + +namespace nix { + +#ifndef MAP_NORESERVE +# define MAP_NORESERVE 0 +#endif + +static void * allocateLazyMemory(size_t maxSize) +{ + auto p = mmap(nullptr, maxSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0); + if (p == MAP_FAILED) + throw SysError("allocating arena using mmap"); + return p; +} + +ContiguousArena::ContiguousArena(size_t maxSize) + : data((char *) allocateLazyMemory(maxSize)) + , maxSize(maxSize) +{ +} + +size_t ContiguousArena::allocate(size_t bytes) +{ + auto offset = size.fetch_add(bytes); + if (offset + bytes > maxSize) + throw Error("arena ran out of space"); + return offset; +} + +Symbol SymbolTable::create(std::string_view s) +{ + uint32_t idx; + + auto visit = [&](const SymbolStr & sym) { idx = ((const char *) sym.s) - arena.data; }; + + symbols.insert_and_visit(SymbolStr::Key{s, arena}, visit, visit); + + return Symbol(idx); +} + +SymbolStr::SymbolStr(const SymbolStr::Key & key) +{ + auto size = SymbolStr::computeSize(key.s); + + auto id = key.arena.allocate(size); + + auto v = (SymbolValue *) (const_cast(key.arena.data) + id); + + auto s = (StringData *) (v + 1); + s->size_ = key.s.size(); + std::memcpy(s->data_, key.s.data(), key.s.size()); + s->data_[key.s.size()] = '\0'; + + v->mkStringNoCopy(*s); + + this->s = v; +} + +} // namespace nix diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index b2cc482c6e3..45ae57d7977 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -2,106 +2,147 @@ #include "nix/expr/eval-inline.hh" #include "nix/store/store-api.hh" #include "nix/util/signals.hh" +#include "nix/expr/parallel-eval.hh" #include #include #include namespace nix { + using json = nlohmann::json; 
+#pragma GCC diagnostic ignored "-Wswitch-enum" + +static void parallelForceDeep(EvalState & state, Value & v, PosIdx pos) +{ + state.forceValue(v, pos); + + std::vector> work; + + switch (v.type()) { + + case nAttrs: { + NixStringContext context; + if (state.tryAttrsToString(pos, v, context, false, false)) + return; + if (v.attrs()->get(state.s.outPath)) + return; + for (auto & a : *v.attrs()) + work.emplace_back( + [value(allocRootValue(a.value)), pos(a.pos), &state]() { parallelForceDeep(state, **value, pos); }, 0); + break; + } + + default: + break; + } + + state.executor->spawn(std::move(work)); +} + // TODO: rename. It doesn't print. json printValueAsJSON( EvalState & state, bool strict, Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore) { - checkInterrupt(); + if (strict && state.executor->enabled && !Executor::amWorkerThread) + parallelForceDeep(state, v, pos); - auto _level = state.addCallDepth(pos); + auto recurse = [&](this const auto & recurse, json & res, Value & v, PosIdx pos) -> void { + checkInterrupt(); - if (strict) - state.forceValue(v, pos); + auto _level = state.addCallDepth(pos); - json out; + if (strict) + state.forceValue(v, pos); - switch (v.type()) { + switch (v.type()) { - case nInt: - out = v.integer().value; - break; + case nInt: + res = v.integer().value; + break; - case nBool: - out = v.boolean(); - break; + case nBool: + res = v.boolean(); + break; - case nString: - copyContext(v, context); - out = v.string_view(); - break; + case nString: { + copyContext(v, context); + res = v.string_view(); + break; + } - case nPath: - if (copyToStore) - out = state.store->printStorePath(state.copyPathToStore(context, v.path())); - else - out = v.path().path.abs(); - break; + case nPath: + if (copyToStore) + res = state.store->printStorePath(state.copyPathToStore(context, v.path(), v.determinePos(pos))); + else + res = v.path().path.abs(); + break; - case nNull: - // already initialized as null - break; + case nNull: + // 
already initialized as null + break; - case nAttrs: { - auto maybeString = state.tryAttrsToString(pos, v, context, false, false); - if (maybeString) { - out = *maybeString; + case nAttrs: { + auto maybeString = state.tryAttrsToString(pos, v, context, false, false); + if (maybeString) { + res = *maybeString; + break; + } + if (auto i = v.attrs()->get(state.s.outPath)) + return recurse(res, *i->value, i->pos); + else { + res = json::object(); + for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + json & j = res.emplace(state.symbols[a->name], json()).first.value(); + try { + recurse(j, *a->value, a->pos); + } catch (Error & e) { + e.addTrace( + state.positions[a->pos], + HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + throw; + } + } + } break; } - if (auto i = v.attrs()->get(state.s.outPath)) - return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore); - else { - out = json::object(); - for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + + case nList: { + res = json::array(); + for (const auto & [i, elem] : enumerate(v.listView())) { try { - out.emplace( - state.symbols[a->name], - printValueAsJSON(state, strict, *a->value, a->pos, context, copyToStore)); + res.push_back(json()); + recurse(res.back(), *elem, pos); } catch (Error & e) { - e.addTrace( - state.positions[a->pos], HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); throw; } } + break; } - break; - } - case nList: { - out = json::array(); - int i = 0; - for (auto elem : v.listView()) { - try { - out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore)); - } catch (Error & e) { - e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); - throw; - } - i++; + case nExternal: { + res = v.external()->printValueAsJSON(state, strict, context, copyToStore); + break; } - 
break; - } - case nExternal: - return v.external()->printValueAsJSON(state, strict, context, copyToStore); - break; + case nFloat: + res = v.fpoint(); + break; - case nFloat: - out = v.fpoint(); - break; + case nThunk: + case nFailed: + case nFunction: + state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); + } + }; - case nThunk: - case nFunction: - state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); - } - return out; + json res; + + recurse(res, v, pos); + + return res; } void printValueAsJSON( diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc index 0a7a334f41b..21de85a1717 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -170,6 +170,11 @@ static void printValueAsXML( case nThunk: doc.writeEmptyElement("unevaluated"); + break; + + case nFailed: + doc.writeEmptyElement("failed"); + break; } } diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc index dcc577f056c..a06d79ddebf 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -50,6 +50,11 @@ NixStringContextElem NixStringContextElem::parse(std::string_view s0, const Expe .drvPath = StorePath{s.substr(1)}, }; } + case '@': { + return NixStringContextElem::Path{ + .storePath = StorePath{s.substr(1)}, + }; + } default: { // Ensure no '!' 
if (s.find("!") != std::string_view::npos) { @@ -90,6 +95,10 @@ std::string NixStringContextElem::to_string() const res += '='; res += d.drvPath.to_string(); }, + [&](const NixStringContextElem::Path & p) { + res += '@'; + res += p.storePath.to_string(); + }, }, raw); diff --git a/src/libfetchers-c/package.nix b/src/libfetchers-c/package.nix index 9a601d70417..13ec30d566e 100644 --- a/src/libfetchers-c/package.nix +++ b/src/libfetchers-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-fetchers-c"; + pname = "determinate-nix-fetchers-c"; inherit version; workDir = ./.; diff --git a/src/libfetchers-tests/access-tokens.cc b/src/libfetchers-tests/access-tokens.cc index 7127434db9d..26cdcfb83fc 100644 --- a/src/libfetchers-tests/access-tokens.cc +++ b/src/libfetchers-tests/access-tokens.cc @@ -15,10 +15,7 @@ class AccessKeysTest : public ::testing::Test protected: public: - void SetUp() override - { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - } + void SetUp() override {} void TearDown() override {} }; diff --git a/src/libfetchers-tests/git-utils.cc b/src/libfetchers-tests/git-utils.cc index 762e39ad6ea..0b21fd0c67d 100644 --- a/src/libfetchers-tests/git-utils.cc +++ b/src/libfetchers-tests/git-utils.cc @@ -48,7 +48,7 @@ class GitUtilsTest : public ::testing::Test ref openRepo() { - return GitRepo::openRepo(tmpDir, true, false); + return GitRepo::openRepo(tmpDir, {.create = true}); } std::string getRepoName() const @@ -115,9 +115,10 @@ TEST_F(GitUtilsTest, sink_hardlink) try { sink->createHardlink(CanonPath("foo-1.1/link"), CanonPath("hello")); + sink->flush(); FAIL() << "Expected an exception"; } catch (const nix::Error & e) { - ASSERT_THAT(e.msg(), testing::HasSubstr("cannot find hard link target")); + ASSERT_THAT(e.msg(), testing::HasSubstr("does not exist")); ASSERT_THAT(e.msg(), testing::HasSubstr("/hello")); ASSERT_THAT(e.msg(), testing::HasSubstr("foo-1.1/link")); } diff --git 
a/src/libfetchers/builtin-flake-registry.json b/src/libfetchers/builtin-flake-registry.json new file mode 100644 index 00000000000..65e973290a0 --- /dev/null +++ b/src/libfetchers/builtin-flake-registry.json @@ -0,0 +1,425 @@ +{ + "flakes": [ + { + "from": { + "id": "agda", + "type": "indirect" + }, + "to": { + "owner": "agda", + "repo": "agda", + "type": "github" + } + }, + { + "from": { + "id": "agenix", + "type": "indirect" + }, + "to": { + "owner": "ryantm", + "repo": "agenix", + "type": "github" + } + }, + { + "from": { + "id": "arion", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "arion", + "type": "github" + } + }, + { + "from": { + "id": "blender-bin", + "type": "indirect" + }, + "to": { + "dir": "blender", + "owner": "edolstra", + "repo": "nix-warez", + "type": "github" + } + }, + { + "from": { + "id": "bundlers", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "bundlers", + "type": "github" + } + }, + { + "from": { + "id": "cachix", + "type": "indirect" + }, + "to": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + { + "from": { + "id": "composable", + "type": "indirect" + }, + "to": { + "owner": "ComposableFi", + "repo": "composable", + "type": "github" + } + }, + { + "from": { + "id": "disko", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "disko", + "type": "github" + } + }, + { + "from": { + "id": "dreampkgs", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "dreampkgs", + "type": "github" + } + }, + { + "from": { + "id": "dwarffs", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "dwarffs", + "type": "github" + } + }, + { + "from": { + "id": "emacs-overlay", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "emacs-overlay", + "type": "github" + } + }, + { + "from": { + "id": "fenix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + 
}, + { + "from": { + "id": "flake-parts", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + { + "from": { + "id": "flake-utils", + "type": "indirect" + }, + "to": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + { + "from": { + "id": "helix", + "type": "indirect" + }, + "to": { + "owner": "helix-editor", + "repo": "helix", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-agent", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-agent", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-effects", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-effects", + "type": "github" + } + }, + { + "from": { + "id": "home-manager", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "home-manager", + "type": "github" + } + }, + { + "from": { + "id": "hydra", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "hydra", + "type": "github" + } + }, + { + "from": { + "id": "mach-nix", + "type": "indirect" + }, + "to": { + "owner": "DavHau", + "repo": "mach-nix", + "type": "github" + } + }, + { + "from": { + "id": "ngipkgs", + "type": "indirect" + }, + "to": { + "owner": "ngi-nix", + "repo": "ngipkgs", + "type": "github" + } + }, + { + "from": { + "id": "nickel", + "type": "indirect" + }, + "to": { + "owner": "tweag", + "repo": "nickel", + "type": "github" + } + }, + { + "from": { + "id": "nix", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nix", + "type": "github" + } + }, + { + "from": { + "id": "nix-darwin", + "type": "indirect" + }, + "to": { + "owner": "nix-darwin", + "repo": "nix-darwin", + "type": "github" + } + }, + { + "from": { + "id": "nix-serve", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "nix-serve", + "type": "github" + } + }, + { + "from": { + "id": "nixops", + "type": "indirect" + }, + 
"to": { + "owner": "NixOS", + "repo": "nixops", + "type": "github" + } + }, + { + "from": { + "id": "nixos-anywhere", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "nixos-anywhere", + "type": "github" + } + }, + { + "from": { + "id": "nixos-hardware", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-hardware", + "type": "github" + } + }, + { + "from": { + "id": "nixos-homepage", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-homepage", + "type": "github" + } + }, + { + "from": { + "id": "nixos-search", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-search", + "type": "github" + } + }, + { + "from": { + "id": "nixpkgs", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + { + "from": { + "id": "nur", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "NUR", + "type": "github" + } + }, + { + "from": { + "id": "patchelf", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "patchelf", + "type": "github" + } + }, + { + "from": { + "id": "poetry2nix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + { + "from": { + "id": "pridefetch", + "type": "indirect" + }, + "to": { + "owner": "SpyHoodle", + "repo": "pridefetch", + "type": "github" + } + }, + { + "from": { + "id": "sops-nix", + "type": "indirect" + }, + "to": { + "owner": "Mic92", + "repo": "sops-nix", + "type": "github" + } + }, + { + "from": { + "id": "systems", + "type": "indirect" + }, + "to": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + { + "from": { + "id": "templates", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "templates", + "type": "github" + } + } + ], + "version": 2 +} diff --git a/src/libfetchers/builtin.cc b/src/libfetchers/builtin.cc new file mode 100644 
index 00000000000..44b3baf0b1a --- /dev/null +++ b/src/libfetchers/builtin.cc @@ -0,0 +1,60 @@ +#include "nix/store/builtins.hh" +#include "nix/store/parsed-derivations.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/fetchers/fetch-settings.hh" +#include "nix/util/archive.hh" +#include "nix/store/filetransfer.hh" +#include "nix/store/store-open.hh" + +#include + +namespace nix { + +static void builtinFetchTree(const BuiltinBuilderContext & ctx) +{ + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); + + auto out = get(ctx.drv.outputs, "out"); + if (!out) + throw Error("'builtin:fetch-tree' requires an 'out' output"); + + if (!(ctx.drv.type().isFixed() || ctx.drv.type().isImpure())) + throw Error("'builtin:fetch-tree' must be a fixed-output or impure derivation"); + + if (!ctx.drv.structuredAttrs) + throw Error("'builtin:fetch-tree' must have '__structuredAttrs = true'"); + + setenv("NIX_CACHE_HOME", ctx.tmpDirInSandbox.c_str(), 1); + + using namespace fetchers; + + fetchers::Settings myFetchSettings; + myFetchSettings.accessTokens = fetchSettings.accessTokens.get(); + + // Make sure we don't use the FileTransfer object of the parent + // since it's in a broken state after the fork. We also must not + // delete it, so hang on to the shared_ptr. + // FIXME: move FileTransfer into fetchers::Settings. + static auto prevFileTransfer = resetFileTransfer(); + + // FIXME: disable use of the git/tarball cache + + auto input = Input::fromAttrs(myFetchSettings, jsonToAttrs(ctx.drv.structuredAttrs->structuredAttrs.at("input"))); + + std::cerr << fmt("fetching '%s'...\n", input.to_string()); + + /* Functions like downloadFile() expect a store. We can't use the + real one since we're in a forked process. FIXME: use recursive + Nix's daemon so we can use the real store? 
*/ + auto tmpStore = openStore(ctx.tmpDirInSandbox + "/nix"); + + auto [accessor, lockedInput] = input.getAccessor(myFetchSettings, *tmpStore); + + auto source = sinkToSource([&](Sink & sink) { accessor->dumpPath(CanonPath::root, sink); }); + + restorePath(ctx.outputs.at("out"), *source); +} + +static RegisterBuiltinBuilder registerUnpackChannel("fetch-tree", builtinFetchTree); + +} // namespace nix diff --git a/src/libfetchers/cache.cc b/src/libfetchers/cache.cc index 183f106a5d3..1db3ed8dc89 100644 --- a/src/libfetchers/cache.cc +++ b/src/libfetchers/cache.cc @@ -109,7 +109,7 @@ struct CacheImpl : Cache upsert(key, value); } - std::optional lookupStorePath(Key key, Store & store) override + std::optional lookupStorePath(Key key, Store & store, bool allowInvalid) override { key.second.insert_or_assign("store", store.storeDir); @@ -123,7 +123,7 @@ struct CacheImpl : Cache ResultWithStorePath res2(*res, StorePath(storePathS)); store.addTempRoot(res2.storePath); - if (!store.isValidPath(res2.storePath)) { + if (!allowInvalid && !store.isValidPath(res2.storePath)) { // FIXME: we could try to substitute 'storePath'. debug( "ignoring disappeared cache entry '%s:%s' -> '%s'", @@ -145,7 +145,7 @@ struct CacheImpl : Cache std::optional lookupStorePathWithTTL(Key key, Store & store) override { - auto res = lookupStorePath(std::move(key), store); + auto res = lookupStorePath(std::move(key), store, false); return res && !res->expired ? 
res : std::nullopt; } }; diff --git a/src/libfetchers/fetch-settings.cc b/src/libfetchers/fetch-settings.cc index f92b94a0b3b..f50177f094e 100644 --- a/src/libfetchers/fetch-settings.cc +++ b/src/libfetchers/fetch-settings.cc @@ -1,7 +1,16 @@ #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/config-global.hh" namespace nix::fetchers { Settings::Settings() {} } // namespace nix::fetchers + +namespace nix { + +fetchers::Settings fetchSettings; + +static GlobalConfig::Register rFetchSettings(&fetchSettings); + +} // namespace nix diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index b1e8b9d72bb..7c5c5be1036 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -5,12 +5,11 @@ namespace nix { -fetchers::Cache::Key makeFetchToStoreCacheKey( - const std::string & name, const std::string & fingerprint, ContentAddressMethod method, const std::string & path) +fetchers::Cache::Key +makeSourcePathToHashCacheKey(const std::string & fingerprint, ContentAddressMethod method, const std::string & path) { return fetchers::Cache::Key{ - "fetchToStore", - {{"name", name}, {"fingerprint", fingerprint}, {"method", std::string{method.render()}}, {"path", path}}}; + "sourcePathToHash", {{"fingerprint", fingerprint}, {"method", std::string{method.render()}}, {"path", path}}}; } StorePath fetchToStore( @@ -23,23 +22,43 @@ StorePath fetchToStore( PathFilter * filter, RepairFlag repair) { - // FIXME: add an optimisation for the case where the accessor is - // a `PosixSourceAccessor` pointing to a store path. + return fetchToStore2(settings, store, path, mode, name, method, filter, repair).first; +} +std::pair fetchToStore2( + const fetchers::Settings & settings, + Store & store, + const SourcePath & path, + FetchMode mode, + std::string_view name, + ContentAddressMethod method, + PathFilter * filter, + RepairFlag repair) +{ std::optional cacheKey; auto [subpath, fingerprint] = filter ? 
std::pair>{path.path, std::nullopt} : path.accessor->getFingerprint(path.path); if (fingerprint) { - cacheKey = makeFetchToStoreCacheKey(std::string{name}, *fingerprint, method, subpath.abs()); - if (auto res = settings.getCache()->lookupStorePath(*cacheKey, store)) { - debug("store path cache hit for '%s'", path); - return res->storePath; + cacheKey = makeSourcePathToHashCacheKey(*fingerprint, method, subpath.abs()); + if (auto res = settings.getCache()->lookup(*cacheKey)) { + auto hash = Hash::parseSRI(fetchers::getStrAttr(*res, "hash")); + auto storePath = + store.makeFixedOutputPathFromCA(name, ContentAddressWithReferences::fromParts(method, hash, {})); + if (mode == FetchMode::DryRun || store.maybeQueryPathInfo(storePath)) { + debug( + "source path '%s' cache hit in '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + return {storePath, hash}; + } + debug("source path '%s' not in store", path); } } else { static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1"; - if (barf && !filter) + if (barf && !filter && !(path.to_string().starts_with("/") || path.to_string().starts_with("«path:/"))) throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter); // FIXME: could still provide in-memory caching keyed on `SourcePath`. debug("source path '%s' is uncacheable", path); @@ -53,16 +72,41 @@ StorePath fetchToStore( auto filter2 = filter ? *filter : defaultPathFilter; - auto storePath = mode == FetchMode::DryRun - ? store.computeStorePath(name, path, method, HashAlgorithm::SHA256, {}, filter2).first - : store.addToStore(name, path, method, HashAlgorithm::SHA256, {}, filter2, repair); - - debug(mode == FetchMode::DryRun ? "hashed '%s'" : "copied '%s' to '%s'", path, store.printStorePath(storePath)); + auto [storePath, hash] = + mode == FetchMode::DryRun + ? 
({ + auto [storePath, hash] = + store.computeStorePath(name, path, method, HashAlgorithm::SHA256, {}, filter2); + debug( + "hashed '%s' to '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + std::make_pair(storePath, hash); + }) + : ({ + // FIXME: ideally addToStore() would return the hash + // right away (like computeStorePath()). + auto storePath = store.addToStore(name, path, method, HashAlgorithm::SHA256, {}, filter2, repair); + auto info = store.queryPathInfo(storePath); + assert(info->references.empty()); + auto hash = method == ContentAddressMethod::Raw::NixArchive ? info->narHash : ({ + if (!info->ca || info->ca->method != method) + throw Error("path '%s' lacks a CA field", store.printStorePath(storePath)); + info->ca->hash; + }); + debug( + "copied '%s' to '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + std::make_pair(storePath, hash); + }); - if (cacheKey && mode == FetchMode::Copy) - settings.getCache()->upsert(*cacheKey, store, {}, storePath); + if (cacheKey) + settings.getCache()->upsert(*cacheKey, {{"hash", hash.to_string(HashFormat::SRI, true)}}); - return storePath; + return {storePath, hash}; } } // namespace nix diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 7e091ef1071..48c75df4f64 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -6,6 +6,7 @@ #include "nix/fetchers/fetch-settings.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/url.hh" +#include "nix/util/forwarding-source-accessor.hh" #include "nix/util/archive.hh" #include @@ -126,24 +127,30 @@ std::optional Input::getFingerprint(Store & store) const return fingerprint; } -ParsedURL Input::toURL() const +ParsedURL Input::toURL(bool abbreviate) const { if (!scheme) throw Error("cannot show unsupported input '%s'", attrsToJSON(attrs)); - return scheme->toURL(*this); + + auto url = scheme->toURL(*this, 
abbreviate); + + if (abbreviate) + url.query.erase("narHash"); + + return url; } -std::string Input::toURLString(const StringMap & extraQuery) const +std::string Input::toURLString(const StringMap & extraQuery, bool abbreviate) const { - auto url = toURL(); + auto url = toURL(abbreviate); for (auto & attr : extraQuery) url.query.insert(attr); return url.to_string(); } -std::string Input::to_string() const +std::string Input::to_string(bool abbreviate) const { - return toURL().to_string(); + return toURL(abbreviate).to_string(); } bool Input::isDirect() const @@ -190,35 +197,30 @@ bool Input::contains(const Input & other) const } // FIXME: remove -std::pair Input::fetchToStore(const Settings & settings, Store & store) const +std::tuple, Input> Input::fetchToStore(const Settings & settings, Store & store) const { if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - auto [storePath, input] = [&]() -> std::pair { - try { - auto [accessor, result] = getAccessorUnchecked(settings, store); - - auto storePath = - nix::fetchToStore(settings, store, SourcePath(accessor), FetchMode::Copy, result.getName()); + try { + auto [accessor, result] = getAccessorUnchecked(settings, store); - auto narHash = store.queryPathInfo(storePath)->narHash; - result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + auto storePath = nix::fetchToStore(settings, store, SourcePath(accessor), FetchMode::Copy, result.getName()); - result.attrs.insert_or_assign("__final", Explicit(true)); + auto narHash = store.queryPathInfo(storePath)->narHash; + result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - assert(result.isFinal()); + result.attrs.insert_or_assign("__final", Explicit(true)); - checkLocks(*this, result); + assert(result.isFinal()); - return {storePath, result}; - } catch (Error & e) { - e.addTrace({}, "while fetching the input '%s'", to_string()); - throw; - } - }(); + checkLocks(*this, result); 
- return {std::move(storePath), input}; + return {std::move(storePath), accessor, result}; + } catch (Error & e) { + e.addTrace({}, "while fetching the input '%s'", to_string()); + throw; + } } void Input::checkLocks(Input specified, Input & result) @@ -236,6 +238,9 @@ void Input::checkLocks(Input specified, Input & result) if (auto prevNarHash = specified.getNarHash()) specified.attrs.insert_or_assign("narHash", prevNarHash->to_string(HashFormat::SRI, true)); + if (auto narHash = result.getNarHash()) + result.attrs.insert_or_assign("narHash", narHash->to_string(HashFormat::SRI, true)); + for (auto & field : specified.attrs) { auto field2 = result.attrs.find(field.first); if (field2 != result.attrs.end() && field.second != field2->second) @@ -269,24 +274,10 @@ void Input::checkLocks(Input specified, Input & result) } } - if (auto prevLastModified = specified.getLastModified()) { - if (result.getLastModified() != prevLastModified) - throw Error( - "'lastModified' attribute mismatch in input '%s', expected %d, got %d", - result.to_string(), - *prevLastModified, - result.getLastModified().value_or(-1)); - } - if (auto prevRev = specified.getRev()) { if (result.getRev() != prevRev) throw Error("'rev' attribute mismatch in input '%s', expected %s", result.to_string(), prevRev->gitRev()); } - - if (auto prevRevCount = specified.getRevCount()) { - if (result.getRevCount() != prevRevCount) - throw Error("'revCount' attribute mismatch in input '%s', expected %d", result.to_string(), *prevRevCount); - } } std::pair, Input> Input::getAccessor(const Settings & settings, Store & store) const @@ -305,6 +296,21 @@ std::pair, Input> Input::getAccessor(const Settings & settin } } +/** + * Helper class that ensures that paths in substituted source trees + * are rendered as `«input»/path` rather than + * `«input»/nix/store/-source/path`. 
+ */ +struct SubstitutedSourceAccessor : ForwardingSourceAccessor +{ + using ForwardingSourceAccessor::ForwardingSourceAccessor; + + std::string showPath(const CanonPath & path) override + { + return displayPrefix + path.abs() + displaySuffix; + } +}; + std::pair, Input> Input::getAccessorUnchecked(const Settings & settings, Store & store) const { // FIXME: cache the accessor @@ -312,54 +318,73 @@ std::pair, Input> Input::getAccessorUnchecked(const Settings if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - /* The tree may already be in the Nix store, or it could be - substituted (which is often faster than fetching from the - original source). So check that. We only do this for final - inputs, otherwise there is a risk that we don't return the - same attributes (like `lastModified`) that the "real" fetcher - would return. - - FIXME: add a setting to disable this. - FIXME: substituting may be slower than fetching normally, - e.g. for fetchers like Git that are incremental! - */ - if (isFinal() && getNarHash()) { - try { - auto storePath = computeStorePath(store); - - store.ensurePath(storePath); + std::optional storePath; + if (isFinal() && getNarHash()) + storePath = computeStorePath(store); + + auto makeStoreAccessor = [&]() -> std::pair, Input> { + auto accessor = make_ref(store.requireStoreObjectAccessor(*storePath)); + + // FIXME: use the NAR hash for fingerprinting Git trees that have a .gitattributes file, since we don't know if + // we used `git archive` or libgit2 to fetch it. + accessor->fingerprint = getType() == "git" && accessor->pathExists(CanonPath(".gitattributes")) + ? std::optional(storePath->hashPart()) + : getFingerprint(store); + cachedFingerprint = accessor->fingerprint; + + // Store a cache entry for the substituted tree so later fetches + // can reuse the existing nar instead of copying the unpacked + // input back into the store on every evaluation. 
+ if (accessor->fingerprint) { + settings.getCache()->upsert( + makeSourcePathToHashCacheKey(*accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + {{"hash", store.queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)}}); + } - debug("using substituted/cached input '%s' in '%s'", to_string(), store.printStorePath(storePath)); + // FIXME: ideally we would use the `showPath()` of the + // "real" accessor for this fetcher type. + accessor->setPathDisplay("«" + to_string(true) + "»"); - auto accessor = store.requireStoreObjectAccessor(storePath); + return {accessor, *this}; + }; - accessor->fingerprint = getFingerprint(store); + /* If a tree with the expected hash is already in the Nix store, + reuse it. We only do this for final inputs, since otherwise + there is a risk that we don't return the same attributes (like + `lastModified`) that the "real" fetcher would return. */ + if (storePath && store.isValidPath(*storePath)) { + debug("using input '%s' in '%s'", to_string(), store.printStorePath(*storePath)); + return makeStoreAccessor(); + } - // Store a cache entry for the substituted tree so later fetches - // can reuse the existing nar instead of copying the unpacked - // input back into the store on every evaluation. 
- if (accessor->fingerprint) { - ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive; - auto cacheKey = makeFetchToStoreCacheKey(getName(), *accessor->fingerprint, method, "/"); - settings.getCache()->upsert(cacheKey, store, {}, storePath); - } + try { + auto [accessor, result] = scheme->getAccessor(settings, store, *this); - accessor->setPathDisplay("«" + to_string() + "»"); + if (auto fp = accessor->getFingerprint(CanonPath::root).second) + result.cachedFingerprint = *fp; + else + accessor->fingerprint = result.getFingerprint(store); - return {accessor, *this}; - } catch (Error & e) { - debug("substitution of input '%s' failed: %s", to_string(), e.what()); + return {accessor, std::move(result)}; + } catch (Error & e) { + if (storePath) { + // Fall back to substitution. + try { + store.ensurePath(*storePath); + warn( + "Successfully substituted input '%s' after failing to fetch it from its original location: %s", + to_string(), + e.info().msg); + return makeStoreAccessor(); + } + // Ignore any substitution error, rethrow the original error. + catch (Error & e2) { + debug("substitution of input '%s' failed: %s", to_string(), e2.info().msg); + } catch (...) 
{ + } } + throw; } - - auto [accessor, result] = scheme->getAccessor(settings, store, *this); - - if (!accessor->fingerprint) - accessor->fingerprint = result.getFingerprint(store); - else - result.cachedFingerprint = accessor->fingerprint; - - return {accessor, std::move(result)}; } Input Input::applyOverrides(std::optional ref, std::optional rev) const @@ -460,7 +485,7 @@ std::optional Input::getLastModified() const return {}; } -ParsedURL InputScheme::toURL(const Input & input) const +ParsedURL InputScheme::toURL(const Input & input, bool abbreviate) const { throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs)); } diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index 8f1b50eb937..6b0748860c8 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -1,4 +1,5 @@ #include "nix/fetchers/filtering-source-accessor.hh" +#include "nix/util/sync.hh" #include @@ -67,6 +68,11 @@ std::pair> FilteringSourceAccessor::getFin return next->getFingerprint(prefix / path); } +void FilteringSourceAccessor::invalidateCache(const CanonPath & path) +{ + next->invalidateCache(prefix / path); +} + void FilteringSourceAccessor::checkAccess(const CanonPath & path) { if (!isAllowed(path)) @@ -76,8 +82,8 @@ void FilteringSourceAccessor::checkAccess(const CanonPath & path) struct AllowListSourceAccessorImpl : AllowListSourceAccessor { - std::set allowedPrefixes; - boost::unordered_flat_set allowedPaths; + SharedSync> allowedPrefixes; + SharedSync> allowedPaths; AllowListSourceAccessorImpl( ref next, @@ -92,12 +98,12 @@ struct AllowListSourceAccessorImpl : AllowListSourceAccessor bool isAllowed(const CanonPath & path) override { - return allowedPaths.contains(path) || path.isAllowed(allowedPrefixes); + return allowedPaths.readLock()->contains(path) || path.isAllowed(*allowedPrefixes.readLock()); } void allowPrefix(CanonPath prefix) override { - 
allowedPrefixes.insert(std::move(prefix)); + allowedPrefixes.lock()->insert(std::move(prefix)); } }; diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index fecceeffffe..f21313a1040 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -12,6 +12,7 @@ #include "nix/util/util.hh" #include "nix/util/thread-pool.hh" #include "nix/util/pool.hh" +#include "nix/util/executable-path.hh" #include #include @@ -203,16 +204,19 @@ static git_packbuilder_progress PACKBUILDER_PROGRESS_CHECK_INTERRUPT = &packBuil } // extern "C" -static void initRepoAtomically(std::filesystem::path & path, bool bare) +static void initRepoAtomically(std::filesystem::path & path, GitRepo::Options options) { if (pathExists(path.string())) return; + if (!options.create) + throw Error("Git repository %s does not exist.", path); + std::filesystem::path tmpDir = createTempDir(path.parent_path()); AutoDelete delTmpDir(tmpDir, true); Repository tmpRepo; - if (git_repository_init(Setter(tmpRepo), tmpDir.string().c_str(), bare)) + if (git_repository_init(Setter(tmpRepo), tmpDir.string().c_str(), options.bare)) throw Error("creating Git repository %s: %s", path, git_error_last()->message); try { std::filesystem::rename(tmpDir, path); @@ -234,7 +238,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this /** Location of the repository on disk. */ std::filesystem::path path; - bool bare; + Options options; /** * libgit2 repository. 
Note that new objects are not written to disk, @@ -255,18 +259,18 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this */ git_odb_backend * packBackend = nullptr; - GitRepoImpl(std::filesystem::path _path, bool create, bool bare, bool packfilesOnly = false) + GitRepoImpl(std::filesystem::path _path, Options _options) : path(std::move(_path)) - , bare(bare) + , options(_options) { initLibGit2(); - initRepoAtomically(path, bare); + initRepoAtomically(path, options); if (git_repository_open(Setter(repo), path.string().c_str())) throw Error("opening Git repository %s: %s", path, git_error_last()->message); ObjectDb odb; - if (packfilesOnly) { + if (options.packfilesOnly) { /* Create a fresh object database because by default the repo also loose object backends. We are not using any of those for the tarball cache, but libgit2 still does a bunch of unnecessary @@ -295,7 +299,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this if (git_odb_add_backend(odb.get(), mempackBackend, 999)) throw Error("adding mempack backend to Git object database: %s", git_error_last()->message); - if (packfilesOnly) { + if (options.packfilesOnly) { if (git_repository_set_odb(repo.get(), odb.get())) throw Error("setting Git object database: %s", git_error_last()->message); } @@ -366,7 +370,26 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this { // TODO: as an optimization, it would be nice to include `this` in the pool. return Pool(std::numeric_limits::max(), [this]() -> ref { - return make_ref(path, false, bare); + auto repo = make_ref(path, options); + + /* Monkey-patching the pack backend to only read the pack directory + once. Otherwise it will do a readdir for each added oid when it's + not found and that translates to ~6 syscalls. Since we are never + writing pack files until flushing we can force the odb backend to + read the directory just once. It's very convenient that the vtable is + semi-public interface and is up for grabs. 
+ + This is purely an optimization for our use-case with a tarball cache. + libgit2 calls refresh() if the backend provides it when an oid isn't found. + We are only writing objects to a mempack (it has higher priority) and there isn't + a realistic use-case where a previously missing object would appear from thin air + on the disk (unless another process happens to be unpacking a similar tarball to + the cache at the same time, but that's a very unrealistic scenario). + */ + if (auto * backend = repo->packBackend) + backend->refresh = nullptr; + + return repo; }); } @@ -382,7 +405,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this ThreadPool pool; - auto process = [&done, &pool, &repoPool](this const auto & process, const git_oid & oid) -> void { + auto process = [&done, &pool, &repoPool](this auto const & process, const git_oid & oid) -> void { auto repo(repoPool.get()); auto _commit = lookupObject(*repo, oid, GIT_OBJECT_COMMIT); @@ -593,16 +616,37 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this // that) // then use code that was removed in this commit (see blame) - auto dir = this->path; - Strings gitArgs{"-C", dir.string(), "--git-dir", ".", "fetch", "--progress", "--force"}; - if (shallow) - append(gitArgs, {"--depth", "1"}); - append(gitArgs, {std::string("--"), url, refspec}); + if (ExecutablePath::load().findName("git")) { + auto dir = this->path; + Strings gitArgs{"-C", dir.string(), "--git-dir", ".", "fetch", "--progress", "--force"}; + if (shallow) + append(gitArgs, {"--depth", "1"}); + append(gitArgs, {std::string("--"), url, refspec}); + + auto status = runProgram(RunOptions{.program = "git", .args = gitArgs, .isInteractive = true}).first; + + if (status > 0) + throw Error("Failed to fetch git repository '%s'", url); + } else { + // Fall back to using libgit2 for fetching. This does not + // support SSH very well. 
+ Remote remote; + + if (git_remote_create_anonymous(Setter(remote), *this, url.c_str())) + throw Error("cannot create Git remote '%s': %s", url, git_error_last()->message); + + char * refspecs[] = {(char *) refspec.c_str()}; + git_strarray refspecs2{.strings = refspecs, .count = 1}; - auto status = runProgram(RunOptions{.program = "git", .args = gitArgs, .isInteractive = true}).first; + git_fetch_options opts = GIT_FETCH_OPTIONS_INIT; + // FIXME: for some reason, shallow fetching over ssh barfs + // with "could not read from remote repository". + opts.depth = shallow && parseURL(url).scheme != "ssh" ? 1 : GIT_FETCH_DEPTH_FULL; + opts.callbacks.payload = &act; - if (status > 0) - throw Error("Failed to fetch git repository '%s'", url); + if (git_remote_fetch(remote.get(), &refspecs2, &opts, nullptr)) + throw Error("fetching '%s' from '%s': %s", refspec, url, git_error_last()->message); + } } void verifyCommit(const Hash & rev, const std::vector & publicKeys) override @@ -665,6 +709,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this keyDecoded = base64::decode(k.key); } catch (Error & e) { e.addTrace({}, "while decoding public key '%s' used for git signature", k.key); + throw; } auto fingerprint = trim(hashString(HashAlgorithm::SHA256, keyDecoded).to_string(nix::HashFormat::Base64, false), "="); @@ -712,15 +757,19 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this } }; -ref GitRepo::openRepo(const std::filesystem::path & path, bool create, bool bare, bool packfilesOnly) +ref GitRepo::openRepo(const std::filesystem::path & path, GitRepo::Options options) +{ + return make_ref(path, options); +} + +std::string GitAccessorOptions::makeFingerprint(const Hash & rev) const { - return make_ref(path, create, bare, packfilesOnly); + return "git:" + rev.gitRev() + (exportIgnore ? ";e" : "") + (smudgeLfs ? ";l" : ""); } /** * Raw git tree input accessor. 
*/ - struct GitSourceAccessor : SourceAccessor { struct State @@ -741,6 +790,7 @@ struct GitSourceAccessor : SourceAccessor .options = options, }} { + fingerprint = options.makeFingerprint(rev); } std::string readBlob(const CanonPath & path, bool symlink) @@ -1055,185 +1105,155 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink { ref repo; - struct PendingDir - { - std::string name; - TreeBuilder builder; - }; - - std::vector pendingDirs; + Pool repoPool; - /** - * Temporary buffer used by createRegularFile for storing small file contents. - */ - std::string regularFileContentsBuffer; + unsigned int concurrency = std::min(std::thread::hardware_concurrency(), 10U); - /** - * If repo has a non-null packBackend, this has a copy of the refresh function - * from the backend virtual table. This is needed to restore it after we've flushed - * the sink. We modify it to avoid unnecessary I/O on non-existent oids. - */ - decltype(::git_odb_backend::refresh) packfileOdbRefresh = nullptr; + ThreadPool workers{concurrency}; - void pushBuilder(std::string name) - { - const git_tree_entry * entry; - Tree prevTree = nullptr; - - if (!pendingDirs.empty() && (entry = git_treebuilder_get(pendingDirs.back().builder.get(), name.c_str()))) { - /* Clone a tree that we've already finished. This happens - if a tarball has directory entries that are not - contiguous. */ - if (git_tree_entry_type(entry) != GIT_OBJECT_TREE) - throw Error("parent of '%s' is not a directory", name); - - if (git_tree_entry_to_object((git_object **) (git_tree **) Setter(prevTree), *repo, entry)) - throw Error("looking up parent of '%s': %s", name, git_error_last()->message); - } + /** Total file contents in flight. 
*/ + std::atomic totalBufSize{0}; - git_treebuilder * b; - if (git_treebuilder_new(&b, *repo, prevTree.get())) - throw Error("creating a tree builder: %s", git_error_last()->message); - pendingDirs.push_back({.name = std::move(name), .builder = TreeBuilder(b)}); - }; + static constexpr std::size_t maxBufSize = 16 * 1024 * 1024; GitFileSystemObjectSinkImpl(ref repo) : repo(repo) + , repoPool(repo->getPool()) { - /* Monkey-patching the pack backend to only read the pack directory - once. Otherwise it will do a readdir for each added oid when it's - not found and that translates to ~6 syscalls. Since we are never - writing pack files until flushing we can force the odb backend to - read the directory just once. It's very convenient that the vtable is - semi-public interface and is up for grabs. - - This is purely an optimization for our use-case with a tarball cache. - libgit2 calls refresh() if the backend provides it when an oid isn't found. - We are only writing objects to a mempack (it has higher priority) and there isn't - a realistic use-case where a previously missing object would appear from thin air - on the disk (unless another process happens to be unpacking a similar tarball to - the cache at the same time, but that's a very unrealistic scenario). - */ - if (auto * backend = repo->packBackend) { - if (backend->refresh(backend)) /* Refresh just once manually. */ - throw Error("refreshing packfiles: %s", git_error_last()->message); - /* Save the function pointer to restore it later in flush() and - unset it in the vtable. 
libgit2 does nothing if it's a nullptr: - https://github.com/libgit2/libgit2/blob/58d9363f02f1fa39e46d49b604f27008e75b72f2/src/libgit2/odb.c#L1922 - */ - packfileOdbRefresh = std::exchange(backend->refresh, nullptr); - } - pushBuilder(""); } - std::pair popBuilder() + ~GitFileSystemObjectSinkImpl() { - assert(!pendingDirs.empty()); - auto pending = std::move(pendingDirs.back()); - git_oid oid; - if (git_treebuilder_write(&oid, pending.builder.get())) - throw Error("creating a tree object: %s", git_error_last()->message); - pendingDirs.pop_back(); - return {oid, pending.name}; - }; + // Make sure the worker threads are destroyed before any state + // they're referring to. + workers.shutdown(); + } - void addToTree(const std::string & name, const git_oid & oid, git_filemode_t mode) + struct Child; + + /// A directory to be written as a Git tree. + struct Directory { - assert(!pendingDirs.empty()); - auto & pending = pendingDirs.back(); - if (git_treebuilder_insert(nullptr, pending.builder.get(), name.c_str(), &oid, mode)) - throw Error("adding a file to a tree builder: %s", git_error_last()->message); + std::map children; + std::optional oid; + + Child & lookup(const CanonPath & path) + { + assert(!path.isRoot()); + auto parent = path.parent(); + auto cur = this; + for (auto & name : *parent) { + auto i = cur->children.find(std::string(name)); + if (i == cur->children.end()) + throw Error("path '%s' does not exist", path); + auto dir = std::get_if(&i->second.file); + if (!dir) + throw Error("path '%s' has a non-directory parent", path); + cur = dir; + } + + auto i = cur->children.find(std::string(*path.baseName())); + if (i == cur->children.end()) + throw Error("path '%s' does not exist", path); + return i->second; + } }; - void updateBuilders(std::span names) + size_t nextId = 0; // for Child.id + + struct Child { - // Find the common prefix of pendingDirs and names. 
- size_t prefixLen = 0; - for (; prefixLen < names.size() && prefixLen + 1 < pendingDirs.size(); ++prefixLen) - if (names[prefixLen] != pendingDirs[prefixLen + 1].name) - break; - - // Finish the builders that are not part of the common prefix. - for (auto n = pendingDirs.size(); n > prefixLen + 1; --n) { - auto [oid, name] = popBuilder(); - addToTree(name, oid, GIT_FILEMODE_TREE); - } + git_filemode_t mode; + std::variant file; - // Create builders for the new directories. - for (auto n = prefixLen; n < names.size(); ++n) - pushBuilder(names[n]); + /// Sequential numbering of the file in the tarball. This is + /// used to make sure we only import the latest version of a + /// path. + size_t id{0}; }; - bool prepareDirs(const std::vector & pathComponents, bool isDir) + struct State { - std::span pathComponents2{pathComponents}; + Directory root; + }; - updateBuilders(isDir ? pathComponents2 : pathComponents2.first(pathComponents2.size() - 1)); + Sync _state; - return true; + void addNode(State & state, const CanonPath & path, Child && child) + { + assert(!path.isRoot()); + auto parent = path.parent(); + + Directory * cur = &state.root; + + for (auto & i : *parent) { + auto child = std::get_if( + &cur->children.emplace(std::string(i), Child{GIT_FILEMODE_TREE, {Directory()}}).first->second.file); + assert(child); + cur = child; + } + + std::string name(*path.baseName()); + + if (auto prev = cur->children.find(name); prev == cur->children.end() || prev->second.id < child.id) + cur->children.insert_or_assign(name, std::move(child)); } void createRegularFile(const CanonPath & path, std::function func) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - if (!prepareDirs(pathComponents, false)) - return; + checkInterrupt(); + + /* Multithreaded blob writing. We read the incoming file data into memory and asynchronously write it to a Git + blob object. 
However, to avoid unbounded memory usage, if the amount of data in flight exceeds a threshold, + we switch to writing directly to a Git write stream. */ using WriteStream = std::unique_ptr<::git_writestream, decltype([](::git_writestream * stream) { if (stream) stream->free(stream); })>; - /* Maximum file size that gets buffered in memory before flushing to a WriteStream, - that's backed by a temporary objects/streamed_git2_* file. We should avoid that - for common cases, since creating (and deleting) a temporary file for each blob - is insanely expensive. */ - static constexpr std::size_t maxBufferSize = 1024 * 1024; /* 1 MiB */ - struct CRF : CreateRegularFileSink { - const CanonPath & path; - GitFileSystemObjectSinkImpl & back; + CanonPath path; + GitFileSystemObjectSinkImpl & parent; WriteStream stream; - std::string & contents; + std::optional repo; + + std::string contents; bool executable = false; - CRF(const CanonPath & path, GitFileSystemObjectSinkImpl & back, std::string & regularFileContentsBuffer) - : path(path) - , back(back) - , stream(nullptr) - , contents(regularFileContentsBuffer) + CRF(CanonPath path, GitFileSystemObjectSinkImpl & parent) + : path(std::move(path)) + , parent(parent) { - contents.clear(); } - void writeToStream(std::string_view data) + ~CRF() { - /* Lazily create the stream. */ - if (!stream) { - ::git_writestream * stream2 = nullptr; - if (git_blob_create_from_stream(&stream2, *back.repo, nullptr)) - throw Error("creating a blob stream object: %s", git_error_last()->message); - stream = WriteStream{stream2}; - assert(stream); - } - - if (stream->write(stream.get(), data.data(), data.size())) - throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); + parent.totalBufSize -= contents.size(); } void operator()(std::string_view data) override { - /* Already in slow path. Just write to the slow stream. 
*/ - if (stream) { - writeToStream(data); - return; - } + if (!stream) { + contents.append(data); + parent.totalBufSize += data.size(); + + if (parent.totalBufSize > parent.maxBufSize) { + repo.emplace(parent.repoPool.get()); - contents += data; - if (contents.size() > maxBufferSize) { - writeToStream(contents); /* Will initialize stream. */ - contents.clear(); + if (git_blob_create_from_stream(Setter(stream), **repo, nullptr)) + throw Error("creating a blob stream object: %s", git_error_last()->message); + + if (stream->write(stream.get(), contents.data(), contents.size())) + throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); + + parent.totalBufSize -= contents.size(); + contents.clear(); + } + } else { + if (stream->write(stream.get(), data.data(), data.size())) + throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); } } @@ -1241,112 +1261,140 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink { executable = true; } - } crf{path, *this, regularFileContentsBuffer}; + }; + + auto crf = std::make_shared(path, *this); + + func(*crf); - func(crf); + auto id = nextId++; - git_oid oid; - if (crf.stream) { - /* Call .release(), since git_blob_create_from_stream_commit + if (crf->stream) { + /* Finish the slow path by creating the blob object synchronously. + Call .release(), since git_blob_create_from_stream_commit acquires ownership and frees the stream. */ - if (git_blob_create_from_stream_commit(&oid, crf.stream.release())) + git_oid oid; + if (git_blob_create_from_stream_commit(&oid, crf->stream.release())) throw Error("creating a blob object for '%s': %s", path, git_error_last()->message); - } else { - if (git_blob_create_from_buffer(&oid, *repo, crf.contents.data(), crf.contents.size())) - throw Error( - "creating a blob object for '%s' from in-memory buffer: %s", path, git_error_last()->message); + addNode( + *_state.lock(), + crf->path, + Child{crf->executable ? 
GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB, oid, id}); + return; } - addToTree(*pathComponents.rbegin(), oid, crf.executable ? GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB); + /* Fast path: create the blob object in a separate thread. */ + workers.enqueue([this, crf{std::move(crf)}, id]() { + auto repo(repoPool.get()); + + git_oid oid; + if (git_blob_create_from_buffer(&oid, *repo, crf->contents.data(), crf->contents.size())) + throw Error( + "creating a blob object for '%s' from in-memory buffer: %s", crf->path, git_error_last()->message); + + addNode( + *_state.lock(), + crf->path, + Child{crf->executable ? GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB, oid, id}); + }); } void createDirectory(const CanonPath & path) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - (void) prepareDirs(pathComponents, true); + if (path.isRoot()) + return; + auto state(_state.lock()); + addNode(*state, path, {GIT_FILEMODE_TREE, Directory()}); } void createSymlink(const CanonPath & path, const std::string & target) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - if (!prepareDirs(pathComponents, false)) - return; + workers.enqueue([this, path, target]() { + auto repo(repoPool.get()); - git_oid oid; - if (git_blob_create_from_buffer(&oid, *repo, target.c_str(), target.size())) - throw Error("creating a blob object for tarball symlink member '%s': %s", path, git_error_last()->message); + git_oid oid; + if (git_blob_create_from_buffer(&oid, *repo, target.c_str(), target.size())) + throw Error( + "creating a blob object for tarball symlink member '%s': %s", path, git_error_last()->message); - addToTree(*pathComponents.rbegin(), oid, GIT_FILEMODE_LINK); + auto state(_state.lock()); + addNode(*state, path, Child{GIT_FILEMODE_LINK, oid}); + }); } + std::map hardLinks; + void createHardlink(const CanonPath & path, const CanonPath & target) override { - std::vector pathComponents; - for (auto & c : path) - pathComponents.emplace_back(c); 
+ hardLinks.insert_or_assign(path, target); + } - if (!prepareDirs(pathComponents, false)) - return; + Hash flush() override + { + workers.process(); - // We can't just look up the path from the start of the root, since - // some parent directories may not have finished yet, so we compute - // a relative path that helps us find the right git_tree_builder or object. - auto relTarget = CanonPath(path).parent()->makeRelative(target); - - auto dir = pendingDirs.rbegin(); - - // For each ../ component at the start, go up one directory. - // CanonPath::makeRelative() always puts all .. elements at the start, - // so they're all handled by this loop: - std::string_view relTargetLeft(relTarget); - while (hasPrefix(relTargetLeft, "../")) { - if (dir == pendingDirs.rend()) - throw Error("invalid hard link target '%s' for path '%s'", target, path); - ++dir; - relTargetLeft = relTargetLeft.substr(3); - } - if (dir == pendingDirs.rend()) - throw Error("invalid hard link target '%s' for path '%s'", target, path); - - // Look up the remainder of the target, starting at the - // top-most `git_treebuilder`. - std::variant curDir{dir->builder.get()}; - Object tree; // needed to keep `entry` alive - const git_tree_entry * entry = nullptr; - - for (auto & c : CanonPath(relTargetLeft)) { - if (auto builder = std::get_if(&curDir)) { - assert(*builder); - if (!(entry = git_treebuilder_get(*builder, std::string(c).c_str()))) - throw Error("cannot find hard link target '%s' for path '%s'", target, path); - curDir = *git_tree_entry_id(entry); - } else if (auto oid = std::get_if(&curDir)) { - tree = lookupObject(*repo, *oid, GIT_OBJECT_TREE); - if (!(entry = git_tree_entry_byname((const git_tree *) &*tree, std::string(c).c_str()))) - throw Error("cannot find hard link target '%s' for path '%s'", target, path); - curDir = *git_tree_entry_id(entry); + /* Create hard links. 
*/ + { + auto state(_state.lock()); + for (auto & [path, target] : hardLinks) { + if (target.isRoot()) + continue; + try { + auto child = state->root.lookup(target); + auto oid = std::get_if(&child.file); + if (!oid) + throw Error("cannot create a hard link to a directory"); + addNode(*state, path, {child.mode, *oid}); + } catch (Error & e) { + e.addTrace(nullptr, "while creating a hard link from '%s' to '%s'", path, target); + throw; + } } } - assert(entry); + // Flush all repo objects to disk. + { + auto repos = repoPool.clear(); + ThreadPool workers{repos.size()}; + for (auto & repo : repos) + workers.enqueue([repo]() { repo->flush(); }); + workers.process(); + } - addToTree(*pathComponents.rbegin(), *git_tree_entry_id(entry), git_tree_entry_filemode(entry)); - } + // Write the Git trees to disk. Would be nice to have this multithreaded too, but that's hard because a tree + // can't refer to an object that hasn't been written yet. Also it doesn't make a big difference for performance. + auto repo(repoPool.get()); - Hash flush() override - { - updateBuilders({}); + [&](this const auto & visit, Directory & node) -> void { + checkInterrupt(); - auto [oid, _name] = popBuilder(); + // Write the child directories. + for (auto & child : node.children) + if (auto dir = std::get_if(&child.second.file)) + visit(*dir); + + // Write this directory. + git_treebuilder * b; + if (git_treebuilder_new(&b, *repo, nullptr)) + throw Error("creating a tree builder: %s", git_error_last()->message); + TreeBuilder builder(b); + + for (auto & [name, child] : node.children) { + auto oid_p = std::get_if(&child.file); + auto oid = oid_p ? *oid_p : std::get(child.file).oid.value(); + if (git_treebuilder_insert(nullptr, builder.get(), name.c_str(), &oid, child.mode)) + throw Error("adding a file to a tree builder: %s", git_error_last()->message); + } - if (auto * backend = repo->packBackend) { - /* We are done writing blobs, can restore refresh functionality. 
*/ - backend->refresh = packfileOdbRefresh; - } + git_oid oid; + if (git_treebuilder_write(&oid, builder.get())) + throw Error("creating a tree object: %s", git_error_last()->message); + node.oid = oid; + }(_state.lock()->root); repo->flush(); - return toHash(oid); + return toHash(_state.lock()->root.oid.value()); } }; @@ -1427,8 +1475,15 @@ namespace fetchers { ref Settings::getTarballCache() const { - static auto repoDir = std::filesystem::path(getCacheDir()) / "tarball-cache"; - return GitRepo::openRepo(repoDir, /*create=*/true, /*bare=*/true, /*packfilesOnly=*/true); + /* v1: Had either only loose objects or thin packfiles referring to loose objects + * v2: Must have only packfiles with no loose objects. Should get repacked periodically + * for optimal packfiles. + */ + static auto repoDir = std::filesystem::path(getCacheDir()) / "tarball-cache-v2"; + auto tarballCache(_tarballCache.lock()); + if (!*tarballCache) + *tarballCache = GitRepo::openRepo(repoDir, {.create = true, .bare = true, .packfilesOnly = true}); + return ref(*tarballCache); } } // namespace fetchers @@ -1442,7 +1497,7 @@ GitRepo::WorkdirInfo GitRepo::getCachedWorkdirInfo(const std::filesystem::path & if (i != cache->end()) return i->second; } - auto workdirInfo = GitRepo::openRepo(path)->getWorkdirInfo(); + auto workdirInfo = GitRepo::openRepo(path, {})->getWorkdirInfo(); _cache.lock()->emplace(path, workdirInfo); return workdirInfo; } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 4f5247861d8..7f33d9d8c60 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -16,6 +16,7 @@ #include "nix/util/json-utils.hh" #include "nix/util/archive.hh" #include "nix/util/mounted-source-accessor.hh" +#include "nix/fetchers/fetch-to-store.hh" #include #include @@ -393,15 +394,17 @@ struct GitInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = 
parseURL(getStrAttr(input.attrs, "url")); if (url.scheme != "git") url.scheme = "git+" + url.scheme; if (auto rev = input.getRev()) url.query.insert_or_assign("rev", rev->gitRev()); - if (auto ref = input.getRef()) - url.query.insert_or_assign("ref", *ref); + if (auto ref = input.getRef()) { + if (!abbreviate || (*ref != "master" && *ref != "main")) + url.query.insert_or_assign("ref", *ref); + } if (getShallowAttr(input)) url.query.insert_or_assign("shallow", "1"); if (getLfsAttr(input)) @@ -558,10 +561,10 @@ struct GitInputScheme : InputScheme { if (workdirInfo.isDirty) { if (!settings.allowDirty) - throw Error("Git tree '%s' is dirty", locationToArg()); + throw Error("Git tree '%s' has uncommitted changes", locationToArg()); if (settings.warnDirty) - warn("Git tree '%s' is dirty", locationToArg()); + warn("Git tree '%s' has uncommitted changes", locationToArg()); } } @@ -637,11 +640,6 @@ struct GitInputScheme : InputScheme url); } - // If we don't check here for the path existence, then we can give libgit2 any directory - // and it will initialize them as git directories. 
- if (!pathExists(path)) { - throw Error("The path '%s' does not exist.", path); - } repoInfo.location = std::filesystem::absolute(path); } else { if (url.scheme == "file") @@ -703,7 +701,7 @@ struct GitInputScheme : InputScheme if (auto res = cache->lookup(key)) return getIntAttr(*res, "lastModified"); - auto lastModified = GitRepo::openRepo(repoDir)->getLastModified(rev); + auto lastModified = GitRepo::openRepo(repoDir, {})->getLastModified(rev); cache->upsert(key, {{"lastModified", lastModified}}); @@ -726,7 +724,7 @@ struct GitInputScheme : InputScheme Activity act( *logger, lvlChatty, actUnknown, fmt("getting Git revision count of '%s'", repoInfo.locationToArg())); - auto revCount = GitRepo::openRepo(repoDir)->getRevCount(rev); + auto revCount = GitRepo::openRepo(repoDir, {})->getRevCount(rev); cache->upsert(key, Attrs{{"revCount", revCount}}); @@ -737,7 +735,7 @@ struct GitInputScheme : InputScheme { auto head = std::visit( overloaded{ - [&](const std::filesystem::path & path) { return GitRepo::openRepo(path)->getWorkdirRef(); }, + [&](const std::filesystem::path & path) { return GitRepo::openRepo(path, {})->getWorkdirRef(); }, [&](const ParsedURL & url) { return readHeadCached(url.to_string(), shallow); }}, repoInfo.location); if (!head) { @@ -778,6 +776,79 @@ struct GitInputScheme : InputScheme } } + /** + * Decide whether we can do a shallow clone, which is faster. This is possible if the user explicitly specified + * `shallow = true`, or if we already have a `revCount`. + */ + bool canDoShallow(const Input & input) const + { + bool shallow = getShallowAttr(input); + return shallow || input.getRevCount().has_value(); + } + + GitAccessorOptions getGitAccessorOptions(const Input & input) const + { + return GitAccessorOptions{ + .exportIgnore = getExportIgnoreAttr(input), + .smudgeLfs = getLfsAttr(input), + .submodules = getSubmodulesAttr(input), + }; + } + + /** + * Get a `SourceAccessor` for the given Git revision using Nix < 2.20 semantics, i.e. 
using `git archive` or `git + * checkout`. + */ + ref getLegacyGitAccessor( + Store & store, + RepoInfo & repoInfo, + const std::filesystem::path & repoDir, + const Hash & rev, + GitAccessorOptions & options) const + { + auto tmpDir = createTempDir(); + AutoDelete delTmpDir(tmpDir, true); + + auto storePath = + options.submodules + ? [&]() { + // Nix < 2.20 used `git checkout` for repos with submodules. + runProgram2({.program = "git", .args = {"init", tmpDir}}); + runProgram2({.program = "git", .args = {"-C", tmpDir, "remote", "add", "origin", repoDir}}); + runProgram2({.program = "git", .args = {"-C", tmpDir, "fetch", "origin", rev.gitRev()}}); + runProgram2({.program = "git", .args = {"-C", tmpDir, "checkout", rev.gitRev()}}); + PathFilter filter = [&](const Path & path) { return baseNameOf(path) != ".git"; }; + return store.addToStore( + "source", + {getFSSourceAccessor(), CanonPath(tmpDir.string())}, + ContentAddressMethod::Raw::NixArchive, + HashAlgorithm::SHA256, + {}, + filter); + }() + : [&]() { + // Nix < 2.20 used `git archive` for repos without submodules. + options.exportIgnore = true; + + auto source = sinkToSource([&](Sink & sink) { + runProgram2( + {.program = "git", + .args = {"-C", repoDir, "--git-dir", repoInfo.gitDir, "archive", rev.gitRev()}, + .standardOut = &sink}); + }); + + unpackTarfile(*source, tmpDir); + + return store.addToStore("source", {getFSSourceAccessor(), CanonPath(tmpDir.string())}); + }(); + + auto accessor = store.getFSAccessor(storePath); + + accessor->fingerprint = options.makeFingerprint(rev) + ";legacy"; + + return ref{accessor}; + } + std::pair, Input> getAccessorFromCommit(const Settings & settings, Store & store, RepoInfo & repoInfo, Input && input) const { @@ -786,7 +857,7 @@ struct GitInputScheme : InputScheme auto origRev = input.getRev(); auto originalRef = input.getRef(); - bool shallow = getShallowAttr(input); + bool shallow = canDoShallow(input); auto ref = originalRef ? 
*originalRef : getDefaultRef(repoInfo, shallow); input.attrs.insert_or_assign("ref", ref); @@ -795,17 +866,33 @@ struct GitInputScheme : InputScheme if (auto repoPath = repoInfo.getPath()) { repoDir = *repoPath; if (!input.getRev()) - input.attrs.insert_or_assign("rev", GitRepo::openRepo(repoDir)->resolveRef(ref).gitRev()); + input.attrs.insert_or_assign("rev", GitRepo::openRepo(repoDir, {})->resolveRef(ref).gitRev()); } else { + auto rev = input.getRev(); auto repoUrl = std::get(repoInfo.location); std::filesystem::path cacheDir = getCachePath(repoUrl.to_string(), shallow); repoDir = cacheDir; repoInfo.gitDir = "."; + /* If shallow = false, but we have a non-shallow repo that already contains the desired rev, then use that + * repo instead. */ + std::filesystem::path cacheDirNonShallow = getCachePath(repoUrl.to_string(), false); + if (rev && shallow && pathExists(cacheDirNonShallow)) { + auto nonShallowRepo = GitRepo::openRepo(cacheDirNonShallow, {.create = true, .bare = true}); + if (nonShallowRepo->hasObject(*rev)) { + debug( + "using non-shallow cached repo for '%s' since it contains rev '%s'", + repoUrl.to_string(), + rev->gitRev()); + repoDir = cacheDirNonShallow; + goto have_rev; + } + } + std::filesystem::create_directories(cacheDir.parent_path()); PathLocks cacheDirLock({cacheDir.string()}); - auto repo = GitRepo::openRepo(cacheDir, true, true); + auto repo = GitRepo::openRepo(cacheDir, {.create = true, .bare = true}); // We need to set the origin so resolving submodule URLs works repo->setRemote("origin", repoUrl.to_string()); @@ -817,7 +904,7 @@ struct GitInputScheme : InputScheme /* If a rev was specified, we need to fetch if it's not in the repo. */ - if (auto rev = input.getRev()) { + if (rev) { doFetch = !repo->hasObject(*rev); } else { if (getAllRefsAttr(input)) { @@ -831,7 +918,6 @@ struct GitInputScheme : InputScheme } if (doFetch) { - bool shallow = getShallowAttr(input); try { auto fetchRef = getAllRefsAttr(input) ? 
"refs/*:refs/*" : input.getRev() ? input.getRev()->gitRev() @@ -859,7 +945,7 @@ struct GitInputScheme : InputScheme warn("could not update cached head '%s' for '%s'", ref, repoInfo.locationToArg()); } - if (auto rev = input.getRev()) { + if (rev) { if (!repo->hasObject(*rev)) throw Error( "Cannot find Git revision '%s' in ref '%s' of repository '%s'! " @@ -876,40 +962,88 @@ struct GitInputScheme : InputScheme // the remainder } - auto repo = GitRepo::openRepo(repoDir); - - auto isShallow = repo->isShallow(); - - if (isShallow && !getShallowAttr(input)) - throw Error( - "'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", - repoInfo.locationToArg()); + have_rev: + auto repo = GitRepo::openRepo(repoDir, {}); // FIXME: check whether rev is an ancestor of ref? auto rev = *input.getRev(); - input.attrs.insert_or_assign("lastModified", getLastModified(settings, repoInfo, repoDir, rev)); + /* Skip lastModified computation if it's already supplied by the caller. + We don't care if they specify an incorrect value; it doesn't + matter for security, unlike narHash. */ + if (!input.attrs.contains("lastModified")) + input.attrs.insert_or_assign("lastModified", getLastModified(settings, repoInfo, repoDir, rev)); + + /* Like lastModified, skip revCount if supplied by the caller. 
*/ + if (!shallow && !input.attrs.contains("revCount")) { + auto isShallow = repo->isShallow(); + + if (isShallow && !shallow) + throw Error( + "'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", + repoInfo.locationToArg()); - if (!getShallowAttr(input)) input.attrs.insert_or_assign("revCount", getRevCount(settings, repoInfo, repoDir, rev)); + } printTalkative("using revision %s of repo '%s'", rev.gitRev(), repoInfo.locationToArg()); verifyCommit(input, repo); - bool exportIgnore = getExportIgnoreAttr(input); - bool smudgeLfs = getLfsAttr(input); - auto accessor = repo->getAccessor( - rev, {.exportIgnore = exportIgnore, .smudgeLfs = smudgeLfs}, "«" + input.to_string() + "»"); + auto options = getGitAccessorOptions(input); + + auto expectedNarHash = input.getNarHash(); + + auto accessor = repo->getAccessor(rev, options, "«" + input.to_string(true) + "»"); + + if (settings.nix219Compat && !options.smudgeLfs && accessor->pathExists(CanonPath(".gitattributes"))) { + /* Use Nix 2.19 semantics to generate locks, but if a NAR hash is specified, support Nix >= 2.20 semantics + * as well. */ + warn("Using Nix 2.19 semantics to export Git repository '%s'.", input.to_string()); + auto accessorModern = accessor; + accessor = getLegacyGitAccessor(store, repoInfo, repoDir, rev, options); + if (expectedNarHash) { + auto narHashLegacy = + fetchToStore2(settings, store, {accessor}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash != narHashLegacy) { + auto narHashModern = + fetchToStore2(settings, store, {accessorModern}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash == narHashModern) + accessor = accessorModern; + } + } + } else { + /* Backward compatibility hack for locks produced by Nix < 2.20 that depend on Nix applying Git filters, + * `export-ignore` or `export-subst`. Nix >= 2.20 doesn't do those, so we may get a NAR hash mismatch. 
If + * that happens, try again using `git archive`. */ + auto narHashNew = fetchToStore2(settings, store, {accessor}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash && accessor->pathExists(CanonPath(".gitattributes"))) { + if (expectedNarHash != narHashNew) { + auto accessorLegacy = getLegacyGitAccessor(store, repoInfo, repoDir, rev, options); + auto narHashLegacy = + fetchToStore2(settings, store, {accessorLegacy}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash == narHashLegacy) { + warn( + "Git input '%s' specifies a NAR hash '%s' that was created by Nix < 2.20.\n" + "Nix >= 2.20 does not apply Git filters, `export-ignore` and `export-subst` by default, which changes the NAR hash.\n" + "Please update the NAR hash to '%s'.", + input.to_string(), + expectedNarHash->to_string(HashFormat::SRI, true), + narHashNew.to_string(HashFormat::SRI, true)); + accessor = accessorLegacy; + } + } + } + } /* If the repo has submodules, fetch them and return a mounted input accessor consisting of the accessor for the top-level repo and the accessors for the submodules. 
*/ - if (getSubmodulesAttr(input)) { + if (options.submodules) { std::map> mounts; - for (auto & [submodule, submoduleRev] : repo->getSubmodules(rev, exportIgnore)) { + for (auto & [submodule, submoduleRev] : repo->getSubmodules(rev, options.exportIgnore)) { auto resolved = repo->resolveSubmoduleUrl(submodule.url); debug( "Git submodule %s: %s %s %s -> %s", @@ -932,19 +1066,21 @@ struct GitInputScheme : InputScheme } } attrs.insert_or_assign("rev", submoduleRev.gitRev()); - attrs.insert_or_assign("exportIgnore", Explicit{exportIgnore}); + attrs.insert_or_assign("exportIgnore", Explicit{options.exportIgnore}); attrs.insert_or_assign("submodules", Explicit{true}); - attrs.insert_or_assign("lfs", Explicit{smudgeLfs}); + attrs.insert_or_assign("lfs", Explicit{options.smudgeLfs}); attrs.insert_or_assign("allRefs", Explicit{true}); auto submoduleInput = fetchers::Input::fromAttrs(settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = submoduleInput.getAccessor(settings, store); - submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); + submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string(true) + "»"); mounts.insert_or_assign(submodule.path, submoduleAccessor); } if (!mounts.empty()) { + auto newFingerprint = accessor->getFingerprint(CanonPath::root).second->append(";s"); mounts.insert_or_assign(CanonPath::root, accessor); accessor = makeMountedSourceAccessor(std::move(mounts)); + accessor->fingerprint = newFingerprint; } } @@ -963,7 +1099,7 @@ struct GitInputScheme : InputScheme for (auto & submodule : repoInfo.workdirInfo.submodules) repoInfo.workdirInfo.files.insert(submodule.path); - auto repo = GitRepo::openRepo(repoPath, false, false); + auto repo = GitRepo::openRepo(repoPath, {}); auto exportIgnore = getExportIgnoreAttr(input); @@ -988,7 +1124,7 @@ struct GitInputScheme : InputScheme auto submoduleInput = fetchers::Input::fromAttrs(settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = 
submoduleInput.getAccessor(settings, store); - submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); + submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string(true) + "»"); /* If the submodule is dirty, mark this repo dirty as well. */ @@ -1003,7 +1139,7 @@ struct GitInputScheme : InputScheme } if (!repoInfo.workdirInfo.isDirty) { - auto repo = GitRepo::openRepo(repoPath); + auto repo = GitRepo::openRepo(repoPath, {}); if (auto ref = repo->getWorkdirRef()) input.attrs.insert_or_assign("ref", *ref); @@ -1062,13 +1198,12 @@ struct GitInputScheme : InputScheme std::optional getFingerprint(Store & store, const Input & input) const override { - auto makeFingerprint = [&](const Hash & rev) { - return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : "") - + (getLfsAttr(input) ? ";l" : ""); - }; + auto options = getGitAccessorOptions(input); if (auto rev = input.getRev()) - return makeFingerprint(*rev); + // FIXME: this can return a wrong fingerprint for the legacy (`git archive`) case, since we don't know here + // whether to append the `;legacy` suffix or not. 
+ return options.makeFingerprint(*rev); else { auto repoInfo = getRepoInfo(input); if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) { @@ -1084,7 +1219,7 @@ struct GitInputScheme : InputScheme writeString("deleted:", hashSink); writeString(file.abs(), hashSink); } - return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + return options.makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false); } return std::nullopt; diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index b86fa926a66..b3c892c6133 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -162,7 +162,7 @@ struct GitArchiveInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto owner = getStrAttr(input.attrs, "owner"); auto repo = getStrAttr(input.attrs, "repo"); @@ -173,7 +173,7 @@ struct GitArchiveInputScheme : InputScheme if (ref) path.push_back(*ref); if (rev) - path.push_back(rev->to_string(HashFormat::Base16, false)); + path.push_back(abbreviate ? rev->gitShortRev() : rev->gitRev()); auto url = ParsedURL{ .scheme = std::string{schemeName()}, .path = path, @@ -354,7 +354,14 @@ struct GitArchiveInputScheme : InputScheme input.attrs.insert_or_assign("lastModified", uint64_t(tarballInfo.lastModified)); auto accessor = - settings.getTarballCache()->getAccessor(tarballInfo.treeHash, {}, "«" + input.to_string() + "»"); + settings.getTarballCache()->getAccessor(tarballInfo.treeHash, {}, "«" + input.to_string(true) + "»"); + + if (!settings.trustTarballsFromGitForges) + // FIXME: computing the NAR hash here is wasteful if + // copyInputToStore() is just going to hash/copy it as + // well. 
+ input.attrs.insert_or_assign( + "narHash", accessor->hashPath(CanonPath::root).to_string(HashFormat::SRI, true)); return {accessor, input}; } @@ -368,15 +375,10 @@ struct GitArchiveInputScheme : InputScheme return input.getRev().has_value() && (settings.trustTarballsFromGitForges || input.getNarHash().has_value()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - std::optional getFingerprint(Store & store, const Input & input) const override { if (auto rev = input.getRev()) - return rev->gitRev(); + return "github:" + rev->gitRev(); else return std::nullopt; } @@ -454,8 +456,7 @@ struct GitHubInputScheme : GitArchiveInputScheme : headers.empty() ? "https://%s/%s/%s/archive/%s.tar.gz" : "https://api.%s/repos/%s/%s/tarball/%s"; - const auto url = - fmt(urlFmt, host, getOwner(input), getRepo(input), input.getRev()->to_string(HashFormat::Base16, false)); + const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input), input.getRev()->gitRev()); return DownloadUrl{parseURL(url), headers}; } @@ -542,7 +543,7 @@ struct GitLabInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), - input.getRev()->to_string(HashFormat::Base16, false)); + input.getRev()->gitRev()); Headers headers = makeHeadersWithAuthTokens(settings, host, input); return DownloadUrl{parseURL(url), headers}; @@ -638,7 +639,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), - input.getRev()->to_string(HashFormat::Base16, false)); + input.getRev()->gitRev()); Headers headers = makeHeadersWithAuthTokens(settings, host, input); return DownloadUrl{parseURL(url), headers}; diff --git a/src/libfetchers/include/nix/fetchers/cache.hh b/src/libfetchers/include/nix/fetchers/cache.hh index 7219635ec07..8cac076f1f2 100644 --- a/src/libfetchers/include/nix/fetchers/cache.hh +++ b/src/libfetchers/include/nix/fetchers/cache.hh @@ -67,9 +67,9 @@ 
struct Cache /** * Look up a store path in the cache. The returned store path will - * be valid, but it may be expired. + * be valid (unless `allowInvalid` is true), but it may be expired. */ - virtual std::optional lookupStorePath(Key key, Store & store) = 0; + virtual std::optional lookupStorePath(Key key, Store & store, bool allowInvalid = false) = 0; /** * Look up a store path in the cache. Return nothing if its TTL diff --git a/src/libfetchers/include/nix/fetchers/fetch-settings.hh b/src/libfetchers/include/nix/fetchers/fetch-settings.hh index 8cfa7f6091e..e2268203b56 100644 --- a/src/libfetchers/include/nix/fetchers/fetch-settings.hh +++ b/src/libfetchers/include/nix/fetchers/fetch-settings.hh @@ -94,10 +94,7 @@ struct Settings : public Config are subsequently modified. Therefore lock files with dirty locks should generally only be used for local testing, and should not be pushed to other users. - )", - {}, - true, - Xp::Flakes}; + )"}; Setting trustTarballsFromGitForges{ this, @@ -118,16 +115,23 @@ struct Settings : public Config Setting flakeRegistry{ this, - "https://channels.nixos.org/flake-registry.json", + "https://install.determinate.systems/flake-registry/stable/flake-registry.json", "flake-registry", R"( Path or URI of the global flake registry. When empty, disables the global flake registry. - )", - {}, - true, - Xp::Flakes}; + )"}; + + Setting nix219Compat{ + this, + false, + "nix-219-compat", + R"( + If enabled, Nix will generate lock files that are compatible with Nix 2.19. + In particular, Nix will use `git archive` rather than `libgit2` to copy Git inputs. + The resulting locks may not be compatible with Nix >= 2.20. 
+ )"}; ref getCache() const; @@ -135,6 +139,17 @@ struct Settings : public Config private: mutable Sync> _cache; + + mutable Sync> _tarballCache; }; } // namespace nix::fetchers + +namespace nix { + +/** + * @todo Get rid of global setttings variables + */ +extern fetchers::Settings fetchSettings; + +} // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/fetch-to-store.hh b/src/libfetchers/include/nix/fetchers/fetch-to-store.hh index 3a223230235..e7f88072491 100644 --- a/src/libfetchers/include/nix/fetchers/fetch-to-store.hh +++ b/src/libfetchers/include/nix/fetchers/fetch-to-store.hh @@ -24,7 +24,17 @@ StorePath fetchToStore( PathFilter * filter = nullptr, RepairFlag repair = NoRepair); -fetchers::Cache::Key makeFetchToStoreCacheKey( - const std::string & name, const std::string & fingerprint, ContentAddressMethod method, const std::string & path); +std::pair fetchToStore2( + const fetchers::Settings & settings, + Store & store, + const SourcePath & path, + FetchMode mode, + std::string_view name = "source", + ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive, + PathFilter * filter = nullptr, + RepairFlag repair = NoRepair); + +fetchers::Cache::Key +makeSourcePathToHashCacheKey(const std::string & fingerprint, ContentAddressMethod method, const std::string & path); } // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/fetchers.hh b/src/libfetchers/include/nix/fetchers/fetchers.hh index 32a3d7d9bf5..c4b7c589d6c 100644 --- a/src/libfetchers/include/nix/fetchers/fetchers.hh +++ b/src/libfetchers/include/nix/fetchers/fetchers.hh @@ -61,11 +61,11 @@ public: */ static Input fromAttrs(const Settings & settings, Attrs && attrs); - ParsedURL toURL() const; + ParsedURL toURL(bool abbreviate = false) const; - std::string toURLString(const StringMap & extraQuery = {}) const; + std::string toURLString(const StringMap & extraQuery = {}, bool abbreviate = false) const; - std::string to_string() const; + std::string to_string(bool 
abbreviate = false) const; Attrs toAttrs() const; @@ -113,7 +113,7 @@ public: * Fetch the entire input into the Nix store, returning the * location in the Nix store and the locked input. */ - std::pair fetchToStore(const Settings & settings, Store & store) const; + std::tuple, Input> fetchToStore(const Settings & settings, Store & store) const; /** * Check the locking attributes in `result` against @@ -225,7 +225,7 @@ struct InputScheme */ virtual const std::map & allowedAttrs() const = 0; - virtual ParsedURL toURL(const Input & input) const; + virtual ParsedURL toURL(const Input & input, bool abbreviate = false) const; virtual Input applyOverrides(const Input & input, std::optional ref, std::optional rev) const; diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index 5e98caa5816..63df495907a 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -52,6 +52,8 @@ struct FilteringSourceAccessor : SourceAccessor std::pair> getFingerprint(const CanonPath & path) override; + void invalidateCache(const CanonPath & path) override; + /** * Call `makeNotAllowedError` to throw a `RestrictedPathError` * exception if `isAllowed()` returns `false` for `path`. 
diff --git a/src/libfetchers/include/nix/fetchers/git-utils.hh b/src/libfetchers/include/nix/fetchers/git-utils.hh index 5c79f256e86..eada8745c3e 100644 --- a/src/libfetchers/include/nix/fetchers/git-utils.hh +++ b/src/libfetchers/include/nix/fetchers/git-utils.hh @@ -26,14 +26,23 @@ struct GitAccessorOptions { bool exportIgnore = false; bool smudgeLfs = false; + bool submodules = false; // Currently implemented in GitInputScheme rather than GitAccessor + + std::string makeFingerprint(const Hash & rev) const; }; struct GitRepo { virtual ~GitRepo() {} - static ref - openRepo(const std::filesystem::path & path, bool create = false, bool bare = false, bool packfilesOnly = false); + struct Options + { + bool create = false; + bool bare = false; + bool packfilesOnly = false; + }; + + static ref openRepo(const std::filesystem::path & path, Options options); virtual uint64_t getRevCount(const Hash & rev) = 0; diff --git a/src/libfetchers/include/nix/fetchers/registry.hh b/src/libfetchers/include/nix/fetchers/registry.hh index dc7e3edb590..ca38dd805d6 100644 --- a/src/libfetchers/include/nix/fetchers/registry.hh +++ b/src/libfetchers/include/nix/fetchers/registry.hh @@ -39,6 +39,9 @@ struct Registry static std::shared_ptr read(const Settings & settings, const SourcePath & path, RegistryType type); + static std::shared_ptr + read(const Settings & settings, std::string_view whence, std::string_view jsonStr, RegistryType type); + void write(const std::filesystem::path & path); void add(const Input & from, const Input & to, const Attrs & extraAttrs); diff --git a/src/libfetchers/indirect.cc b/src/libfetchers/indirect.cc index b2a41a7421f..e629dcbac6b 100644 --- a/src/libfetchers/indirect.cc +++ b/src/libfetchers/indirect.cc @@ -100,7 +100,7 @@ struct IndirectInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { ParsedURL url{ .scheme = "flake", @@ -131,11 +131,6 
@@ struct IndirectInputScheme : InputScheme throw Error("indirect input '%s' cannot be fetched directly", input.to_string()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - bool isDirect(const Input & input) const override { return false; diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index dd31e224f6c..f9297ce8c2f 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -119,7 +119,7 @@ struct MercurialInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); url.scheme = "hg+" + url.scheme; @@ -247,9 +247,7 @@ struct MercurialInputScheme : InputScheme auto revInfoKey = [&](const Hash & rev) { if (rev.algo != HashAlgorithm::SHA1) - throw Error( - "Hash '%s' is not supported by Mercurial. Only sha1 is supported.", - rev.to_string(HashFormat::Base16, true)); + throw Error("Hash '%s' is not supported by Mercurial. 
Only sha1 is supported.", rev.gitRev()); return Cache::Key{"hgRev", {{"store", store.storeDir}, {"name", name}, {"rev", input.getRev()->gitRev()}}}; }; @@ -356,7 +354,7 @@ struct MercurialInputScheme : InputScheme auto storePath = fetchToStore(settings, store, input); auto accessor = store.requireStoreObjectAccessor(storePath); - accessor->setPathDisplay("«" + input.to_string() + "»"); + accessor->setPathDisplay("«" + input.to_string(true) + "»"); return {accessor, input}; } @@ -369,7 +367,7 @@ struct MercurialInputScheme : InputScheme std::optional getFingerprint(Store & store, const Input & input) const override { if (auto rev = input.getRev()) - return rev->gitRev(); + return "hg:" + rev->gitRev(); else return std::nullopt; } diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index d34dd4f434d..cd04615e52c 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -35,6 +35,7 @@ subdir('nix-meson-build-support/common') sources = files( 'attrs.cc', + 'builtin.cc', 'cache.cc', 'fetch-settings.cc', 'fetch-to-store.cc', @@ -54,6 +55,13 @@ sources = files( subdir('include/nix/fetchers') +# Generate builtin-flake-registry.json.gen.hh +subdir('nix-meson-build-support/generate-header') + +sources += gen_header.process( + 'builtin-flake-registry.json', +) + subdir('nix-meson-build-support/export-all-symbols') subdir('nix-meson-build-support/windows-version') diff --git a/src/libfetchers/package.nix b/src/libfetchers/package.nix index 14592087999..1a30ac29301 100644 --- a/src/libfetchers/package.nix +++ b/src/libfetchers/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-fetchers"; + pname = "determinate-nix-fetchers"; inherit version; workDir = ./.; @@ -28,6 +28,7 @@ mkMesonLibrary (finalAttrs: { ./.version ./meson.build ./include/nix/fetchers/meson.build + ./builtin-flake-registry.json (fileset.fileFilter (file: file.hasExt "cc") ./.) (fileset.fileFilter (file: file.hasExt "hh") ./.) 
]; diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index 7f48ce07bb9..b75f7053e3e 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -87,7 +87,7 @@ struct PathInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto query = attrsToQuery(input.attrs); query.erase("path"); @@ -142,51 +142,31 @@ struct PathInputScheme : InputScheme getAccessor(const Settings & settings, Store & store, const Input & _input) const override { Input input(_input); - auto path = getStrAttr(input.attrs, "path"); auto absPath = getAbsPath(input); // FIXME: check whether access to 'path' is allowed. + + auto accessor = makeFSSourceAccessor(absPath); + auto storePath = store.maybeParseStorePath(absPath.string()); - if (storePath) + if (storePath) { store.addTempRoot(*storePath); - time_t mtime = 0; - if (!storePath || storePath->name() != "source" || !store.isValidPath(*storePath)) { - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); - // FIXME: try to substitute storePath. - auto src = sinkToSource( - [&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); }); - storePath = store.addToStoreFromDump(*src, "source"); + // To prevent `fetchToStore()` copying the path again to Nix + // store, pre-create an entry in the fetcher cache. + auto info = store.maybeQueryPathInfo(*storePath); + if (info) { + accessor->fingerprint = fmt("path:%s", info->narHash.to_string(HashFormat::SRI, true)); + settings.getCache()->upsert( + makeSourcePathToHashCacheKey(*accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + {{"hash", info->narHash.to_string(HashFormat::SRI, true)}}); + } } - auto accessor = store.requireStoreObjectAccessor(*storePath); - - // To prevent `fetchToStore()` copying the path again to Nix - // store, pre-create an entry in the fetcher cache. 
- auto info = store.queryPathInfo(*storePath); - accessor->fingerprint = - fmt("path:%s", store.queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)); - settings.getCache()->upsert( - makeFetchToStoreCacheKey( - input.getName(), *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), - store, - {}, - *storePath); - - /* Trust the lastModified value supplied by the user, if - any. It's not a "secure" attribute so we don't care. */ - if (!input.getLastModified()) - input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - return {accessor, std::move(input)}; } - - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } }; static auto rPathInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index f96ef89b3ee..83de80bbccf 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ -14,14 +14,24 @@ std::shared_ptr Registry::read(const Settings & settings, const Source { debug("reading registry '%s'", path); - auto registry = std::make_shared(type); - if (!path.pathExists()) return std::make_shared(type); try { + return read(settings, path.to_string(), path.readFile(), type); + } catch (Error & e) { + warn("cannot read flake registry '%s': %s", path, e.what()); + return std::make_shared(type); + } +} - auto json = nlohmann::json::parse(path.readFile()); +std::shared_ptr +Registry::read(const Settings & settings, std::string_view whence, std::string_view jsonStr, RegistryType type) +{ + auto registry = std::make_shared(type); + + try { + auto json = nlohmann::json::parse(jsonStr); auto version = json.value("version", 0); @@ -45,12 +55,10 @@ std::shared_ptr Registry::read(const Settings & settings, const Source } else - throw Error("flake registry '%s' has unsupported version %d", path, version); + warn("flake registry '%s' has unsupported version %d", whence, version); } catch (nlohmann::json::exception & 
e) { - warn("cannot parse flake registry '%s': %s", path, e.what()); - } catch (Error & e) { - warn("cannot read flake registry '%s': %s", path, e.what()); + warn("cannot parse flake registry '%s': %s", whence, e.what()); } return registry; @@ -139,24 +147,38 @@ void overrideRegistry(const Input & from, const Input & to, const Attrs & extraA static std::shared_ptr getGlobalRegistry(const Settings & settings, Store & store) { static auto reg = [&]() { - auto path = settings.flakeRegistry.get(); - if (path == "") { - return std::make_shared(Registry::Global); // empty registry - } + try { + auto path = settings.flakeRegistry.get(); + if (path == "") { + return std::make_shared(Registry::Global); // empty registry + } - return Registry::read( - settings, - [&] -> SourcePath { - if (!isAbsolute(path)) { - auto storePath = downloadFile(store, settings, path, "flake-registry.json").storePath; - if (auto store2 = dynamic_cast(&store)) - store2->addPermRoot(storePath, (getCacheDir() / "flake-registry.json").string()); - return {store.requireStoreObjectAccessor(storePath)}; - } else { - return SourcePath{getFSSourceAccessor(), CanonPath{path}}.resolveSymlinks(); - } - }(), - Registry::Global); + return Registry::read( + settings, + [&] -> SourcePath { + if (!isAbsolute(path)) { + auto storePath = downloadFile(store, settings, path, "flake-registry.json").storePath; + if (auto store2 = dynamic_cast(&store)) + store2->addPermRoot(storePath, (getCacheDir() / "flake-registry.json").string()); + return {store.requireStoreObjectAccessor(storePath)}; + } else { + return SourcePath{getFSSourceAccessor(), CanonPath{path}}.resolveSymlinks(); + } + }(), + Registry::Global); + } catch (Error & e) { + warn( + "cannot fetch global flake registry '%s', will use builtin fallback registry: %s", + settings.flakeRegistry.get(), + e.info().msg); + // Use builtin registry as fallback + return Registry::read( + settings, + "builtin flake registry", +#include "builtin-flake-registry.json.gen.hh" 
+ , + Registry::Global); + } }(); return reg; diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index b1ebd749df6..3b9e756fec8 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -374,7 +374,7 @@ struct CurlInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); // NAR hashes are preferred over file hashes since tar/zip @@ -429,7 +429,7 @@ struct FileInputScheme : CurlInputScheme auto accessor = ref{store.getFSAccessor(file.storePath)}; - accessor->setPathDisplay("«" + input.to_string() + "»"); + accessor->setPathDisplay("«" + input.to_string(true) + "»"); return {accessor, input}; } @@ -484,7 +484,7 @@ struct TarballInputScheme : CurlInputScheme { auto input(_input); - auto result = downloadTarball_(settings, getStrAttr(input.attrs, "url"), {}, "«" + input.to_string() + "»"); + auto result = downloadTarball_(settings, getStrAttr(input.attrs, "url"), {}, "«" + input.to_string(true) + "»"); if (result.immutableUrl) { auto immutableInput = Input::fromURL(settings, *result.immutableUrl); @@ -508,9 +508,9 @@ struct TarballInputScheme : CurlInputScheme std::optional getFingerprint(Store & store, const Input & input) const override { if (auto narHash = input.getNarHash()) - return narHash->to_string(HashFormat::SRI, true); + return "tarball:" + narHash->to_string(HashFormat::SRI, true); else if (auto rev = input.getRev()) - return rev->gitRev(); + return "tarball:" + rev->gitRev(); else return std::nullopt; } diff --git a/src/libflake-c/nix_api_flake.cc b/src/libflake-c/nix_api_flake.cc index 32329585a66..793db44b438 100644 --- a/src/libflake-c/nix_api_flake.cc +++ b/src/libflake-c/nix_api_flake.cc @@ -206,4 +206,20 @@ nix_value * nix_locked_flake_get_output_attrs( NIXC_CATCH_ERRS_NULL } +nix_err nix_locked_flake_read_path( + nix_c_context * context, + 
nix_locked_flake * lockedFlake, + const char * path, + nix_get_string_callback callback, + void * user_data) +{ + nix_clear_err(context); + try { + auto source_path = lockedFlake->lockedFlake->flake.path.parent() / nix::CanonPath(path); + auto v = source_path.readFile(); + return call_nix_get_string_callback(v, callback, user_data); + } + NIXC_CATCH_ERRS +} + } // extern "C" diff --git a/src/libflake-c/nix_api_flake.h b/src/libflake-c/nix_api_flake.h index a1a7060a614..92546348311 100644 --- a/src/libflake-c/nix_api_flake.h +++ b/src/libflake-c/nix_api_flake.h @@ -238,6 +238,23 @@ void nix_flake_reference_free(nix_flake_reference * store); nix_value * nix_locked_flake_get_output_attrs( nix_c_context * context, nix_flake_settings * settings, EvalState * evalState, nix_locked_flake * lockedFlake); +/** + * @brief Reads a file within the flake. + * @note The callback borrows the string only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] locked_flake the flake to get the path for + * @param[in] path The path within the flake. 
+ * @param[in] callback The callback to call with the string + * @param[in] user_data Additional data to pass for the callback + */ +nix_err nix_locked_flake_read_path( + nix_c_context * context, + nix_locked_flake * lockedFlake, + const char * path, + nix_get_string_callback callback, + void * user_data); + #ifdef __cplusplus } // extern "C" #endif diff --git a/src/libflake-c/package.nix b/src/libflake-c/package.nix index 8c6883d9cf9..9ae3ec69515 100644 --- a/src/libflake-c/package.nix +++ b/src/libflake-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake-c"; + pname = "determinate-nix-flake-c"; inherit version; workDir = ./.; diff --git a/src/libflake-tests/flakeref.cc b/src/libflake-tests/flakeref.cc index eb8b56ea29d..3cc655907b3 100644 --- a/src/libflake-tests/flakeref.cc +++ b/src/libflake-tests/flakeref.cc @@ -17,8 +17,6 @@ namespace nix { TEST(parseFlakeRef, path) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { @@ -67,8 +65,6 @@ TEST(parseFlakeRef, path) TEST(parseFlakeRef, GitArchiveInput) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { @@ -111,7 +107,6 @@ class InputFromURLTest : public ::testing::WithParamInterface, fetchers::Attrs> parseFlakeInputs( @@ -90,7 +93,7 @@ static std::pair, fetchers::Attrs> parseFlakeInput const SourcePath & flakeDir, bool allowSelf); -static void parseFlakeInputAttr(EvalState & state, const Attr & attr, fetchers::Attrs & attrs) +static void parseFlakeInputAttr(EvalState & state, const nix::Attr & attr, fetchers::Attrs & attrs) { // Allow selecting a subset of enum values #pragma GCC diagnostic push @@ -144,6 +147,7 @@ static FlakeInput parseFlakeInput( auto sUrl = state.symbols.create("url"); auto sFlake = state.symbols.create("flake"); auto sFollows = state.symbols.create("follows"); + auto sBuildTime = state.symbols.create("buildTime"); 
fetchers::Attrs attrs; std::optional url; @@ -172,6 +176,11 @@ static FlakeInput parseFlakeInput( } else if (attr.name == sFlake) { expectType(state, nBool, *attr.value, attr.pos); input.isFlake = attr.value->boolean(); + } else if (attr.name == sBuildTime) { + expectType(state, nBool, *attr.value, attr.pos); + input.buildTime = attr.value->boolean(); + if (input.buildTime) + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); } else if (attr.name == sInputs) { input.overrides = parseFlakeInputs(state, attr.value, attr.pos, lockRootAttrPath, flakeDir, false).first; @@ -369,7 +378,8 @@ static Flake getFlake( EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, - const InputAttrPath & lockRootAttrPath) + const InputAttrPath & lockRootAttrPath, + bool requireLockable) { // Fetch a lazy tree first. auto cachedInput = @@ -401,13 +411,14 @@ static Flake getFlake( originalRef, resolvedRef, lockedRef, - state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor)), + state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor, requireLockable)), lockRootAttrPath); } -Flake getFlake(EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries) +Flake getFlake( + EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, bool requireLockable) { - return getFlake(state, originalRef, useRegistries, {}); + return getFlake(state, originalRef, useRegistries, {}, requireLockable); } static LockFile readLockFile(const fetchers::Settings & fetchSettings, const SourcePath & lockFilePath) @@ -421,13 +432,11 @@ static LockFile readLockFile(const fetchers::Settings & fetchSettings, const Sou LockedFlake lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags) { - experimentalFeatureSettings.require(Xp::Flakes); - auto useRegistries = 
lockFlags.useRegistries.value_or(settings.useRegistries); auto useRegistriesTop = useRegistries ? fetchers::UseRegistries::All : fetchers::UseRegistries::No; auto useRegistriesInputs = useRegistries ? fetchers::UseRegistries::Limited : fetchers::UseRegistries::No; - auto flake = getFlake(state, topRef, useRegistriesTop, {}); + auto flake = getFlake(state, topRef, useRegistriesTop, {}, false); if (lockFlags.applyNixConfig) { flake.config.apply(settings); @@ -608,7 +617,7 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, if (auto resolvedPath = resolveRelativePath()) { return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath); } else { - return getFlake(state, ref, useRegistries, inputAttrPath); + return getFlake(state, ref, useRegistriesInputs, inputAttrPath, true); } }; @@ -631,7 +640,11 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, didn't change and there is no override from a higher level flake. */ auto childNode = make_ref( - oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake, oldLock->parentInputAttrPath); + oldLock->lockedRef, + oldLock->originalRef, + oldLock->isFlake, + oldLock->buildTime, + oldLock->parentInputAttrPath); node->inputs.insert_or_assign(id, childNode); @@ -720,12 +733,34 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto inputIsOverride = explicitCliOverrides.contains(inputAttrPath); auto ref = (input2.ref && inputIsOverride) ? *input2.ref : *input.ref; + /* Warn against the use of indirect flakerefs + (but only at top-level since we don't want + to annoy users about flakes that are not + under their control). */ + auto warnRegistry = [&](const FlakeRef & resolvedRef) { + if (inputAttrPath.size() == 1 && !input.ref->input.isDirect()) { + std::ostringstream s; + printLiteralString(s, resolvedRef.to_string()); + warn( + "Flake input '%1%' uses the flake registry. 
" + "Using the registry in flake inputs is deprecated in Determinate Nix. " + "To make your flake future-proof, add the following to '%2%':\n" + "\n" + " inputs.%1%.url = %3%;\n" + "\n" + "For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37", + inputAttrPathS, + flake.path, + s.str()); + } + }; + if (input.isFlake) { auto inputFlake = getInputFlake( *input.ref, inputIsOverride ? fetchers::UseRegistries::All : useRegistriesInputs); - auto childNode = - make_ref(inputFlake.lockedRef, ref, true, overriddenParentPath); + auto childNode = make_ref( + inputFlake.lockedRef, ref, true, input.buildTime, overriddenParentPath); node->inputs.insert_or_assign(id, childNode); @@ -747,6 +782,8 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, inputAttrPath, inputFlake.path, false); + + warnRegistry(inputFlake.resolvedRef); } else { @@ -758,16 +795,21 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto cachedInput = state.inputCache->getAccessor( state.fetchSettings, *state.store, input.ref->input, useRegistriesInputs); + auto resolvedRef = + FlakeRef(std::move(cachedInput.resolvedInput), input.ref->subdir); auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), input.ref->subdir); + warnRegistry(resolvedRef); + return { - state.storePath( - state.mountInput(lockedRef.input, input.ref->input, cachedInput.accessor)), + state.storePath(state.mountInput( + lockedRef.input, input.ref->input, cachedInput.accessor, true, true)), lockedRef}; } }(); - auto childNode = make_ref(lockedRef, ref, false, overriddenParentPath); + auto childNode = + make_ref(lockedRef, ref, false, input.buildTime, overriddenParentPath); nodePaths.emplace(childNode, path); @@ -877,13 +919,15 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, CanonPath((topRef.subdir == "" ? 
"" : topRef.subdir + "/") + "flake.lock"), newLockFileS, commitMessage); + + flake.lockFilePath().invalidateCache(); } /* Rewriting the lockfile changed the top-level repo, so we should re-read it. FIXME: we could also just clear the 'rev' field... */ auto prevLockedRef = flake.lockedRef; - flake = getFlake(state, topRef, useRegistriesTop); + flake = getFlake(state, topRef, useRegistriesTop, lockFlags.requireLockable); if (lockFlags.commitLockFile && flake.lockedRef.input.getRev() && prevLockedRef.input.getRev() != flake.lockedRef.input.getRev()) @@ -930,8 +974,6 @@ static Value * requireInternalFile(EvalState & state, CanonPath path) void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) { - experimentalFeatureSettings.require(Xp::Flakes); - auto [lockFileStr, keyMap] = lockedFlake.lockFile.to_string(); auto overrides = state.buildBindings(lockedFlake.nodePaths.size()); @@ -968,10 +1010,7 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) auto vLocks = state.allocValue(); vLocks->mkString(lockFileStr, state.mem); - auto vFetchFinalTree = get(state.internalPrimOps, "fetchFinalTree"); - assert(vFetchFinalTree); - - Value * args[] = {vLocks, &vOverrides, *vFetchFinalTree}; + Value * args[] = {vLocks, &vOverrides}; state.callFunction(*vCallFlake, args, vRes, noPos); } diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index 0a55ac35cf3..d186db8ac85 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -26,6 +26,7 @@ #include "nix/store/outputs-spec.hh" #include "nix/util/ref.hh" #include "nix/util/types.hh" +#include "nix/fetchers/fetch-settings.hh" namespace nix { class Store; @@ -42,12 +43,12 @@ const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; #endif -std::string FlakeRef::to_string() const +std::string FlakeRef::to_string(bool abbreviate) const { StringMap 
extraQuery; if (subdir != "") extraQuery.insert_or_assign("dir", subdir); - return input.toURLString(extraQuery); + return input.toURLString(extraQuery, abbreviate); } fetchers::Attrs FlakeRef::toAttrs() const @@ -90,7 +91,8 @@ static std::pair fromParsedURL(const fetchers::Settings & fetchSettings, ParsedURL && parsedURL, bool isFlake) { auto dir = getOr(parsedURL.query, "dir", ""); - parsedURL.query.erase("dir"); + if (!fetchSettings.nix219Compat) + parsedURL.query.erase("dir"); std::string fragment; std::swap(fragment, parsedURL.fragment); diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index c2d597ac15e..0ca6094175e 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -44,12 +44,18 @@ typedef std::map FlakeInputs; struct FlakeInput { std::optional ref; + /** - * true = process flake to get outputs - * - * false = (fetched) static source path + * Whether to call the `flake.nix` file in this input to get its outputs. */ bool isFlake = true; + + /** + * Whether to fetch this input at evaluation time or at build + * time. + */ + bool buildTime = false; + std::optional follows; FlakeInputs overrides; }; @@ -116,7 +122,8 @@ struct Flake } }; -Flake getFlake(EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries); +Flake getFlake( + EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries, bool requireLockable = true); /** * Fingerprint of a locked flake; used as a cache key. @@ -212,6 +219,11 @@ struct LockFlags * for those inputs will be ignored. */ std::set inputUpdates; + + /** + * Whether to require a locked input. + */ + bool requireLockable = true; }; LockedFlake @@ -234,11 +246,4 @@ void emitTreeAttrs( bool emptyRevFallback = false, bool forceDirty = false); -/** - * An internal builtin similar to `fetchTree`, except that it - * always treats the input as final (i.e. 
no attributes can be - * added/removed/changed). - */ -void prim_fetchFinalTree(EvalState & state, const PosIdx pos, Value ** args, Value & v); - } // namespace nix diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index 05c21bd1c6d..629afab03b5 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -66,8 +66,7 @@ struct FlakeRef { } - // FIXME: change to operator <<. - std::string to_string() const; + std::string to_string(bool abbreviate = false) const; fetchers::Attrs toAttrs() const; diff --git a/src/libflake/include/nix/flake/lockfile.hh b/src/libflake/include/nix/flake/lockfile.hh index c5740a2f114..1ca7cc3dd30 100644 --- a/src/libflake/include/nix/flake/lockfile.hh +++ b/src/libflake/include/nix/flake/lockfile.hh @@ -37,6 +37,7 @@ struct LockedNode : Node { FlakeRef lockedRef, originalRef; bool isFlake = true; + bool buildTime = false; /* The node relative to which relative source paths (e.g. 'path:../foo') are interpreted. 
*/ @@ -46,10 +47,12 @@ struct LockedNode : Node const FlakeRef & lockedRef, const FlakeRef & originalRef, bool isFlake = true, + bool buildTime = false, std::optional parentInputAttrPath = {}) : lockedRef(std::move(lockedRef)) , originalRef(std::move(originalRef)) , isFlake(isFlake) + , buildTime(buildTime) , parentInputAttrPath(std::move(parentInputAttrPath)) { } diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 7187a3294a3..05b36f5b779 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -21,13 +21,7 @@ struct Settings : public Config void configureEvalSettings(nix::EvalSettings & evalSettings) const; Setting useRegistries{ - this, - true, - "use-registries", - "Whether to use flake registries to resolve flake references.", - {}, - true, - Xp::Flakes}; + this, true, "use-registries", "Whether to use flake registries to resolve flake references.", {}, true}; Setting acceptFlakeConfig{ this, @@ -35,8 +29,7 @@ struct Settings : public Config "accept-flake-config", "Whether to accept Nix configuration settings from a flake without prompting.", {}, - true, - Xp::Flakes}; + true}; Setting commitLockFileSummary{ this, @@ -47,8 +40,7 @@ struct Settings : public Config empty, the summary is generated based on the action performed. )", {"commit-lockfile-summary"}, - true, - Xp::Flakes}; + true}; }; } // namespace nix::flake diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index f2914feab78..83a692b9871 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -71,6 +71,7 @@ LockedNode::LockedNode(const fetchers::Settings & fetchSettings, const nlohmann: : lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info" , originalRef(getFlakeRef(fetchSettings, json, "original", nullptr)) , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true) + , buildTime(json.find("buildTime") != json.end() ? 
(bool) json["buildTime"] : false) , parentInputAttrPath( json.find("parent") != json.end() ? (std::optional) json["parent"] : std::nullopt) { @@ -236,6 +237,8 @@ std::pair LockFile::toJSON() const n["locked"].erase("__final"); if (!lockedNode->isFlake) n["flake"] = false; + if (lockedNode->buildTime) + n["buildTime"] = true; if (lockedNode->parentInputAttrPath) n["parent"] = *lockedNode->parentInputAttrPath; } @@ -339,7 +342,7 @@ std::map LockFile::getAllInputs() const static std::string describe(const FlakeRef & flakeRef) { - auto s = fmt("'%s'", flakeRef.to_string()); + auto s = fmt("'%s'", flakeRef.to_string(true)); if (auto lastModified = flakeRef.input.getLastModified()) s += fmt(" (%s)", std::put_time(std::gmtime(&*lastModified), "%Y-%m-%d")); diff --git a/src/libflake/package.nix b/src/libflake/package.nix index dd442a44ec9..2b0c827a09c 100644 --- a/src/libflake/package.nix +++ b/src/libflake/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake"; + pname = "determinate-nix-flake"; inherit version; workDir = ./.; diff --git a/src/libmain-c/package.nix b/src/libmain-c/package.nix index f019a917d36..17858d56f2e 100644 --- a/src/libmain-c/package.nix +++ b/src/libmain-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-main-c"; + pname = "determinate-nix-main-c"; inherit version; workDir = ./.; diff --git a/src/libmain/include/nix/main/shared.hh b/src/libmain/include/nix/main/shared.hh index 43069ba82bd..800018290f6 100644 --- a/src/libmain/include/nix/main/shared.hh +++ b/src/libmain/include/nix/main/shared.hh @@ -29,6 +29,8 @@ void parseCmdLine( const Strings & args, std::function parseArg); +std::string version(); + void printVersion(const std::string & programName); /** diff --git a/src/libmain/package.nix b/src/libmain/package.nix index 7b0a4dee7da..119e1f1aca5 100644 --- a/src/libmain/package.nix +++ b/src/libmain/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = 
"nix-main"; + pname = "determinate-nix-main"; inherit version; workDir = ./.; diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index a973102f950..05fd8982786 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -51,6 +51,7 @@ class ProgressBar : public Logger ActivityId parent; std::optional name; std::chrono::time_point startTime; + bool logged = false; }; struct ActivitiesByType @@ -142,8 +143,14 @@ class ProgressBar : public Logger return; } - if (state->active) + if (state->active) { writeToStderr("\r\e[K"); + /* Show activities that were previously only shown on the + progress bar. Otherwise the user won't know what's + happening. */ + for (auto & act : state->activities) + logActivity(*state, lvlNotice, act); + } } void resume() override @@ -196,6 +203,14 @@ class ProgressBar : public Logger } } + void logActivity(State & state, Verbosity lvl, ActInfo & act) + { + if (!act.logged && lvl <= verbosity && !act.s.empty() && act.type != actBuildWaiting) { + log(state, lvl, act.s + "..."); + act.logged = true; + } + } + void startActivity( ActivityId act, Verbosity lvl, @@ -206,15 +221,14 @@ class ProgressBar : public Logger { auto state(state_.lock()); - if (lvl <= verbosity && !s.empty() && type != actBuildWaiting) - log(*state, lvl, s + "..."); - state->activities.emplace_back( ActInfo{.s = s, .type = type, .parent = parent, .startTime = std::chrono::steady_clock::now()}); auto i = std::prev(state->activities.end()); state->its.emplace(act, i); state->activitiesByType[type].its.emplace(act, i); + logActivity(*state, lvl, *i); + if (type == actBuild) { std::string name(storePathToName(getS(fields, 0))); if (hasSuffix(name, ".drv")) @@ -456,11 +470,7 @@ class ProgressBar : public Logger } } - auto width = getWindowSize().second; - if (width <= 0) - width = std::numeric_limits::max(); - - redraw("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"); + redraw("\r" + filterANSIEscapes(line, false, 
getWindowWidth()) + ANSI_NORMAL + "\e[K"); return nextWakeup; } diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index ad1caae2b64..cac9e38ad85 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -292,9 +292,14 @@ void parseCmdLine( LegacyArgs(programName, parseArg).parseCmdline(args); } +std::string version() +{ + return fmt("(Determinate Nix %s) %s", determinateNixVersion, nixVersion); +} + void printVersion(const std::string & programName) { - std::cout << fmt("%1% (Nix) %2%", programName, nixVersion) << std::endl; + std::cout << fmt("%s %s", programName, version()) << std::endl; if (verbosity > lvlInfo) { Strings cfg; #if NIX_USE_BOEHMGC @@ -326,7 +331,7 @@ int handleExceptions(const std::string & programName, std::function fun) return e.status; } catch (UsageError & e) { logError(e.info()); - printError("Try '%1% --help' for more information.", programName); + printError("\nTry '%1% --help' for more information.", programName); return 1; } catch (BaseError & e) { logError(e.info()); diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index 4f71d0a3cae..80fcf10cb0d 100644 --- a/src/libstore-c/nix_api_store.cc +++ b/src/libstore-c/nix_api_store.cc @@ -290,6 +290,31 @@ nix_derivation * nix_derivation_from_json(nix_c_context * context, Store * store NIXC_CATCH_ERRS_NULL } +nix_err nix_derivation_make_outputs( + nix_c_context * context, + Store * store, + const char * json, + void (*callback)(void * userdata, const char * output_name, const char * path), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto drv = nix::Derivation::parseJsonAndValidate(*store->ptr, nlohmann::json::parse(json)); + auto hashesModulo = hashDerivationModulo(*store->ptr, drv, true); + + for (auto & output : drv.outputs) { + nix::Hash h = hashesModulo.hashes.at(output.first); + auto outPath = store->ptr->makeOutputPath(output.first, h, drv.name); + + if (callback) { + callback(userdata, 
output.first.c_str(), store->ptr->printStorePath(outPath).c_str()); + } + } + } + NIXC_CATCH_ERRS +} + nix_err nix_derivation_to_json( nix_c_context * context, const nix_derivation * drv, nix_get_string_callback callback, void * userdata) { @@ -338,4 +363,94 @@ nix_derivation * nix_store_drv_from_store_path(nix_c_context * context, Store * NIXC_CATCH_ERRS_NULL } +nix_err nix_store_drv_from_path( + nix_c_context * context, + Store * store, + const StorePath * path, + void (*callback)(void * userdata, const nix_derivation * drv), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + nix::Derivation drv = store->ptr->derivationFromPath(path->path); + if (callback) { + const nix_derivation tmp{drv, store}; + callback(userdata, &tmp); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_store_query_path_info( + nix_c_context * context, + Store * store, + const StorePath * store_path, + void * userdata, + nix_get_string_callback callback) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto info = store->ptr->queryPathInfo(store_path->path); + if (callback) { + auto result = info->toJSON(&store->ptr->config, true, nix::PathInfoJsonFormat::V1).dump(); + callback(result.data(), result.size(), userdata); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_store_build_paths( + nix_c_context * context, + Store * store, + const StorePath ** store_paths, + unsigned int num_store_paths, + void (*callback)(void * userdata, const char * path, const char * result), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + std::vector derived_paths; + for (size_t i = 0; i < num_store_paths; i++) { + const StorePath * store_path = store_paths[i]; + derived_paths.push_back(nix::SingleDerivedPath::Opaque{store_path->path}); + } + + auto results = store->ptr->buildPathsWithResults(derived_paths); + for (auto & result : results) { + if (callback) + callback( + userdata, result.path.to_string(store->ptr->config).c_str(), 
nlohmann::json(result).dump().c_str()); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_derivation_get_outputs_and_optpaths( + nix_c_context * context, + const nix_derivation * drv, + const Store * store, + void (*callback)(void * userdata, const char * name, const StorePath * path), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto value = drv->drv.outputsAndOptPaths(store->ptr->config); + if (callback) { + for (const auto & [name, result] : value) { + if (auto store_path = result.second) { + const StorePath tmp_path{*store_path}; + callback(userdata, name.c_str(), &tmp_path); + } else { + callback(userdata, name.c_str(), nullptr); + } + } + } + } + NIXC_CATCH_ERRS +} + } // extern "C" diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index 761fdf3c899..5e542b0caaf 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -201,6 +201,22 @@ nix_store_get_version(nix_c_context * context, Store * store, nix_get_string_cal */ nix_derivation * nix_derivation_from_json(nix_c_context * context, Store * store, const char * json); +/** + * @brief Hashes the derivation and gives the output paths + * + * @param[in] context Optional, stores error information. + * @param[in] store nix store reference. + * @param[in] json JSON of the derivation as a string. + * @param[in] callback Called for every output to provide the output path. + * @param[in] userdata User data to pass to the callback. 
+ */ +nix_err nix_derivation_make_outputs( + nix_c_context * context, + Store * store, + const char * json, + void (*callback)(void * userdata, const char * output_name, const char * path), + void * userdata); + /** * @brief Add the given `nix_derivation` to the given store * @@ -259,6 +275,88 @@ nix_err nix_store_get_fs_closure( */ nix_derivation * nix_store_drv_from_store_path(nix_c_context * context, Store * store, const StorePath * path); +/** + * @note The callback borrows the Derivation only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] store The nix store + * @param[in] path The nix store path + * @param[in] callback The callback to call + * @param[in] userdata The userdata to pass to the callback + */ +nix_err nix_store_drv_from_path( + nix_c_context * context, + Store * store, + const StorePath * path, + void (*callback)(void * userdata, const nix_derivation * drv), + void * userdata); + +/** + * @brief Queries for the nix store path info. + * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_path A store path + * @param[in] userdata The data to pass to the callback + * @param[in] callback Called for when the path info is resolved + */ +nix_err nix_store_query_path_info( + nix_c_context * context, + Store * store, + const StorePath * store_path, + void * userdata, + nix_get_string_callback callback); + +/** + * @brief Builds the paths, if they are a derivation then they get built. + * + * @note Path and result for the callback only exist for the lifetime of + * the call. Result is a string containing the build result in JSON. 
+ * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_paths Pointer to list of nix store paths + * @param[in] num_store_paths Number of nix store paths + * @param[in] callback The callback to trigger for build results + * @param[in] userdata User data to pass to the callback + */ +nix_err nix_store_build_paths( + nix_c_context * context, + Store * store, + const StorePath ** store_paths, + unsigned int num_store_paths, + void (*callback)(void * userdata, const char * path, const char * result), + void * userdata); + +/** + * @brief Iterate and get all of the store paths for each output. + * + * @note The callback borrows the StorePath only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] drv The derivation + * @param[in] store The nix store + * @param[in] callback The function to call on every output and store path + * @param[in] userdata The userdata to pass to the callback + */ +nix_err nix_derivation_get_outputs_and_optpaths( + nix_c_context * context, + const nix_derivation * drv, + const Store * store, + void (*callback)(void * userdata, const char * name, const StorePath * path), + void * userdata); + +/** + * @brief Gets the derivation as a JSON string + * + * @param[out] context Optional, stores error information + * @param[in] drv The derivation + * @param[in] callback Called with the JSON string + * @param[in] userdata Arbitrary data passed to the callback + */ +nix_err nix_derivation_to_json( + nix_c_context * context, const nix_derivation * drv, nix_get_string_callback callback, void * userdata); + // cffi end #ifdef __cplusplus } diff --git a/src/libstore-c/nix_api_store_internal.h b/src/libstore-c/nix_api_store_internal.h index 712d96488a5..0199628da8a 100644 --- a/src/libstore-c/nix_api_store_internal.h +++ b/src/libstore-c/nix_api_store_internal.h @@ -18,6 +18,7 @@ struct StorePath struct nix_derivation { nix::Derivation 
drv; + Store * store; }; } // extern "C" diff --git a/src/libstore-c/package.nix b/src/libstore-c/package.nix index fde17c78e01..0ce37e44c01 100644 --- a/src/libstore-c/package.nix +++ b/src/libstore-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-c"; + pname = "determinate-nix-store-c"; inherit version; workDir = ./.; diff --git a/src/libstore-test-support/package.nix b/src/libstore-test-support/package.nix index 391ddeefda2..2561dd791eb 100644 --- a/src/libstore-test-support/package.nix +++ b/src/libstore-test-support/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-test-support"; + pname = "determinate-nix-store-test-support"; inherit version; workDir = ./.; diff --git a/src/libstore/active-builds.cc b/src/libstore/active-builds.cc new file mode 100644 index 00000000000..838f188d891 --- /dev/null +++ b/src/libstore/active-builds.cc @@ -0,0 +1,149 @@ +#include "nix/store/active-builds.hh" +#include "nix/util/json-utils.hh" + +#include + +#ifndef _WIN32 +# include +#endif + +namespace nix { + +UserInfo UserInfo::fromUid(uid_t uid) +{ + UserInfo info; + info.uid = uid; + +#ifndef _WIN32 + // Look up the user name for the UID (thread-safe) + struct passwd pwd; + struct passwd * result; + std::vector buf(16384); + if (getpwuid_r(uid, &pwd, buf.data(), buf.size(), &result) == 0 && result) + info.name = result->pw_name; +#endif + + return info; +} + +} // namespace nix + +namespace nlohmann { + +using namespace nix; + +UserInfo adl_serializer::from_json(const json & j) +{ + return UserInfo{ + .uid = j.at("uid").get(), + .name = j.contains("name") && !j.at("name").is_null() + ? std::optional(j.at("name").get()) + : std::nullopt, + }; +} + +void adl_serializer::to_json(json & j, const UserInfo & info) +{ + j = nlohmann::json{ + {"uid", info.uid}, + {"name", info.name}, + }; +} + +// Durations are serialized as floats representing seconds. 
+static std::optional parseDuration(const json & j, const char * key) +{ + if (j.contains(key) && !j.at(key).is_null()) + return std::chrono::duration_cast( + std::chrono::duration(j.at(key).get())); + else + return std::nullopt; +} + +static nlohmann::json printDuration(const std::optional & duration) +{ + return duration + ? nlohmann::json( + std::chrono::duration_cast>(*duration) + .count()) + : nullptr; +} + +ActiveBuildInfo::ProcessInfo adl_serializer::from_json(const json & j) +{ + return ActiveBuildInfo::ProcessInfo{ + .pid = j.at("pid").get(), + .parentPid = j.at("parentPid").get(), + .user = j.at("user").get(), + .argv = j.at("argv").get>(), + .utime = parseDuration(j, "utime"), + .stime = parseDuration(j, "stime"), + .cutime = parseDuration(j, "cutime"), + .cstime = parseDuration(j, "cstime"), + }; +} + +void adl_serializer::to_json(json & j, const ActiveBuildInfo::ProcessInfo & process) +{ + j = nlohmann::json{ + {"pid", process.pid}, + {"parentPid", process.parentPid}, + {"user", process.user}, + {"argv", process.argv}, + {"utime", printDuration(process.utime)}, + {"stime", printDuration(process.stime)}, + {"cutime", printDuration(process.cutime)}, + {"cstime", printDuration(process.cstime)}, + }; +} + +ActiveBuild adl_serializer::from_json(const json & j) +{ + auto type = j.at("type").get(); + if (type != "build") + throw Error("invalid active build JSON: expected type 'build' but got '%s'", type); + return ActiveBuild{ + .nixPid = j.at("nixPid").get(), + .clientPid = j.at("clientPid").get>(), + .clientUid = j.at("clientUid").get>(), + .mainPid = j.at("mainPid").get(), + .mainUser = j.at("mainUser").get(), + .cgroup = j.at("cgroup").get>(), + .startTime = (time_t) j.at("startTime").get(), + .derivation = StorePath{getString(j.at("derivation"))}, + }; +} + +void adl_serializer::to_json(json & j, const ActiveBuild & build) +{ + j = nlohmann::json{ + {"type", "build"}, + {"nixPid", build.nixPid}, + {"clientPid", build.clientPid}, + {"clientUid", 
build.clientUid}, + {"mainPid", build.mainPid}, + {"mainUser", build.mainUser}, + {"cgroup", build.cgroup}, + {"startTime", (double) build.startTime}, + {"derivation", build.derivation.to_string()}, + }; +} + +ActiveBuildInfo adl_serializer::from_json(const json & j) +{ + ActiveBuildInfo info(adl_serializer::from_json(j)); + info.processes = j.at("processes").get>(); + info.utime = parseDuration(j, "utime"); + info.stime = parseDuration(j, "stime"); + return info; +} + +void adl_serializer::to_json(json & j, const ActiveBuildInfo & build) +{ + adl_serializer::to_json(j, build); + j["processes"] = build.processes; + j["utime"] = printDuration(build.utime); + j["stime"] = printDuration(build.stime); +} + +} // namespace nlohmann diff --git a/src/libstore/async-path-writer.cc b/src/libstore/async-path-writer.cc new file mode 100644 index 00000000000..3271e7926a8 --- /dev/null +++ b/src/libstore/async-path-writer.cc @@ -0,0 +1,173 @@ +#include "nix/store/async-path-writer.hh" +#include "nix/util/archive.hh" + +#include +#include + +namespace nix { + +struct AsyncPathWriterImpl : AsyncPathWriter +{ + ref store; + + struct Item + { + StorePath storePath; + std::string contents; + std::string name; + Hash hash; + StorePathSet references; + RepairFlag repair; + std::promise promise; + }; + + struct State + { + std::vector items; + std::unordered_map> futures; + bool quit = false; + }; + + Sync state_; + + std::thread workerThread; + + std::condition_variable wakeupCV; + + AsyncPathWriterImpl(ref store) + : store(store) + { + workerThread = std::thread([&]() { + while (true) { + std::vector items; + + { + auto state(state_.lock()); + while (!state->quit && state->items.empty()) + state.wait(wakeupCV); + if (state->items.empty() && state->quit) + return; + std::swap(items, state->items); + } + + try { + writePaths(items); + for (auto & item : items) + item.promise.set_value(); + } catch (...) 
{ + for (auto & item : items) + item.promise.set_exception(std::current_exception()); + } + } + }); + } + + virtual ~AsyncPathWriterImpl() + { + state_.lock()->quit = true; + wakeupCV.notify_all(); + workerThread.join(); + } + + StorePath + addPath(std::string contents, std::string name, StorePathSet references, RepairFlag repair, bool readOnly) override + { + auto hash = hashString(HashAlgorithm::SHA256, contents); + + auto storePath = store->makeFixedOutputPathFromCA( + name, + TextInfo{ + .hash = hash, + .references = references, + }); + + if (!readOnly) { + auto state(state_.lock()); + std::promise promise; + state->futures.insert_or_assign(storePath, promise.get_future()); + state->items.push_back( + Item{ + .storePath = storePath, + .contents = std::move(contents), + .name = std::move(name), + .hash = hash, + .references = std::move(references), + .repair = repair, + .promise = std::move(promise), + }); + wakeupCV.notify_all(); + } + + return storePath; + } + + void waitForPath(const StorePath & path) override + { + auto future = ({ + auto state = state_.lock(); + auto i = state->futures.find(path); + if (i == state->futures.end()) + return; + i->second; + }); + future.get(); + } + + void waitForAllPaths() override + { + auto futures = ({ + auto state(state_.lock()); + std::move(state->futures); + }); + for (auto & future : futures) + future.second.get(); + } + + void writePaths(const std::vector & items) + { +// FIXME: addMultipeToStore() shouldn't require a NAR hash. 
+#if 0 + Store::PathsSource sources; + RepairFlag repair = NoRepair; + + for (auto & item : items) { + ValidPathInfo info{item.storePath, Hash(HashAlgorithm::SHA256)}; + info.references = item.references; + info.ca = ContentAddress { + .method = ContentAddressMethod::Raw::Text, + .hash = item.hash, + }; + if (item.repair) repair = item.repair; + auto source = sinkToSource([&](Sink & sink) + { + dumpString(item.contents, sink); + }); + sources.push_back({std::move(info), std::move(source)}); + } + + Activity act(*logger, lvlDebug, actUnknown, fmt("adding %d paths to the store", items.size())); + + store->addMultipleToStore(std::move(sources), act, repair); +#endif + + for (auto & item : items) { + StringSource source(item.contents); + auto storePath = store->addToStoreFromDump( + source, + item.storePath.name(), + FileSerialisationMethod::Flat, + ContentAddressMethod::Raw::Text, + HashAlgorithm::SHA256, + item.references, + item.repair); + assert(storePath == item.storePath); + } + } +}; + +ref AsyncPathWriter::make(ref store) +{ + return make_ref(store); +} + +} // namespace nix diff --git a/src/libstore/build-result.cc b/src/libstore/build-result.cc index f4bc8ab3353..4967b64423d 100644 --- a/src/libstore/build-result.cc +++ b/src/libstore/build-result.cc @@ -22,7 +22,7 @@ static constexpr std::array, 12> failureStatusStrings{{ +static constexpr std::array, 13> failureStatusStrings{{ #define ENUM_ENTRY(e) {BuildResult::Failure::e, #e} ENUM_ENTRY(PermanentFailure), ENUM_ENTRY(InputRejected), @@ -54,10 +54,11 @@ static constexpr std::array::to_json(json & res, const BuildResult & br) overloaded{ [&](const BuildResult::Success & success) { res["success"] = true; - res["status"] = successStatusToString(success.status); + res["status"] = BuildResult::Success::statusToString(success.status); res["builtOutputs"] = success.builtOutputs; }, [&](const BuildResult::Failure & failure) { res["success"] = false; - res["status"] = failureStatusToString(failure.status); + 
res["status"] = BuildResult::Failure::statusToString(failure.status); res["errorMsg"] = failure.errorMsg; res["isNonDeterministic"] = failure.isNonDeterministic; }, diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 8221e12c697..10ba0e78b77 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -628,6 +628,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() .defaultPathsInChroot = std::move(defaultPathsInChroot), .systemFeatures = worker.store.config.systemFeatures.get(), .desugaredEnv = std::move(desugaredEnv), + .act = act, }; /* If we have to wait and retry (see below), then `builder` will @@ -812,7 +813,7 @@ BuildError DerivationBuildingGoal::fixupBuilderFailureErrorMessage(BuilderFailur msg += line; msg += "\n"; } - auto nixLogCommand = experimentalFeatureSettings.isEnabled(Xp::NixCommand) ? "nix log" : "nix-store -l"; + auto nixLogCommand = "nix log"; // The command is on a separate line for easy copying, such as with triple click. // This message will be indented elsewhere, so removing the indentation before the // command will not put it at the start of the line unfortunately. @@ -1181,6 +1182,13 @@ Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status stat .builtOutputs = std::move(builtOutputs), }; + logger->result( + act ? act->id : getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + mcRunningBuilds.reset(); if (status == BuildResult::Success::Built) @@ -1198,6 +1206,13 @@ Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) .errorMsg = fmt("%s", Uncolored(ex.info().msg)), }; + logger->result( + act ? 
act->id : getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + mcRunningBuilds.reset(); if (ex.status == BuildResult::Failure::TimedOut) diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index e56b9fe49a1..677546e878b 100644 --- a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -12,7 +12,8 @@ void checkOutputs( const StorePath & drvPath, const decltype(Derivation::outputs) & drvOutputs, const decltype(DerivationOptions::outputChecks) & outputChecks, - const std::map & outputs) + const std::map & outputs, + Activity & act) { std::map outputsByPath; for (auto & output : outputs) @@ -36,6 +37,13 @@ void checkOutputs( if (wanted != got) { /* Throw an error after registering the path as valid. */ + act.result( + resHashMismatch, + { + {"storePath", store.printStorePath(drvPath)}, + {"wanted", wanted}, + {"got", got}, + }); throw BuildError( BuildResult::Failure::HashMismatch, "hash mismatch in fixed-output derivation '%s':\n specified: %s\n got: %s", diff --git a/src/libstore/build/derivation-check.hh b/src/libstore/build/derivation-check.hh index 01e6c5d5638..ee2d0122952 100644 --- a/src/libstore/build/derivation-check.hh +++ b/src/libstore/build/derivation-check.hh @@ -22,6 +22,7 @@ void checkOutputs( const StorePath & drvPath, const decltype(Derivation::outputs) & drvOutputs, const decltype(DerivationOptions::outputChecks) & drvOptions, - const std::map & outputs); + const std::map & outputs, + Activity & act); } // namespace nix diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index b36685a242c..1908f957313 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -466,6 +466,13 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Unke }}, }; + 
logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + mcExpectedBuilds.reset(); if (status == BuildResult::Success::Built) @@ -483,6 +490,13 @@ Goal::Done DerivationGoal::doneFailure(BuildError ex) .errorMsg = fmt("%s", Uncolored(ex.info().msg)), }; + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + mcExpectedBuilds.reset(); if (ex.status == BuildResult::Failure::TimedOut) diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index ac18de304b7..b2e321c7238 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -8,6 +8,8 @@ #include +#include + namespace nix { PathSubstitutionGoal::PathSubstitutionGoal( @@ -32,6 +34,12 @@ Goal::Done PathSubstitutionGoal::doneSuccess(BuildResult::Success::Status status buildResult.inner = BuildResult::Success{ .status = status, }; + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult(buildResult, DerivedPath::Opaque{storePath}))); + return amDone(ecSuccess); } @@ -42,6 +50,12 @@ Goal::Done PathSubstitutionGoal::doneFailure(ExitCode result, BuildResult::Failu .status = status, .errorMsg = std::move(errorMsg), }; + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult(buildResult, DerivedPath::Opaque{storePath}))); + return amDone(result); } diff --git a/src/libstore/common-protocol.cc b/src/libstore/common-protocol.cc index b069c949823..3db3c419fb2 100644 --- a/src/libstore/common-protocol.cc +++ b/src/libstore/common-protocol.cc @@ -26,13 +26,13 @@ void CommonProto::Serialise::write( StorePath CommonProto::Serialise::read(const StoreDirConfig & store, 
CommonProto::ReadConn conn) { - return store.parseStorePath(readString(conn.from)); + return conn.shortStorePaths ? StorePath(readString(conn.from)) : store.parseStorePath(readString(conn.from)); } void CommonProto::Serialise::write( const StoreDirConfig & store, CommonProto::WriteConn conn, const StorePath & storePath) { - conn.to << store.printStorePath(storePath); + conn.to << (conn.shortStorePaths ? storePath.to_string() : store.printStorePath(storePath)); } ContentAddress CommonProto::Serialise::read(const StoreDirConfig & store, CommonProto::ReadConn conn) @@ -78,13 +78,15 @@ std::optional CommonProto::Serialise>::read(const StoreDirConfig & store, CommonProto::ReadConn conn) { auto s = readString(conn.from); - return s == "" ? std::optional{} : store.parseStorePath(s); + return s == "" ? std::optional{} : conn.shortStorePaths ? StorePath(s) : store.parseStorePath(s); } void CommonProto::Serialise>::write( const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional & storePathOpt) { - conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : ""); + conn.to + << (storePathOpt ? (conn.shortStorePaths ? storePathOpt->to_string() : store.printStorePath(*storePathOpt)) + : ""); } std::optional diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 4d1c9078ff0..f71f66db5d8 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -17,6 +17,7 @@ #include "nix/util/git.hh" #include "nix/util/logging.hh" #include "nix/store/globals.hh" +#include "nix/store/active-builds.hh" #ifndef _WIN32 // TODO need graceful async exit support on Windows? 
# include "nix/util/monitor-fd.hh" @@ -746,6 +747,7 @@ static void performOp( options.action = WorkerProto::Serialise::read(*store, rconn); options.pathsToDelete = WorkerProto::Serialise::read(*store, rconn); conn.from >> options.ignoreLiveness >> options.maxFreed; + options.censor = !trusted; // obsolete fields readInt(conn.from); readInt(conn.from); @@ -754,7 +756,7 @@ static void performOp( GCResults results; logger->startWork(); - if (options.ignoreLiveness) + if (options.ignoreLiveness && !getEnv("_NIX_IN_TEST").has_value()) throw Error("you are not allowed to ignore liveness"); auto & gcStore = require(*store); gcStore.collectGarbage(options, results); @@ -1015,6 +1017,15 @@ static void performOp( case WorkerProto::Op::ClearFailedPaths: throw Error("Removed operation %1%", op); + case WorkerProto::Op::QueryActiveBuilds: { + logger->startWork(); + auto & activeBuildsStore = require(*store); + auto activeBuilds = activeBuildsStore.queryActiveBuilds(); + logger->stopWork(); + conn.to << nlohmann::json(activeBuilds).dump(); + break; + } + default: throw Error("invalid operation %1%", op); } diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index a4cdcb17a70..1a5d683c865 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -9,6 +9,7 @@ #include "nix/store/common-protocol-impl.hh" #include "nix/util/strings-inline.hh" #include "nix/util/json-utils.hh" +#include "nix/store/async-path-writer.hh" #include #include @@ -156,6 +157,20 @@ StorePath Store::writeDerivation(const Derivation & drv, RepairFlag repair) return path; } +StorePath writeDerivation( + Store & store, AsyncPathWriter & asyncPathWriter, const Derivation & drv, RepairFlag repair, bool readOnly) +{ + auto references = drv.inputSrcs; + for (auto & i : drv.inputDrvs.map) + references.insert(i.first); + return asyncPathWriter.addPath( + drv.unparse(store, false), + std::string(drv.name) + drvExtension, + references, + repair, + readOnly || 
settings.readOnlyMode); +} + namespace { /** * This mimics std::istream to some extent. We use this much smaller implementation diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc index b1c61626c8c..7b6193c657d 100644 --- a/src/libstore/export-import.cc +++ b/src/libstore/export-import.cc @@ -4,92 +4,163 @@ #include "nix/util/archive.hh" #include "nix/store/common-protocol.hh" #include "nix/store/common-protocol-impl.hh" - -#include +#include "nix/store/worker-protocol.hh" namespace nix { -static void exportPath(Store & store, const StorePath & path, Sink & sink) -{ - auto info = store.queryPathInfo(path); - - HashSink hashSink(HashAlgorithm::SHA256); - TeeSink teeSink(sink, hashSink); - - store.narFromPath(path, teeSink); - - /* Refuse to export paths that have changed. This prevents - filesystem corruption from spreading to other machines. - Don't complain if the stored hash is zero (unknown). */ - Hash hash = hashSink.currentHash().hash; - if (hash != info->narHash && info->narHash != Hash(info->narHash.algo)) - throw Error( - "hash of path '%s' has changed from '%s' to '%s'!", - store.printStorePath(path), - info->narHash.to_string(HashFormat::Nix32, true), - hash.to_string(HashFormat::Nix32, true)); - - teeSink << exportMagic << store.printStorePath(path); - CommonProto::write(store, CommonProto::WriteConn{.to = teeSink}, info->references); - teeSink << (info->deriver ? 
store.printStorePath(*info->deriver) : "") << 0; -} +static const uint32_t exportMagicV1 = 0x4558494e; +static const uint64_t exportMagicV2 = 0x324f4952414e; // = 'NARIO2' -void exportPaths(Store & store, const StorePathSet & paths, Sink & sink) +void exportPaths(Store & store, const StorePathSet & paths, Sink & sink, unsigned int version) { auto sorted = store.topoSortPaths(paths); std::reverse(sorted.begin(), sorted.end()); - for (auto & path : sorted) { - sink << 1; - exportPath(store, path, sink); + auto dumpNar = [&](const ValidPathInfo & info) { + HashSink hashSink(HashAlgorithm::SHA256); + TeeSink teeSink(sink, hashSink); + + store.narFromPath(info.path, teeSink); + + /* Refuse to export paths that have changed. This prevents + filesystem corruption from spreading to other machines. + Don't complain if the stored hash is zero (unknown). */ + Hash hash = hashSink.currentHash().hash; + if (hash != info.narHash && info.narHash != Hash(info.narHash.algo)) + throw Error( + "hash of path '%s' has changed from '%s' to '%s'!", + store.printStorePath(info.path), + info.narHash.to_string(HashFormat::Nix32, true), + hash.to_string(HashFormat::Nix32, true)); + }; + + switch (version) { + + case 1: + for (auto & path : sorted) { + sink << 1; + auto info = store.queryPathInfo(path); + dumpNar(*info); + sink << exportMagicV1 << store.printStorePath(path); + CommonProto::write(store, CommonProto::WriteConn{.to = sink}, info->references); + sink << (info->deriver ? store.printStorePath(*info->deriver) : "") << 0; + } + sink << 0; + break; + + case 2: + sink << exportMagicV2; + + for (auto & path : sorted) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("exporting path '%s'", store.printStorePath(path))); + sink << 1; + auto info = store.queryPathInfo(path); + // FIXME: move to CommonProto? 
+ WorkerProto::Serialise::write( + store, WorkerProto::WriteConn{.to = sink, .version = 16, .shortStorePaths = true}, *info); + dumpNar(*info); + } + + sink << 0; + break; + + default: + throw Error("unsupported nario version %d", version); } - - sink << 0; } StorePaths importPaths(Store & store, Source & source, CheckSigsFlag checkSigs) { StorePaths res; - while (true) { - auto n = readNum(source); - if (n == 0) - break; - if (n != 1) - throw Error("input doesn't look like something created by 'nix-store --export'"); - - /* Extract the NAR from the source. */ + + auto version = readNum(source); + + /* Note: nario version 1 lacks an explicit header. The first + integer denotes whether a store path follows or not. So look + for 0 or 1. */ + switch (version) { + + case 0: + /* Empty version 1 nario, nothing to do. */ + break; + + case 1: { + /* Reuse a string buffer to avoid kernel overhead allocating + memory for large strings. */ StringSink saved; - TeeSource tee{source, saved}; - NullFileSystemObjectSink ether; - parseDump(ether, tee); - uint32_t magic = readInt(source); - if (magic != exportMagic) - throw Error("Nix archive cannot be imported; wrong format"); + /* Non-empty version 1 nario. */ + while (true) { + /* Extract the NAR from the source. */ + saved.s.clear(); + TeeSource tee{source, saved}; + NullFileSystemObjectSink ether; + parseDump(ether, tee); + + uint32_t magic = readInt(source); + if (magic != exportMagicV1) + throw Error("nario cannot be imported; wrong format"); + + auto path = store.parseStorePath(readString(source)); + + auto references = CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = source}); + auto deriver = readString(source); + + // Ignore optional legacy signature. 
+ if (readInt(source) == 1) + readString(source); + + if (!store.isValidPath(path)) { + auto narHash = hashString(HashAlgorithm::SHA256, saved.s); + + ValidPathInfo info{path, {store, narHash}}; + if (deriver != "") + info.deriver = store.parseStorePath(deriver); + info.references = references; + info.narSize = saved.s.size(); + + // Can't use underlying source, which would have been exhausted. + auto source2 = StringSource(saved.s); + store.addToStore(info, source2, NoRepair, checkSigs); + } + + res.push_back(path); + + auto n = readNum(source); + if (n == 0) + break; + if (n != 1) + throw Error("input doesn't look like a nario"); + } + break; + } - auto path = store.parseStorePath(readString(source)); + case exportMagicV2: + while (true) { + auto n = readNum(source); + if (n == 0) + break; + if (n != 1) + throw Error("input doesn't look like a nario"); - // Activity act(*logger, lvlInfo, "importing path '%s'", info.path); + auto info = WorkerProto::Serialise::read( + store, WorkerProto::ReadConn{.from = source, .version = 16, .shortStorePaths = true}); - auto references = CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = source}); - auto deriver = readString(source); - auto narHash = hashString(HashAlgorithm::SHA256, saved.s); + if (!store.isValidPath(info.path)) { + Activity act( + *logger, lvlTalkative, actUnknown, fmt("importing path '%s'", store.printStorePath(info.path))); - ValidPathInfo info{path, {store, narHash}}; - if (deriver != "") - info.deriver = store.parseStorePath(deriver); - info.references = references; - info.narSize = saved.s.size(); + store.addToStore(info, source, NoRepair, checkSigs); + } else + source.skip(info.narSize); - // Ignore optional legacy signature. 
- if (readInt(source) == 1) - readString(source); + res.push_back(info.path); + } - // Can't use underlying source, which would have been exhausted - auto source = StringSource(saved.s); - store.addToStore(info, source, NoRepair, checkSigs); + break; - res.push_back(info.path); + default: + throw Error("input doesn't look like a nario"); } return res; diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 26ceba729e1..039f766104d 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -53,7 +53,7 @@ struct curlFileTransfer : public FileTransfer curlFileTransfer & fileTransfer; FileTransferRequest request; FileTransferResult result; - Activity act; + std::unique_ptr _act; bool done = false; // whether either the success or failure function has been called Callback callback; CURL * req = 0; @@ -98,12 +98,6 @@ struct curlFileTransfer : public FileTransfer Callback && callback) : fileTransfer(fileTransfer) , request(request) - , act(*logger, - lvlTalkative, - actFileTransfer, - fmt("%s '%s'", request.verb(/*continuous=*/true), request.uri), - {request.uri.to_string()}, - request.parentAct) , callback(std::move(callback)) , finalSink([this](std::string_view data) { if (errorSink) { @@ -301,9 +295,29 @@ struct curlFileTransfer : public FileTransfer return ((TransferItem *) userp)->headerCallback(contents, size, nmemb); } + /** + * Lazily start an `Activity`. We don't do this in the `TransferItem` constructor to avoid showing downloads + * that are only enqueued but not actually started. + */ + Activity & act() + { + if (!_act) { + _act = std::make_unique( + *logger, + lvlTalkative, + actFileTransfer, + fmt("%s '%s'", request.verb(/*continuous=*/true), request.uri), + Logger::Fields{request.uri.to_string()}, + request.parentAct); + // Reset the start time to when we actually started the download. 
+ startTime = std::chrono::steady_clock::now(); + } + return *_act; + } + int progressCallback(curl_off_t dltotal, curl_off_t dlnow) noexcept try { - act.progress(dlnow, dltotal); + act().progress(dlnow, dltotal); return getInterrupted(); } catch (nix::Interrupted &) { assert(getInterrupted()); @@ -380,6 +394,15 @@ struct curlFileTransfer : public FileTransfer return ((TransferItem *) clientp)->seekCallback(offset, origin); } + static int resolverCallbackWrapper(void *, void *, void * clientp) noexcept + try { + // Create the `Activity` associated with this download. + ((TransferItem *) clientp)->act(); + return 0; + } catch (...) { + return 1; + } + void unpause() { /* Unpausing an already unpaused transfer is a no-op. */ @@ -408,7 +431,7 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt( req, CURLOPT_USERAGENT, - ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + " DeterminateNix/" + determinateNixVersion + (fileTransferSettings.userAgentSuffix != "" ? " " + fileTransferSettings.userAgentSuffix.get() : "")) .c_str()); #if LIBCURL_VERSION_NUM >= 0x072b00 @@ -497,6 +520,11 @@ struct curlFileTransfer : public FileTransfer } #endif + // This seems to be the earliest libcurl callback that signals that the download is happening, so we can + // call act(). 
+ curl_easy_setopt(req, CURLOPT_RESOLVER_START_FUNCTION, resolverCallbackWrapper); + curl_easy_setopt(req, CURLOPT_RESOLVER_START_DATA, this); + result.data.clear(); result.bodySize = 0; } @@ -545,7 +573,7 @@ struct curlFileTransfer : public FileTransfer if (httpStatus == 304 && result.etag == "") result.etag = request.expectedETag; - act.progress(result.bodySize, result.bodySize); + act().progress(result.bodySize, result.bodySize); done = true; callback(std::move(result)); } @@ -703,6 +731,8 @@ struct curlFileTransfer : public FileTransfer std::thread workerThread; + const size_t maxQueueSize = fileTransferSettings.httpConnections.get() * 5; + curlFileTransfer() : mt19937(rd()) { @@ -832,6 +862,13 @@ struct curlFileTransfer : public FileTransfer { auto state(state_.lock()); while (!state->incoming.empty()) { + /* Limit the number of active curl handles, since curl doesn't scale well. */ + if (items.size() + incoming.size() >= maxQueueSize) { + auto t = now + std::chrono::milliseconds(100); + if (nextWakeup == std::chrono::steady_clock::time_point() || t < nextWakeup) + nextWakeup = t; + break; + } auto item = state->incoming.top(); if (item->embargo <= now) { incoming.push_back(item); @@ -933,24 +970,29 @@ struct curlFileTransfer : public FileTransfer } }; -ref makeCurlFileTransfer() -{ - return make_ref(); -} +static Sync> _fileTransfer; ref getFileTransfer() { - static ref fileTransfer = makeCurlFileTransfer(); + auto fileTransfer(_fileTransfer.lock()); - if (fileTransfer->state_.lock()->isQuitting()) - fileTransfer = makeCurlFileTransfer(); + if (!*fileTransfer || (*fileTransfer)->state_.lock()->isQuitting()) + *fileTransfer = std::make_shared(); - return fileTransfer; + return ref(*fileTransfer); } ref makeFileTransfer() { - return makeCurlFileTransfer(); + return make_ref(); +} + +std::shared_ptr resetFileTransfer() +{ + auto fileTransfer(_fileTransfer.lock()); + std::shared_ptr prev; + fileTransfer->swap(prev); + return prev; } void 
FileTransferRequest::setupForS3() diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 4846d445fe1..37f148cbc43 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -208,7 +208,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor) while ((end = contents.find((char) 0, pos)) != std::string::npos) { Path root(contents, pos, end - pos); debug("got temporary root '%s'", root); - tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid)); + tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{nix-process:%d}", pid)); pos = end + 1; } } @@ -467,13 +467,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) bool gcKeepOutputs = settings.gcKeepOutputs; bool gcKeepDerivations = settings.gcKeepDerivations; - boost::unordered_flat_set> roots, dead, alive; + Roots roots; + boost::unordered_flat_set> dead, alive; struct Shared { // The temp roots only store the hash part to make it easier to // ignore suffixes like '.lock', '.chroot' and '.check'. - boost::unordered_flat_set> tempRoots; + boost::unordered_flat_map tempRoots; // Hash part of the store path currently being deleted, if // any. @@ -584,7 +585,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) debug("got new GC root '%s'", path); auto hashPart = storePath->hashPart(); auto shared(_shared.lock()); - shared->tempRoots.emplace(hashPart); + // FIXME: could get the PID from the socket. + shared->tempRoots.insert_or_assign(std::string(hashPart), "{nix-process:unknown}"); /* If this path is currently being deleted, then we have to wait until deletion is finished to ensure that @@ -624,20 +626,16 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) /* Find the roots. Since we've grabbed the GC lock, the set of permanent roots cannot increase now. 
*/ printInfo("finding garbage collector roots..."); - Roots rootMap; if (!options.ignoreLiveness) - findRootsNoTemp(rootMap, true); - - for (auto & i : rootMap) - roots.insert(i.first); + findRootsNoTemp(roots, options.censor); /* Read the temporary roots created before we acquired the global GC root. Any new roots will be sent to our socket. */ - Roots tempRoots; - findTempRoots(tempRoots, true); - for (auto & root : tempRoots) { - _shared.lock()->tempRoots.emplace(root.first.hashPart()); - roots.insert(root.first); + { + Roots tempRoots; + findTempRoots(tempRoots, options.censor); + for (auto & root : tempRoots) + _shared.lock()->tempRoots.insert_or_assign(std::string(root.first.hashPart()), *root.second.begin()); } /* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. */ @@ -733,20 +731,32 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) } }; + if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) { + throw Error( + "Cannot delete path '%s' because it's referenced by path '%s'.", + printStorePath(start), + printStorePath(*path)); + } + /* If this is a root, bail out. 
*/ - if (roots.count(*path)) { + if (auto i = roots.find(*path); i != roots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's referenced by the GC root '%s'.", + printStorePath(start), + *i->second.begin()); debug("cannot delete '%s' because it's a root", printStorePath(*path)); return markAlive(); } - if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) - return; - - { + static bool inTest = getEnv("_NIX_IN_TEST").has_value(); + if (!(inTest && options.ignoreLiveness)) { auto hashPart = path->hashPart(); auto shared(_shared.lock()); - if (shared->tempRoots.count(hashPart)) { - debug("cannot delete '%s' because it's a temporary root", printStorePath(*path)); + if (auto i = shared->tempRoots.find(std::string(hashPart)); i != shared->tempRoots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's in use by '%s'.", printStorePath(start), i->second); return markAlive(); } shared->pending = hashPart; @@ -805,12 +815,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) for (auto & i : options.pathsToDelete) { deleteReferrersClosure(i); - if (!dead.count(i)) - throw Error( - "Cannot delete path '%1%' since it is still alive. 
" - "To find out why, use: " - "nix-store --query --roots and nix-store --query --referrers", - printStorePath(i)); + assert(dead.count(i)); } } else if (options.maxFreed > 0) { diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 27d17e1a9b9..72fea31775e 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -269,6 +269,8 @@ const ExternalBuilder * Settings::findExternalDerivationBuilderIfSupported(const std::string nixVersion = PACKAGE_VERSION; +const std::string determinateNixVersion = DETERMINATE_NIX_VERSION; + NLOHMANN_JSON_SERIALIZE_ENUM( SandboxMode, { diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index ef6ae92a44d..d4361264edf 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -182,7 +182,7 @@ FileTransferRequest HttpBinaryCacheStore::makeRequest(std::string_view path) /* path is not a path, but a full relative or absolute URL, e.g. we've seen in the wild NARINFO files have a URL field which is - `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=zphkqn2wg8mnvbkixnl2aadkbn0rcnfj` + `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=wvx0nans273vb7b0cjlplsmr2z905hwd` (note the query param) and that gets passed here. */ auto result = parseURLRelative(path, cacheUriWithTrailingSlash); diff --git a/src/libstore/include/nix/store/active-builds.hh b/src/libstore/include/nix/store/active-builds.hh new file mode 100644 index 00000000000..c8a40e13798 --- /dev/null +++ b/src/libstore/include/nix/store/active-builds.hh @@ -0,0 +1,108 @@ +#pragma once + +#include "nix/util/util.hh" +#include "nix/util/json-impls.hh" +#include "nix/store/path.hh" + +#include +#include + +namespace nix { + +/** + * A uid and optional corresponding user name. + */ +struct UserInfo +{ + uid_t uid = -1; + std::optional name; + + /** + * Create a UserInfo from a UID, looking up the username if possible. 
+ */ + static UserInfo fromUid(uid_t uid); +}; + +struct ActiveBuild +{ + pid_t nixPid; + + std::optional clientPid; + std::optional clientUid; + + pid_t mainPid; + UserInfo mainUser; + std::optional cgroup; + + time_t startTime; + + StorePath derivation; +}; + +struct ActiveBuildInfo : ActiveBuild +{ + struct ProcessInfo + { + pid_t pid = 0; + pid_t parentPid = 0; + UserInfo user; + std::vector argv; + std::optional utime, stime, cutime, cstime; + }; + + // User/system CPU time for the entire cgroup, if available. + std::optional utime, stime; + + std::vector processes; +}; + +struct TrackActiveBuildsStore +{ + struct BuildHandle + { + TrackActiveBuildsStore & tracker; + uint64_t id; + + BuildHandle(TrackActiveBuildsStore & tracker, uint64_t id) + : tracker(tracker) + , id(id) + { + } + + BuildHandle(BuildHandle && other) noexcept + : tracker(other.tracker) + , id(other.id) + { + other.id = 0; + } + + ~BuildHandle() + { + if (id) { + try { + tracker.buildFinished(*this); + } catch (...) 
{ + ignoreExceptionInDestructor(); + } + } + } + }; + + virtual BuildHandle buildStarted(const ActiveBuild & build) = 0; + + virtual void buildFinished(const BuildHandle & handle) = 0; +}; + +struct QueryActiveBuildsStore +{ + inline static std::string operationName = "Querying active builds"; + + virtual std::vector queryActiveBuilds() = 0; +}; + +} // namespace nix + +JSON_IMPL(UserInfo) +JSON_IMPL(ActiveBuild) +JSON_IMPL(ActiveBuildInfo) +JSON_IMPL(ActiveBuildInfo::ProcessInfo) diff --git a/src/libstore/include/nix/store/async-path-writer.hh b/src/libstore/include/nix/store/async-path-writer.hh new file mode 100644 index 00000000000..80997dc6ac2 --- /dev/null +++ b/src/libstore/include/nix/store/async-path-writer.hh @@ -0,0 +1,19 @@ +#pragma once + +#include "nix/store/store-api.hh" + +namespace nix { + +struct AsyncPathWriter +{ + virtual StorePath addPath( + std::string contents, std::string name, StorePathSet references, RepairFlag repair, bool readOnly = false) = 0; + + virtual void waitForPath(const StorePath & path) = 0; + + virtual void waitForAllPaths() = 0; + + static ref make(ref store); +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index 96134791b9d..bbf4de6310a 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -30,6 +30,8 @@ struct BuildResult ResolvesToAlreadyValid = 13, } status; + static std::string_view statusToString(Status status); + /** * For derivations, a mapping from the names of the wanted outputs * to actual paths. @@ -74,8 +76,11 @@ struct BuildResult /// know about this one, so change it back to `OutputRejected` /// before serialization. HashMismatch = 15, + Cancelled = 16, } status = MiscFailure; + static std::string_view statusToString(Status status); + /** * Information about the error if the build failed. 
* @@ -97,7 +102,7 @@ struct BuildResult [[noreturn]] void rethrow() const { - throw Error("%s", errorMsg); + throw Error("%s", errorMsg.empty() ? statusToString(status) : errorMsg); } }; @@ -141,6 +146,13 @@ struct BuildResult bool operator==(const BuildResult &) const noexcept; std::strong_ordering operator<=>(const BuildResult &) const noexcept; + + bool isCancelled() const + { + auto failure = tryGetFailure(); + // FIXME: remove MiscFailure eventually. + return failure && (failure->status == Failure::Cancelled || failure->status == Failure::MiscFailure); + } }; /** diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index af84661e252..c51424d0ea6 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -79,7 +79,7 @@ struct DerivationBuilderParams */ const StorePathSet & inputPaths; - const std::map & initialOutputs; + const std::map initialOutputs; const BuildMode & buildMode; @@ -98,6 +98,11 @@ struct DerivationBuilderParams StringSet systemFeatures; DesugaredEnv desugaredEnv; + + /** + * The activity corresponding to the build. + */ + std::unique_ptr & act; }; /** diff --git a/src/libstore/include/nix/store/build/goal.hh b/src/libstore/include/nix/store/build/goal.hh index 4d57afc0f7c..f048c75687c 100644 --- a/src/libstore/include/nix/store/build/goal.hh +++ b/src/libstore/include/nix/store/build/goal.hh @@ -109,7 +109,7 @@ public: /** * Build result. */ - BuildResult buildResult; + BuildResult buildResult = {.inner = BuildResult::Failure{.status = BuildResult::Failure::Cancelled}}; /** * Suspend our goal and wait until we get `work`-ed again. 
diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index 7cc9c091102..fee11e59e9f 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -8,8 +8,12 @@ # include "nix/store/aws-creds.hh" #endif +#include + namespace nix { +struct StructuredAttrs; + struct BuiltinBuilderContext { const BasicDerivation & drv; diff --git a/src/libstore/include/nix/store/common-protocol.hh b/src/libstore/include/nix/store/common-protocol.hh index c1d22fa6c54..6139afc5d2e 100644 --- a/src/libstore/include/nix/store/common-protocol.hh +++ b/src/libstore/include/nix/store/common-protocol.hh @@ -30,6 +30,7 @@ struct CommonProto struct ReadConn { Source & from; + bool shortStorePaths = false; }; /** @@ -39,6 +40,7 @@ struct CommonProto struct WriteConn { Sink & to; + bool shortStorePaths = false; }; template diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index a8c702fc366..e4c3e29e877 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -17,6 +17,7 @@ namespace nix { struct StoreDirConfig; +struct AsyncPathWriter; /* Abstract syntax of derivations. */ @@ -457,6 +458,16 @@ class Store; */ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repair = NoRepair, bool readOnly = false); +/** + * Asynchronously write a derivation to the Nix store, and return its path. + */ +StorePath writeDerivation( + Store & store, + AsyncPathWriter & asyncPathWriter, + const Derivation & drv, + RepairFlag repair = NoRepair, + bool readOnly = false); + /** * Read a derivation from a file. 
*/ diff --git a/src/libstore/include/nix/store/export-import.hh b/src/libstore/include/nix/store/export-import.hh index 15092202f1f..4ea696f992f 100644 --- a/src/libstore/include/nix/store/export-import.hh +++ b/src/libstore/include/nix/store/export-import.hh @@ -4,16 +4,11 @@ namespace nix { -/** - * Magic header of exportPath() output (obsolete). - */ -const uint32_t exportMagic = 0x4558494e; - /** * Export multiple paths in the format expected by `nix-store * --import`. The paths will be sorted topologically. */ -void exportPaths(Store & store, const StorePathSet & paths, Sink & sink); +void exportPaths(Store & store, const StorePathSet & paths, Sink & sink, unsigned int version); /** * Import a sequence of NAR dumps created by `exportPaths()` into the diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 57b781c3320..fa8a649e2b3 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -328,6 +328,8 @@ ref getFileTransfer(); */ ref makeFileTransfer(); +std::shared_ptr resetFileTransfer(); + class FileTransferError : public Error { public: diff --git a/src/libstore/include/nix/store/gc-store.hh b/src/libstore/include/nix/store/gc-store.hh index 7f04ed5a2c2..de7a71382f9 100644 --- a/src/libstore/include/nix/store/gc-store.hh +++ b/src/libstore/include/nix/store/gc-store.hh @@ -7,9 +7,13 @@ namespace nix { +// FIXME: should turn this into an std::variant to represent the +// several root types. +using GcRootInfo = std::string; + typedef boost::unordered_flat_map< StorePath, - boost::unordered_flat_set>, + boost::unordered_flat_set>, std::hash> Roots; @@ -58,6 +62,12 @@ struct GCOptions * Stop after at least `maxFreed` bytes have been freed. */ uint64_t maxFreed{std::numeric_limits::max()}; + + /** + * Whether to hide potentially sensitive information about GC + * roots (such as PIDs). 
+ */ + bool censor = false; }; struct GCResults diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 89e92d30e44..37130521bbf 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -225,12 +225,8 @@ public: The following system types are widely used, as Nix is actively supported on these platforms: - `x86_64-linux` - - `x86_64-darwin` - - `i686-linux` - `aarch64-linux` - `aarch64-darwin` - - `armv6l-linux` - - `armv7l-linux` In general, you do not have to modify this setting. While you can force Nix to run a Darwin-specific `builder` executable on a Linux machine, the result would obviously be wrong. @@ -1125,11 +1121,11 @@ public: character. Example: - `/nix/store/zf5lbh336mnzf1nlswdn11g4n2m8zh3g-bash-4.4-p23-dev - /nix/store/rjxwxwv1fpn9wa2x5ssk5phzwlcv4mna-bash-4.4-p23-doc - /nix/store/6bqvbzjkcp9695dq0dpl5y43nvy37pq1-bash-4.4-p23-info - /nix/store/r7fng3kk3vlpdlh2idnrbn37vh4imlj2-bash-4.4-p23-man - /nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`. + `/nix/store/l88brggg9hpy96ijds34dlq4n8fan63g-bash-4.4-p23-dev + /nix/store/vch71bhyi5akr5zs40k8h2wqxx69j80l-bash-4.4-p23-doc + /nix/store/c5cxjywi66iwn9dcx5yvwjkvl559ay6p-bash-4.4-p23-info + /nix/store/scz72lskj03ihkcn42ias5mlp4i4gr1k-bash-4.4-p23-man + /nix/store/a724znygmd1cac856j3gfsyvih3lw07j-bash-4.4-p23`. )"}; Setting downloadSpeed{ @@ -1357,11 +1353,12 @@ public: Setting upgradeNixStorePathUrl{ this, - "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix", + "", "upgrade-nix-store-path-url", R"( - Used by `nix upgrade-nix`, the URL of the file that contains the - store paths of the latest Nix release. + Deprecated. This option was used to configure how `nix upgrade-nix` operated. + + Using this setting has no effect. It will be removed in a future release of Determinate Nix. 
)"}; Setting warnLargePathThreshold{ @@ -1470,6 +1467,8 @@ std::vector getUserConfigFiles(); */ extern std::string nixVersion; +extern const std::string determinateNixVersion; + /** * @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests. * @note When using libexpr, and/or libmain, This is not sufficient. See initNix(). diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index 7d93d7045f4..fd457c2d3be 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -6,6 +6,7 @@ #include "nix/store/pathlocks.hh" #include "nix/store/store-api.hh" #include "nix/store/indirect-root-store.hh" +#include "nix/store/active-builds.hh" #include "nix/util/sync.hh" #include @@ -127,7 +128,10 @@ public: StoreReference getReference() const override; }; -class LocalStore : public virtual IndirectRootStore, public virtual GcStore +class LocalStore : public virtual IndirectRootStore, + public virtual GcStore, + public virtual TrackActiveBuildsStore, + public virtual QueryActiveBuildsStore { public: @@ -459,6 +463,24 @@ private: friend struct PathSubstitutionGoal; friend struct DerivationGoal; + +private: + + std::filesystem::path activeBuildsDir; + + struct ActiveBuildFile + { + AutoCloseFD fd; + AutoDelete del; + }; + + Sync> activeBuilds; + + std::vector queryActiveBuilds() override; + + BuildHandle buildStarted(const ActiveBuild & build) override; + + void buildFinished(const BuildHandle & handle) override; }; } // namespace nix diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index c17d6a9cb5a..91bce9ba9b9 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -10,6 +10,8 @@ config_pub_h = configure_file( ) headers = [ config_pub_h ] + files( + 'active-builds.hh', + 'async-path-writer.hh', 'aws-creds.hh', 
'binary-cache-store.hh', 'build-result.hh', diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index b152e054b9d..1244eeec001 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -7,6 +7,7 @@ #include "nix/store/store-api.hh" #include "nix/store/gc-store.hh" #include "nix/store/log-store.hh" +#include "nix/store/active-builds.hh" namespace nix { @@ -23,7 +24,7 @@ struct RemoteStoreConfig : virtual StoreConfig using StoreConfig::StoreConfig; const Setting maxConnections{ - this, 1, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; + this, 64, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; const Setting maxConnectionAge{ this, @@ -36,7 +37,10 @@ struct RemoteStoreConfig : virtual StoreConfig * \todo RemoteStore is a misnomer - should be something like * DaemonStore. */ -struct RemoteStore : public virtual Store, public virtual GcStore, public virtual LogStore +struct RemoteStore : public virtual Store, + public virtual GcStore, + public virtual LogStore, + public virtual QueryActiveBuildsStore { using Config = RemoteStoreConfig; @@ -143,6 +147,8 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void addBuildLog(const StorePath & drvPath, std::string_view log) override; + std::vector queryActiveBuilds() override; + std::optional getVersion() override; void connect() override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index db107fc0ce7..e74bee09550 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -337,7 +337,9 @@ public: StorePath followLinksToStorePath(std::string_view path) const; /** - * Check whether a path is valid. + * Check whether a path is valid. NOTE: this function does not + * generally cache whether a path is valid. 
You may want to use + * `maybeQueryPathInfo()`, which does cache. */ bool isValidPath(const StorePath & path); @@ -377,10 +379,17 @@ public: /** * Query information about a valid path. It is permitted to omit - * the name part of the store path. + * the name part of the store path. Throws an exception if the + * path is not valid. */ ref queryPathInfo(const StorePath & path); + /** + * Like `queryPathInfo()`, but returns `nullptr` if the path is + * not valid. + */ + std::shared_ptr maybeQueryPathInfo(const StorePath & path); + /** * Asynchronous version of queryPathInfo(). */ diff --git a/src/libstore/include/nix/store/worker-protocol-impl.hh b/src/libstore/include/nix/store/worker-protocol-impl.hh index 26f6b9d44e4..c36145d620d 100644 --- a/src/libstore/include/nix/store/worker-protocol-impl.hh +++ b/src/libstore/include/nix/store/worker-protocol-impl.hh @@ -45,12 +45,14 @@ struct WorkerProto::Serialise { static T read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { - return CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = conn.from}); + return CommonProto::Serialise::read( + store, CommonProto::ReadConn{.from = conn.from, .shortStorePaths = conn.shortStorePaths}); } static void write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t) { - CommonProto::Serialise::write(store, CommonProto::WriteConn{.to = conn.to}, t); + CommonProto::Serialise::write( + store, CommonProto::WriteConn{.to = conn.to, .shortStorePaths = conn.shortStorePaths}, t); } }; diff --git a/src/libstore/include/nix/store/worker-protocol.hh b/src/libstore/include/nix/store/worker-protocol.hh index 87ef2a39984..36d918a3dc8 100644 --- a/src/libstore/include/nix/store/worker-protocol.hh +++ b/src/libstore/include/nix/store/worker-protocol.hh @@ -67,6 +67,7 @@ struct WorkerProto { Source & from; Version version; + bool shortStorePaths = false; }; /** @@ -77,6 +78,7 @@ struct WorkerProto { Sink & to; Version version; + bool shortStorePaths = false; }; 
/** @@ -137,6 +139,8 @@ struct WorkerProto using Feature = std::string; using FeatureSet = std::set>; + static constexpr std::string_view featureQueryActiveBuilds{"queryActiveBuilds"}; + static const FeatureSet allFeatures; }; @@ -185,6 +189,7 @@ enum struct WorkerProto::Op : uint64_t { AddBuildLog = 45, BuildPathsWithResults = 46, AddPermRoot = 47, + QueryActiveBuilds = 48, }; struct WorkerProto::ClientHandshakeInfo diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index 1a38cac3b7f..b8f8c6dbdea 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -55,7 +55,7 @@ struct LocalStoreAccessor : PosixSourceAccessor void requireStoreObject(const CanonPath & path) { auto [storePath, rest] = store->toStorePath(store->storeDir + path.abs()); - if (requireValidPath && !store->isValidPath(storePath)) + if (requireValidPath && !store->maybeQueryPathInfo(storePath)) throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); } diff --git a/src/libstore/local-store-active-builds.cc b/src/libstore/local-store-active-builds.cc new file mode 100644 index 00000000000..25c6ece5897 --- /dev/null +++ b/src/libstore/local-store-active-builds.cc @@ -0,0 +1,282 @@ +#include "nix/store/local-store.hh" +#include "nix/util/json-utils.hh" +#ifdef __linux__ +# include "nix/util/cgroup.hh" +# include +# include +# include +#endif + +#ifdef __APPLE__ +# include +# include +# include +#endif + +#include +#include + +namespace nix { + +#ifdef __linux__ +static ActiveBuildInfo::ProcessInfo getProcessInfo(pid_t pid) +{ + ActiveBuildInfo::ProcessInfo info; + info.pid = pid; + info.argv = + tokenizeString>(readFile(fmt("/proc/%d/cmdline", pid)), std::string("\000", 1)); + + auto statPath = fmt("/proc/%d/stat", pid); + + AutoCloseFD statFd = open(statPath.c_str(), O_RDONLY | O_CLOEXEC); + if (!statFd) + throw SysError("opening '%s'", statPath); + + // Get the UID from the ownership of the stat file. 
+ struct stat st; + if (fstat(statFd.get(), &st) == -1) + throw SysError("getting ownership of '%s'", statPath); + info.user = UserInfo::fromUid(st.st_uid); + + // Read /proc/[pid]/stat for parent PID and CPU times. + // Format: pid (comm) state ppid ... + // Note that the comm field can contain spaces, so use a regex to parse it. + auto statContent = trim(readFile(statFd.get())); + static std::regex statRegex(R"((\d+) \(([^)]*)\) (.*))"); + std::smatch match; + if (!std::regex_match(statContent, match, statRegex)) + throw Error("failed to parse /proc/%d/stat", pid); + + // Parse the remaining fields after (comm). + auto remainingFields = tokenizeString>(match[3].str()); + + if (remainingFields.size() > 1) + info.parentPid = string2Int(remainingFields[1]).value_or(0); + + static long clkTck = sysconf(_SC_CLK_TCK); + if (remainingFields.size() > 14 && clkTck > 0) { + if (auto utime = string2Int(remainingFields[11])) + info.utime = std::chrono::microseconds((*utime * 1'000'000) / clkTck); + if (auto stime = string2Int(remainingFields[12])) + info.stime = std::chrono::microseconds((*stime * 1'000'000) / clkTck); + if (auto cutime = string2Int(remainingFields[13])) + info.cutime = std::chrono::microseconds((*cutime * 1'000'000) / clkTck); + if (auto cstime = string2Int(remainingFields[14])) + info.cstime = std::chrono::microseconds((*cstime * 1'000'000) / clkTck); + } + + return info; +} + +/** + * Recursively get all descendant PIDs of a given PID using /proc/[pid]/task/[pid]/children. + */ +static std::set getDescendantPids(pid_t pid) +{ + std::set descendants; + + [&](this auto self, pid_t pid) -> void { + try { + descendants.insert(pid); + for (const auto & childPidStr : + tokenizeString>(readFile(fmt("/proc/%d/task/%d/children", pid, pid)))) + if (auto childPid = string2Int(childPidStr)) + self(*childPid); + } catch (...) { + // Process may have exited. 
+ ignoreExceptionExceptInterrupt(); + } + }(pid); + + return descendants; +} +#endif + +#ifdef __APPLE__ +static ActiveBuildInfo::ProcessInfo getProcessInfo(pid_t pid) +{ + ActiveBuildInfo::ProcessInfo info; + info.pid = pid; + + // Get basic process info including ppid and uid. + struct proc_bsdinfo procInfo; + if (proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &procInfo, sizeof(procInfo)) != sizeof(procInfo)) + throw SysError("getting process info for pid %d", pid); + + info.parentPid = procInfo.pbi_ppid; + info.user = UserInfo::fromUid(procInfo.pbi_uid); + + // Get CPU times. + struct proc_taskinfo taskInfo; + if (proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &taskInfo, sizeof(taskInfo)) == sizeof(taskInfo)) { + + mach_timebase_info_data_t timebase; + mach_timebase_info(&timebase); + auto nanosecondsPerTick = (double) timebase.numer / (double) timebase.denom; + + // Convert nanoseconds to microseconds. + info.utime = + std::chrono::microseconds((uint64_t) ((double) taskInfo.pti_total_user * nanosecondsPerTick / 1000)); + info.stime = + std::chrono::microseconds((uint64_t) ((double) taskInfo.pti_total_system * nanosecondsPerTick / 1000)); + } + + // Get argv using sysctl. + int mib[3] = {CTL_KERN, KERN_PROCARGS2, pid}; + size_t size = 0; + + // First call to get size. + if (sysctl(mib, 3, nullptr, &size, nullptr, 0) == 0 && size > 0) { + std::vector buffer(size); + if (sysctl(mib, 3, buffer.data(), &size, nullptr, 0) == 0) { + // Format: argc (int), followed by executable path, followed by null-terminated args + if (size >= sizeof(int)) { + int argc; + memcpy(&argc, buffer.data(), sizeof(argc)); + + // Skip past argc and executable path (null-terminated). + size_t pos = sizeof(int); + while (pos < size && buffer[pos] != '\0') + pos++; + pos++; // Skip the null terminator + + // Parse the arguments. 
+ while (pos < size && info.argv.size() < (size_t) argc) { + size_t argStart = pos; + while (pos < size && buffer[pos] != '\0') + pos++; + + if (pos > argStart) + info.argv.emplace_back(buffer.data() + argStart, pos - argStart); + + pos++; // Skip the null terminator + } + } + } + } + + return info; +} + +/** + * Recursively get all descendant PIDs using sysctl with KERN_PROC. + */ +static std::set getDescendantPids(pid_t startPid) +{ + // Get all processes. + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0}; + size_t size = 0; + + if (sysctl(mib, 4, nullptr, &size, nullptr, 0) == -1) + return {startPid}; + + std::vector procs(size / sizeof(struct kinfo_proc)); + if (sysctl(mib, 4, procs.data(), &size, nullptr, 0) == -1) + return {startPid}; + + // Get the children of all processes. + std::map> children; + size_t count = size / sizeof(struct kinfo_proc); + for (size_t i = 0; i < count; i++) { + pid_t childPid = procs[i].kp_proc.p_pid; + pid_t parentPid = procs[i].kp_eproc.e_ppid; + children[parentPid].insert(childPid); + } + + // Get all children of `pid`. + std::set descendants; + std::queue todo; + todo.push(startPid); + while (auto pid = pop(todo)) { + if (!descendants.insert(*pid).second) + continue; + for (auto & child : children[*pid]) + todo.push(child); + } + + return descendants; +} +#endif + +std::vector LocalStore::queryActiveBuilds() +{ + std::vector result; + + for (auto & entry : DirectoryIterator{activeBuildsDir}) { + auto path = entry.path(); + + try { + // Open the file. If we can lock it, the build is not active. + auto fd = openLockFile(path, false); + if (!fd || lockFile(fd.get(), ltRead, false)) { + AutoDelete(path, false); + continue; + } + + ActiveBuildInfo info(nlohmann::json::parse(readFile(fd.get())).get()); + +#if defined(__linux__) || defined(__APPLE__) + /* Read process information. 
*/ + try { +# ifdef __linux__ + if (info.cgroup) { + for (auto pid : getPidsInCgroup(*info.cgroup)) + info.processes.push_back(getProcessInfo(pid)); + + /* Read CPU statistics from the cgroup. */ + auto stats = getCgroupStats(*info.cgroup); + info.utime = stats.cpuUser; + info.stime = stats.cpuSystem; + } else +# endif + { + for (auto pid : getDescendantPids(info.mainPid)) + info.processes.push_back(getProcessInfo(pid)); + } + } catch (...) { + ignoreExceptionExceptInterrupt(); + } +#endif + + result.push_back(std::move(info)); + } catch (...) { + ignoreExceptionExceptInterrupt(); + } + } + + return result; +} + +LocalStore::BuildHandle LocalStore::buildStarted(const ActiveBuild & build) +{ + // Write info about the active build to the active-builds directory where it can be read by `queryBuilds()`. + static std::atomic nextId{1}; + + auto id = nextId++; + + auto infoFileName = fmt("%d-%d", getpid(), id); + auto infoFilePath = activeBuildsDir / infoFileName; + + auto infoFd = openLockFile(infoFilePath, true); + + // Lock the file to denote that the build is active. 
+ lockFile(infoFd.get(), ltWrite, true); + + writeFile(infoFilePath, nlohmann::json(build).dump(), 0600, FsSync::Yes); + + activeBuilds.lock()->emplace( + id, + ActiveBuildFile{ + .fd = std::move(infoFd), + .del = AutoDelete(infoFilePath, false), + }); + + return BuildHandle(*this, id); +} + +void LocalStore::buildFinished(const BuildHandle & handle) +{ + activeBuilds.lock()->erase(handle.id); +} + +} // namespace nix diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index b625b6c1bf2..3c9ae14f03f 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -125,6 +125,7 @@ LocalStore::LocalStore(ref config) , schemaPath(dbDir + "/schema") , tempRootsDir(config->stateDir + "/temproots") , fnTempRoots(fmt("%s/%d", tempRootsDir, getpid())) + , activeBuildsDir(config->stateDir + "/active-builds") { auto state(_state->lock()); state->stmts = std::make_unique(); @@ -146,6 +147,7 @@ LocalStore::LocalStore(ref config) createDirs(gcRootsDir); replaceSymlink(profilesDir, gcRootsDir + "/profiles"); } + createDirs(activeBuildsDir); for (auto & perUserDir : {profilesDir + "/per-user", gcRootsDir + "/per-user"}) { createDirs(perUserDir); diff --git a/src/libstore/meson.build b/src/libstore/meson.build index d8927c3a6c3..0a0d2b8cac6 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -13,6 +13,8 @@ project( license : 'LGPL-2.1-or-later', ) +fs = import('fs') + cxx = meson.get_compiler('cpp') subdir('nix-meson-build-support/deps-lists') @@ -23,6 +25,11 @@ configdata_priv = configuration_data() # TODO rename, because it will conflict with downstream projects configdata_priv.set_quoted('PACKAGE_VERSION', meson.project_version()) +configdata_priv.set_quoted( + 'DETERMINATE_NIX_VERSION', + fs.read('../../.version-determinate').strip(), +) + subdir('nix-meson-build-support/default-system-cpu') # Used in public header. 
@@ -200,8 +207,6 @@ if get_option('embedded-sandbox-shell') generated_headers += embedded_sandbox_shell_gen endif -fs = import('fs') - prefix = get_option('prefix') # For each of these paths, assume that it is relative to the prefix unless # it is already an absolute path (which is the default for store-dir, localstatedir, and log-dir). @@ -267,6 +272,8 @@ config_priv_h = configure_file( subdir('nix-meson-build-support/common') sources = files( + 'active-builds.cc', + 'async-path-writer.cc', 'binary-cache-store.cc', 'build-result.cc', 'build/derivation-builder.cc', @@ -305,6 +312,7 @@ sources = files( 'local-binary-cache-store.cc', 'local-fs-store.cc', 'local-overlay-store.cc', + 'local-store-active-builds.cc', 'local-store.cc', 'log-store.cc', 'machines.cc', diff --git a/src/libstore/package.nix b/src/libstore/package.nix index b451b404146..44f43fdad36 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -4,7 +4,7 @@ mkMesonLibrary, unixtools, - darwin, + apple-sdk, nix-util, boost, @@ -32,15 +32,17 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store"; + pname = "determinate-nix-store"; inherit version; workDir = ./.; fileset = fileset.unions [ ../../nix-meson-build-support ./nix-meson-build-support + # FIXME: get rid of these symlinks. 
../../.version ./.version + ../../.version-determinate ./meson.build ./meson.options ./include/nix/store/meson.build diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 6d1204570d8..91ff48a76c1 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -769,6 +769,16 @@ void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log) readInt(conn->from); } +std::vector RemoteStore::queryActiveBuilds() +{ + auto conn(getConnection()); + if (!conn->features.count(WorkerProto::featureQueryActiveBuilds)) + throw Error("remote store does not support querying active builds"); + conn->to << WorkerProto::Op::QueryActiveBuilds; + conn.processStderr(); + return nlohmann::json::parse(readString(conn->from)).get>(); +} + std::optional RemoteStore::getVersion() { auto conn(getConnection()); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index dc5be065d87..5abaee7355e 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -110,8 +110,12 @@ StorePath Store::addToStore( auto sink = sourceToSink([&](Source & source) { LengthSource lengthSource(source); storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair); - if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) - warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total)); + if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) { + static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1"; + if (failOnLargePath) + throw Error("doesn't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + } }); dumpPath(path, *sink, fsm, filter); sink->finish(); @@ -454,6 +458,8 @@ void Store::querySubstitutablePathInfos(const StorePathCAMap & paths, 
Substituta .downloadSize = narInfo ? narInfo->fileSize : 0, .narSize = info->narSize, }); + + break; /* We are done. */ } catch (InvalidPath &) { } catch (SubstituterDisabled &) { } catch (Error & e) { @@ -527,6 +533,23 @@ ref Store::queryPathInfo(const StorePath & storePath) return promise.get_future().get(); } +std::shared_ptr Store::maybeQueryPathInfo(const StorePath & storePath) +{ + std::promise> promise; + + queryPathInfo(storePath, {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (InvalidPath &) { + promise.set_value(nullptr); + } catch (...) { + promise.set_exception(std::current_exception()); + } + }}); + + return promise.get_future().get(); +} + static bool goodStorePath(const StorePath & expected, const StorePath & actual) { return expected.hashPart() == actual.hashPart() diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 8b2298aa508..3e15cc11fc5 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1,6 +1,7 @@ #include "nix/store/build/derivation-builder.hh" #include "nix/util/file-system.hh" #include "nix/store/local-store.hh" +#include "nix/store/active-builds.hh" #include "nix/util/processes.hh" #include "nix/store/builtins.hh" #include "nix/store/path-references.hh" @@ -83,6 +84,11 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder */ Pid pid; + /** + * Handles to track active builds for `nix ps`. + */ + std::optional activeBuildHandle; + LocalStore & store; std::unique_ptr miscMethods; @@ -235,6 +241,11 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder return acquireUserLock(1, false); } + /** + * Construct the `ActiveBuild` object for `ActiveBuildsTracker`. + */ + virtual ActiveBuild getActiveBuild(); + /** * Return the paths that should be made available in the sandbox. 
* This includes: @@ -490,6 +501,8 @@ bool DerivationBuilderImpl::killChild() killSandbox(true); pid.wait(); + + activeBuildHandle.reset(); } return ret; } @@ -523,6 +536,8 @@ SingleDrvOutputs DerivationBuilderImpl::unprepareBuild() root. */ killSandbox(true); + activeBuildHandle.reset(); + /* Terminate the recursive Nix daemon. */ stopDaemon(); @@ -678,17 +693,17 @@ static void handleChildException(bool sendException) } } -static bool checkNotWorldWritable(std::filesystem::path path) +static void checkNotWorldWritable(std::filesystem::path path) { while (true) { auto st = lstat(path); if (st.st_mode & S_IWOTH) - return false; + throw Error("Path %s is world-writable or a symlink. That's not allowed for security.", path); if (path == path.parent_path()) break; path = path.parent_path(); } - return true; + return; } std::optional DerivationBuilderImpl::startBuild() @@ -710,9 +725,8 @@ std::optional DerivationBuilderImpl::startBuild() createDirs(buildDir); - if (buildUser && !checkNotWorldWritable(buildDir)) - throw Error( - "Path %s or a parent directory is world-writable or a symlink. That's not allowed for security.", buildDir); + if (buildUser) + checkNotWorldWritable(buildDir); /* Create a temporary directory where the build will take place. */ @@ -837,17 +851,39 @@ std::optional DerivationBuilderImpl::startBuild() pid.setSeparatePG(true); + /* Make the build visible to `nix ps`. */ + if (auto tracker = dynamic_cast(&store)) + activeBuildHandle.emplace(tracker->buildStarted(getActiveBuild())); + processSandboxSetupMessages(); return builderOut.get(); } +ActiveBuild DerivationBuilderImpl::getActiveBuild() +{ + return { + .nixPid = getpid(), + .clientPid = std::nullopt, // FIXME + .clientUid = std::nullopt, // FIXME + .mainPid = pid, + .mainUser = UserInfo::fromUid(buildUser ? 
buildUser->getUID() : getuid()), + .startTime = buildResult.startTime, + .derivation = drvPath, + }; +} + PathsInChroot DerivationBuilderImpl::getPathsInSandbox() { /* Allow a user-configurable set of directories from the host file system. */ PathsInChroot pathsInChroot = defaultPathsInChroot; + for (auto & p : pathsInChroot) + if (!p.second.optional && !maybeLstat(p.second.source)) + throw SysError( + "path '%s' is configured as part of the `sandbox-paths` option, but is inaccessible", p.second.source); + if (hasPrefix(store.storeDir, tmpDirInSandbox())) { throw Error("`sandbox-build-dir` must not contain the storeDir"); } @@ -999,7 +1035,7 @@ void DerivationBuilderImpl::processSandboxSetupMessages() "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)", store.printStorePath(drvPath), statusToString(status), - concatStringsSep("|", msgs)); + concatStringsSep("\n", msgs)); throw; } }(); @@ -1853,7 +1889,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() /* Apply output checks. This includes checking of the wanted vs got hash of fixed-outputs. */ - checkOutputs(store, drvPath, drv.outputs, drvOptions.outputChecks, infos); + checkOutputs(store, drvPath, drv.outputs, drvOptions.outputChecks, infos, *act); if (buildMode == bmCheck) { return {}; diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index d15e6e1ae7a..fc2140817d7 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -711,6 +711,9 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu void addDependencyImpl(const StorePath & path) override { + if (isAllowed(path)) + return; + auto [source, target] = ChrootDerivationBuilder::addDependencyPrep(path); /* Bind-mount the path into the sandbox. 
This requires @@ -733,6 +736,13 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu if (status != 0) throw Error("could not add path '%s' to sandbox", store.printStorePath(path)); } + + ActiveBuild getActiveBuild() override + { + auto build = DerivationBuilderImpl::getActiveBuild(); + build.cgroup = cgroup; + return build; + } }; } // namespace nix diff --git a/src/libstore/unix/build/sandbox-network.sb b/src/libstore/unix/build/sandbox-network.sb index 335edbaed2e..a504027c729 100644 --- a/src/libstore/unix/build/sandbox-network.sb +++ b/src/libstore/unix/build/sandbox-network.sb @@ -16,6 +16,7 @@ R""( ; Allow DNS lookups. (allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder"))) +(allow mach-lookup (global-name "com.apple.SystemConfiguration.DNSConfiguration")) ; Allow access to trustd. (allow mach-lookup (global-name "com.apple.trustd")) diff --git a/src/libstore/worker-protocol-connection.cc b/src/libstore/worker-protocol-connection.cc index 8a37662904d..24d1ea82395 100644 --- a/src/libstore/worker-protocol-connection.cc +++ b/src/libstore/worker-protocol-connection.cc @@ -5,7 +5,7 @@ namespace nix { -const WorkerProto::FeatureSet WorkerProto::allFeatures{}; +const WorkerProto::FeatureSet WorkerProto::allFeatures{{std::string(WorkerProto::featureQueryActiveBuilds)}}; WorkerProto::BasicClientConnection::~BasicClientConnection() { diff --git a/src/libutil-c/package.nix b/src/libutil-c/package.nix index f26f57775d4..a1605bf5bb8 100644 --- a/src/libutil-c/package.nix +++ b/src/libutil-c/package.nix @@ -14,7 +14,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-c"; + pname = "determinate-nix-util-c"; inherit version; workDir = ./.; diff --git a/src/libutil-test-support/package.nix b/src/libutil-test-support/package.nix index f8e92c27113..40ff65d6135 100644 --- a/src/libutil-test-support/package.nix +++ b/src/libutil-test-support/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary 
(finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libutil-tests/config.cc b/src/libutil-tests/config.cc index 5fb2229b6b9..87c1e556b73 100644 --- a/src/libutil-tests/config.cc +++ b/src/libutil-tests/config.cc @@ -218,7 +218,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description", {}, true, - Xp::Flakes, + Xp::CaDerivations, }; setting.assign("value"); @@ -231,7 +231,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description": "description\n", "documentDefault": true, "value": "value", - "experimentalFeature": "flakes" + "experimentalFeature": "ca-derivations" } })#"_json); } diff --git a/src/libutil/args.cc b/src/libutil/args.cc index c6d450a0bc6..bd3dc9c95df 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -513,7 +513,7 @@ void Args::checkArgs() { for (auto & [name, flag] : longFlags) { if (flag->required && flag->timesUsed == 0) - throw UsageError("required argument '--%s' is missing", name); + throw UsageError("required argument '%s' is missing", "--" + name); } } @@ -607,7 +607,7 @@ Strings argvToStrings(int argc, char ** argv) std::optional Command::experimentalFeature() { - return {Xp::NixCommand}; + return {}; } MultiCommand::MultiCommand(std::string_view commandName, const Commands & commands_) diff --git a/src/libutil/configuration.cc b/src/libutil/configuration.cc index 832099dab99..407320a6b51 100644 --- a/src/libutil/configuration.cc +++ b/src/libutil/configuration.cc @@ -398,11 +398,11 @@ std::set BaseSetting>::parse( { std::set res; for (auto & s : tokenizeString(str)) { - if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) { + if (auto thisXpFeature = parseExperimentalFeature(s)) res.insert(thisXpFeature.value()); - if (thisXpFeature.value() == Xp::Flakes) - res.insert(Xp::FetchTree); - } else + else if (stabilizedFeatures.count(s)) + debug("experimental feature '%s' is now stable", 
s); + else warn("unknown experimental feature '%s'", s); } return res; diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 69ba62b5619..ea4f57821ea 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -17,7 +17,7 @@ struct ExperimentalFeatureDetails /** * If two different PRs both add an experimental feature, and we just - * used a number for this, we *woudln't* get merge conflict and the + * used a number for this, we *wouldn't* get merge conflict and the * counter will be incremented once instead of twice, causing a build * failure. * @@ -25,7 +25,7 @@ struct ExperimentalFeatureDetails * feature, we either have no issue at all if few features are not added * at the end of the list, or a proper merge conflict if they are. */ -constexpr size_t numXpFeatures = 1 + static_cast(Xp::BLAKE3Hashes); +constexpr size_t numXpFeatures = 1 + static_cast(Xp::ParallelEval); constexpr std::array xpFeatureDetails = {{ { @@ -71,38 +71,21 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/42", }, - { - .tag = Xp::Flakes, - .name = "flakes", - .description = R"( - Enable flakes. See the manual entry for [`nix - flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details. - )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/27", - }, { .tag = Xp::FetchTree, .name = "fetch-tree", .description = R"( + *Enabled for Determinate Nix Installer users since 2.24* + Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language. `fetchTree` exposes a generic interface for fetching remote file system trees from different types of remote sources. - The [`flakes`](#xp-feature-flakes) feature flag always enables `fetch-tree`. This built-in was previously guarded by the `flakes` experimental feature because of that overlap. 
Enabling just this feature serves as a "release candidate", allowing users to try it out in isolation. )", .trackingUrl = "https://github.com/NixOS/nix/milestone/31", }, - { - .tag = Xp::NixCommand, - .name = "nix-command", - .description = R"( - Enable the new `nix` subcommands. See the manual on - [`nix`](@docroot@/command-ref/new-cli/nix.md) for details. - )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/28", - }, { .tag = Xp::GitHashing, .name = "git-hashing", @@ -143,14 +126,14 @@ constexpr std::array xpFeatureDetails arbitrary substitutions. For example, running ``` - nix-store -r /nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10 + nix-store -r /nix/store/lrs9qfm60jcgsk83qhyypj3m4jqsgdid-hello-2.10 ``` in the above `runCommand` script would be disallowed, as this could lead to derivations with hidden dependencies or breaking reproducibility by relying on the current state of the Nix store. An exception would be if - `/nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10` were + `/nix/store/lrs9qfm60jcgsk83qhyypj3m4jqsgdid-hello-2.10` were already in the build inputs or built by a previous recursive Nix call. )", @@ -171,7 +154,7 @@ constexpr std::array xpFeatureDetails "http://foo" ``` - But enabling this experimental feature will cause the Nix parser to + But enabling this experimental feature causes the Nix parser to throw an error when encountering a URL literal: ``` @@ -321,6 +304,22 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "", }, + { + .tag = Xp::BuildTimeFetchTree, + .name = "build-time-fetch-tree", + .description = R"( + Enable the built-in derivation `builtin:fetch-tree`, as well as the flake input attribute `buildTime`. + )", + .trackingUrl = "", + }, + { + .tag = Xp::ParallelEval, + .name = "parallel-eval", + .description = R"( + Enable built-in functions for parallel evaluation. 
+ )", + .trackingUrl = "", + }, }}; static_assert( @@ -332,6 +331,12 @@ static_assert( }(), "array order does not match enum tag order"); +/** + * A set of previously experimental features that are now considered + * stable. We don't warn if users have these in `experimental-features`. + */ +std::set stabilizedFeatures{"flakes", "nix-command"}; + const std::optional parseExperimentalFeature(const std::string_view & name) { using ReverseXpMap = std::map; diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc index 4851d8cfb57..ead940760a3 100644 --- a/src/libutil/file-system.cc +++ b/src/libutil/file-system.cc @@ -713,17 +713,27 @@ AutoCloseFD createAnonymousTempFile() { AutoCloseFD fd; #ifdef O_TMPFILE - fd = ::open(defaultTempDir().c_str(), O_TMPFILE | O_CLOEXEC | O_RDWR, S_IWUSR | S_IRUSR); - if (!fd) - throw SysError("creating anonymous temporary file"); -#else + static std::atomic_flag tmpfileUnsupported{}; + if (!tmpfileUnsupported.test()) /* Try with O_TMPFILE first. */ { + /* Use O_EXCL, because the file is never supposed to be linked into filesystem. */ + fd = ::open(defaultTempDir().c_str(), O_TMPFILE | O_CLOEXEC | O_RDWR | O_EXCL, S_IWUSR | S_IRUSR); + if (!fd) { + /* Not supported by the filesystem or the kernel. */ + if (errno == EOPNOTSUPP || errno == EISDIR) + tmpfileUnsupported.test_and_set(); /* Set flag and fall through to createTempFile. */ + else + throw SysError("creating anonymous temporary file"); + } else { + return fd; /* Successfully created. */ + } + } +#endif auto [fd2, path] = createTempFile("nix-anonymous"); if (!fd2) throw SysError("creating temporary file '%s'", path); fd = std::move(fd2); -# ifndef _WIN32 +#ifndef _WIN32 unlink(requireCString(path)); /* We only care about the file descriptor. 
*/ -# endif #endif return fd; } diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index 541febdb5f9..6b9f2d6f5d0 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -444,7 +444,7 @@ struct ExperimentalFeatureSettings : Config Example: ``` - experimental-features = nix-command flakes + experimental-features = ca-derivations ``` The following experimental features are available: diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index aca14bfbb41..20a4610a390 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -19,9 +19,7 @@ namespace nix { enum struct ExperimentalFeature { CaDerivations, ImpureDerivations, - Flakes, FetchTree, - NixCommand, GitHashing, RecursiveNix, NoUrlLiterals, @@ -39,8 +37,12 @@ enum struct ExperimentalFeature { PipeOperators, ExternalBuilders, BLAKE3Hashes, + BuildTimeFetchTree, + ParallelEval, }; +extern std::set stabilizedFeatures; + /** * Just because writing `ExperimentalFeature::CaDerivations` is way too long */ diff --git a/src/libutil/include/nix/util/forwarding-source-accessor.hh b/src/libutil/include/nix/util/forwarding-source-accessor.hh new file mode 100644 index 00000000000..02474a3a7f3 --- /dev/null +++ b/src/libutil/include/nix/util/forwarding-source-accessor.hh @@ -0,0 +1,57 @@ +#pragma once + +#include "source-accessor.hh" + +namespace nix { + +/** + * A source accessor that just forwards every operation to another + * accessor. This is not useful in itself but can be used as a + * superclass for accessors that do change some operations. 
+ */ +struct ForwardingSourceAccessor : SourceAccessor +{ + ref next; + + ForwardingSourceAccessor(ref next) + : next(next) + { + } + + std::string readFile(const CanonPath & path) override + { + return next->readFile(path); + } + + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override + { + next->readFile(path, sink, sizeCallback); + } + + std::optional maybeLstat(const CanonPath & path) override + { + return next->maybeLstat(path); + } + + DirEntries readDirectory(const CanonPath & path) override + { + return next->readDirectory(path); + } + + std::string readLink(const CanonPath & path) override + { + return next->readLink(path); + } + + std::string showPath(const CanonPath & path) override + { + return next->showPath(path); + } + + std::optional getPhysicalPath(const CanonPath & path) override + { + return next->getPhysicalPath(path); + } +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/logging.hh b/src/libutil/include/nix/util/logging.hh index 4673895aad6..de2c3f683df 100644 --- a/src/libutil/include/nix/util/logging.hh +++ b/src/libutil/include/nix/util/logging.hh @@ -39,6 +39,8 @@ typedef enum { resSetExpected = 106, resPostBuildLogLine = 107, resFetchStatus = 108, + resHashMismatch = 109, + resBuildResult = 110, } ResultType; typedef uint64_t ActivityId; @@ -59,7 +61,7 @@ struct LoggerSettings : Config {}, "json-log-path", R"( - A file or unix socket to which JSON records of Nix's log output are + A file or Unix domain socket to which JSON records of Nix's log output are written, in the same format as `--log-format internal-json` (without the `@nix ` prefixes on each line). 
Concurrent writes to the same file by multiple Nix processes are not supported and @@ -158,6 +160,8 @@ public: virtual void result(ActivityId act, ResultType type, const Fields & fields) {}; + virtual void result(ActivityId act, ResultType type, const nlohmann::json & json) {}; + virtual void writeToStdout(std::string_view s); template @@ -222,6 +226,11 @@ struct Activity result(resSetExpected, type2, expected); } + void result(ResultType type, const nlohmann::json & json) const + { + logger.result(id, type, json); + } + template void result(ResultType type, const Args &... args) const { diff --git a/src/libutil/include/nix/util/meson.build b/src/libutil/include/nix/util/meson.build index 45b52ff5e4a..b2d2dc8d316 100644 --- a/src/libutil/include/nix/util/meson.build +++ b/src/libutil/include/nix/util/meson.build @@ -38,6 +38,7 @@ headers = files( 'file-system.hh', 'finally.hh', 'fmt.hh', + 'forwarding-source-accessor.hh', 'fs-sink.hh', 'git.hh', 'hash.hh', @@ -65,6 +66,7 @@ headers = files( 'signals.hh', 'signature/local-keys.hh', 'signature/signer.hh', + 'socket.hh', 'sort.hh', 'source-accessor.hh', 'source-path.hh', diff --git a/src/libutil/include/nix/util/pool.hh b/src/libutil/include/nix/util/pool.hh index a9091c2dee2..952c29ad5de 100644 --- a/src/libutil/include/nix/util/pool.hh +++ b/src/libutil/include/nix/util/pool.hh @@ -211,6 +211,12 @@ public: left.push_back(p); std::swap(state_->idle, left); } + + std::vector> clear() + { + auto state_(state.lock()); + return std::exchange(state_->idle, {}); + } }; } // namespace nix diff --git a/src/libutil/include/nix/util/pos-idx.hh b/src/libutil/include/nix/util/pos-idx.hh index 8e668176c61..7b7d16ca3a4 100644 --- a/src/libutil/include/nix/util/pos-idx.hh +++ b/src/libutil/include/nix/util/pos-idx.hh @@ -15,12 +15,12 @@ class PosIdx private: uint32_t id; +public: explicit PosIdx(uint32_t id) : id(id) { } -public: PosIdx() : id(0) { @@ -45,6 +45,11 @@ public: { return std::hash{}(id); } + + uint32_t get() const + { 
+ return id; + } }; inline PosIdx noPos = {}; diff --git a/src/libutil/include/nix/util/pos-table.hh b/src/libutil/include/nix/util/pos-table.hh index c5f93a3d597..954138afbc8 100644 --- a/src/libutil/include/nix/util/pos-table.hh +++ b/src/libutil/include/nix/util/pos-table.hh @@ -49,20 +49,29 @@ private: */ using LinesCache = LRUCache; - std::map origins; - mutable Sync linesCache; + // FIXME: this could be made lock-free (at least for access) if we + // have a data structure where pointers to existing positions are + // never invalidated. + struct State + { + std::map origins; + }; + + SharedSync state_; + const Origin * resolve(PosIdx p) const { if (p.id == 0) return nullptr; + auto state(state_.readLock()); const auto idx = p.id - 1; - /* we want the last key <= idx, so we'll take prev(first key > idx). - this is guaranteed to never rewind origin.begin because the first - key is always 0. */ - const auto pastOrigin = origins.upper_bound(idx); + /* We want the last key <= idx, so we'll take prev(first key > + idx). This is guaranteed to never rewind origin.begin + because the first key is always 0. */ + const auto pastOrigin = state->origins.upper_bound(idx); return &std::prev(pastOrigin)->second; } @@ -74,15 +83,16 @@ public: Origin addOrigin(Pos::Origin origin, size_t size) { + auto state(state_.lock()); uint32_t offset = 0; - if (auto it = origins.rbegin(); it != origins.rend()) + if (auto it = state->origins.rbegin(); it != state->origins.rend()) offset = it->first + it->second.size; // +1 because all PosIdx are offset by 1 to begin with, and // another +1 to ensure that all origins can point to EOF, eg // on (invalid) empty inputs. 
if (2 + offset + size < offset) return Origin{origin, offset, 0}; - return origins.emplace(offset, Origin{origin, offset, size}).first->second; + return state->origins.emplace(offset, Origin{origin, offset, size}).first->second; } PosIdx add(const Origin & origin, size_t offset) @@ -119,7 +129,7 @@ public: { auto lines = linesCache.lock(); lines->clear(); - origins.clear(); + state_.lock()->origins.clear(); } }; diff --git a/src/libutil/include/nix/util/posix-source-accessor.hh b/src/libutil/include/nix/util/posix-source-accessor.hh index 29561a3daaf..006ba0e7e4d 100644 --- a/src/libutil/include/nix/util/posix-source-accessor.hh +++ b/src/libutil/include/nix/util/posix-source-accessor.hh @@ -78,6 +78,8 @@ public: return trackLastModified ? std::optional{mtime} : std::nullopt; } + void invalidateCache(const CanonPath & path) override; + private: /** diff --git a/src/libutil/include/nix/util/socket.hh b/src/libutil/include/nix/util/socket.hh new file mode 100644 index 00000000000..30d963ec89b --- /dev/null +++ b/src/libutil/include/nix/util/socket.hh @@ -0,0 +1,61 @@ +#pragma once +///@file + +#include "nix/util/file-descriptor.hh" + +#ifdef _WIN32 +# include +#endif + +namespace nix { + +/** + * Often we want to use `Descriptor`, but Windows makes a slightly + * stronger file descriptor vs socket distinction, at least at the level + * of C types. + */ +using Socket = +#ifdef _WIN32 + SOCKET +#else + int +#endif + ; + +#ifdef _WIN32 +/** + * Windows gives this a different name + */ +# define SHUT_WR SD_SEND +# define SHUT_RDWR SD_BOTH +#endif + +/** + * Convert a `Descriptor` to a `Socket` + * + * This is a no-op except on Windows. + */ +static inline Socket toSocket(Descriptor fd) +{ +#ifdef _WIN32 + return reinterpret_cast(fd); +#else + return fd; +#endif +} + +/** + * Convert a `Socket` to a `Descriptor` + * + * This is a no-op except on Windows. 
+ */ +static inline Descriptor fromSocket(Socket fd) +{ +#ifdef _WIN32 + return reinterpret_cast(fd); +#else + return fd; +#endif +} + +} // namespace nix diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index 1006895b33c..1357cf79a28 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -209,6 +209,11 @@ struct SourceAccessor : std::enable_shared_from_this { return std::nullopt; } + + /** + * Invalidate any cached value the accessor may have for the specified path. + */ + virtual void invalidateCache(const CanonPath & path) {} }; /** diff --git a/src/libutil/include/nix/util/source-path.hh b/src/libutil/include/nix/util/source-path.hh index 08f9fe580b0..4597de1ff46 100644 --- a/src/libutil/include/nix/util/source-path.hh +++ b/src/libutil/include/nix/util/source-path.hh @@ -114,6 +114,11 @@ struct SourcePath return {accessor, accessor->resolveSymlinks(path, mode)}; } + void invalidateCache() const + { + accessor->invalidateCache(path); + } + friend class std::hash; }; diff --git a/src/libutil/include/nix/util/table.hh b/src/libutil/include/nix/util/table.hh index 13e4506d5a3..0af33b66cc3 100644 --- a/src/libutil/include/nix/util/table.hh +++ b/src/libutil/include/nix/util/table.hh @@ -2,10 +2,26 @@ #include "nix/util/types.hh" +#include + namespace nix { -typedef std::vector> Table; +struct TableCell +{ + std::string content; + + enum Alignment { Left, Right } alignment = Left; + + TableCell(std::string content, Alignment alignment = Left) + : content(std::move(content)) + , alignment(alignment) + { + } +}; + +using TableRow = std::vector; +using Table = std::vector; -void printTable(std::ostream & out, Table & table); +void printTable(std::ostream & out, Table & table, unsigned int width = std::numeric_limits::max()); } // namespace nix diff --git a/src/libutil/include/nix/util/terminal.hh b/src/libutil/include/nix/util/terminal.hh index 
5e35cbb9540..c70006bc51e 100644 --- a/src/libutil/include/nix/util/terminal.hh +++ b/src/libutil/include/nix/util/terminal.hh @@ -44,6 +44,11 @@ void updateWindowSize(); */ std::pair getWindowSize(); +/** + * @return The number of columns of the terminal, or std::numeric_limits::max() if unknown. + */ +unsigned int getWindowWidth(); + /** * Get the slave name of a pseudoterminal in a thread-safe manner. * diff --git a/src/libutil/include/nix/util/thread-pool.hh b/src/libutil/include/nix/util/thread-pool.hh index a0735414663..63f1141f6a5 100644 --- a/src/libutil/include/nix/util/thread-pool.hh +++ b/src/libutil/include/nix/util/thread-pool.hh @@ -52,6 +52,12 @@ public: */ void process(); + /** + * Shut down all worker threads and wait until they've exited. + * Active work items are finished, but any pending work items are discarded. + */ + void shutdown(); + private: size_t maxThreads; @@ -72,8 +78,6 @@ private: std::condition_variable work; void doWork(bool mainThread); - - void shutdown(); }; /** @@ -85,21 +89,24 @@ template void processGraph( const std::set & nodes, std::function(const T &)> getEdges, - std::function processNode) + std::function processNode, + bool discoverNodes = false, + size_t maxThreads = 0) { struct Graph { + std::set known; std::set left; std::map> refs, rrefs; }; - Sync graph_(Graph{nodes, {}, {}}); + Sync graph_(Graph{nodes, nodes, {}, {}}); std::function worker; - /* Create pool last to ensure threads are stopped before other destructors - * run */ - ThreadPool pool; + /* Create pool last to ensure threads are stopped before other + destructors run. 
*/ + ThreadPool pool(maxThreads); worker = [&](const T & node) { { @@ -116,11 +123,19 @@ void processGraph( { auto graph(graph_.lock()); - for (auto & ref : refs) + for (auto & ref : refs) { + if (discoverNodes) { + auto [i, inserted] = graph->known.insert(ref); + if (inserted) { + pool.enqueue(std::bind(worker, std::ref(*i))); + graph->left.insert(ref); + } + } if (graph->left.count(ref)) { graph->refs[node].insert(ref); graph->rrefs[ref].insert(node); } + } if (graph->refs[node].empty()) goto doWork; } diff --git a/src/libutil/include/nix/util/unix-domain-socket.hh b/src/libutil/include/nix/util/unix-domain-socket.hh index 6d28b62764b..99fd331ce38 100644 --- a/src/libutil/include/nix/util/unix-domain-socket.hh +++ b/src/libutil/include/nix/util/unix-domain-socket.hh @@ -3,10 +3,8 @@ #include "nix/util/types.hh" #include "nix/util/file-descriptor.hh" +#include "nix/util/socket.hh" -#ifdef _WIN32 -# include -#endif #include #include @@ -23,55 +21,6 @@ AutoCloseFD createUnixDomainSocket(); */ AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode); -/** - * Often we want to use `Descriptor`, but Windows makes a slightly - * stronger file descriptor vs socket distinction, at least at the level - * of C types. - */ -using Socket = -#ifdef _WIN32 - SOCKET -#else - int -#endif - ; - -#ifdef _WIN32 -/** - * Windows gives this a different name - */ -# define SHUT_WR SD_SEND -# define SHUT_RDWR SD_BOTH -#endif - -/** - * Convert a `Socket` to a `Descriptor` - * - * This is a no-op except on Windows. - */ -static inline Socket toSocket(Descriptor fd) -{ -#ifdef _WIN32 - return reinterpret_cast(fd); -#else - return fd; -#endif -} - -/** - * Convert a `Socket` to a `Descriptor` - * - * This is a no-op except on Windows. - */ -static inline Descriptor fromSocket(Socket fd) -{ -#ifdef _WIN32 - return reinterpret_cast(fd); -#else - return fd; -#endif -} - /** * Bind a Unix domain socket to a path. 
*/ diff --git a/src/libutil/include/nix/util/util.hh b/src/libutil/include/nix/util/util.hh index 7556663cd1d..8130c52ed27 100644 --- a/src/libutil/include/nix/util/util.hh +++ b/src/libutil/include/nix/util/util.hh @@ -298,9 +298,15 @@ typename T::mapped_type * get(T & map, const K & key) template typename T::mapped_type * get(T && map, const K & key) = delete; -/** - * Look up a value in a `boost::concurrent_flat_map`. - */ +template +std::optional getOptional(const T & map, const typename T::key_type & key) +{ + auto i = map.find(key); + if (i == map.end()) + return std::nullopt; + return {i->second}; +} + template std::optional getConcurrent(const T & map, const typename T::key_type & key) { diff --git a/src/libutil/linux/cgroup.cc b/src/libutil/linux/cgroup.cc index 928b44d6c50..802b56336d1 100644 --- a/src/libutil/linux/cgroup.cc +++ b/src/libutil/linux/cgroup.cc @@ -174,4 +174,23 @@ std::string getRootCgroup() return rootCgroup; } +std::set getPidsInCgroup(const std::filesystem::path & cgroup) +{ + if (!pathExists(cgroup)) + return {}; + + auto procsFile = cgroup / "cgroup.procs"; + + std::set result; + + for (auto & pidStr : tokenizeString>(readFile(procsFile))) { + if (auto o = string2Int(pidStr)) + result.insert(*o); + else + throw Error("invalid PID '%s'", pidStr); + } + + return result; +} + } // namespace nix diff --git a/src/libutil/linux/include/nix/util/cgroup.hh b/src/libutil/linux/include/nix/util/cgroup.hh index a759bdd0852..ad777347670 100644 --- a/src/libutil/linux/include/nix/util/cgroup.hh +++ b/src/libutil/linux/include/nix/util/cgroup.hh @@ -40,4 +40,9 @@ std::string getCurrentCgroup(); */ std::string getRootCgroup(); +/** + * Get the PIDs of all processes in the given cgroup. 
+ */ +std::set getPidsInCgroup(const std::filesystem::path & cgroup); + } // namespace nix diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 8f7ec2d294e..842381acf66 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -336,6 +336,16 @@ struct JSONLogger : Logger addFields(json, fields); write(json); } + + void result(ActivityId act, ResultType type, const nlohmann::json & j) override + { + nlohmann::json json; + json["action"] = "result"; + json["id"] = act; + json["type"] = type; + json["payload"] = j; + write(json); + } }; std::unique_ptr makeJSONLogger(Descriptor fd, bool includeNixPrefix) diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index d9398045cc5..13b77d2d1e1 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -99,6 +99,12 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor auto [accessor, subpath] = resolve(path); return accessor->getFingerprint(subpath); } + + void invalidateCache(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + accessor->invalidateCache(subpath); + } }; ref makeMountedSourceAccessor(std::map> mounts) diff --git a/src/libutil/package.nix b/src/libutil/package.nix index 3deb7ba3ae3..287e6c6a113 100644 --- a/src/libutil/package.nix +++ b/src/libutil/package.nix @@ -22,7 +22,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util"; + pname = "determinate-nix-util"; inherit version; workDir = ./.; diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index abbab45db21..632504e74a0 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -87,11 +87,11 @@ bool PosixSourceAccessor::pathExists(const CanonPath & path) return nix::pathExists(makeAbsPath(path).string()); } +using Cache = boost::concurrent_flat_map>; +static Cache cache; + std::optional PosixSourceAccessor::cachedLstat(const CanonPath & 
path) { - using Cache = boost::concurrent_flat_map>; - static Cache cache; - // Note: we convert std::filesystem::path to Path because the // former is not hashable on libc++. Path absPath = makeAbsPath(path).string(); @@ -108,6 +108,11 @@ std::optional PosixSourceAccessor::cachedLstat(const CanonPath & pa return st; } +void PosixSourceAccessor::invalidateCache(const CanonPath & path) +{ + cache.erase(makeAbsPath(path).string()); +} + std::optional PosixSourceAccessor::maybeLstat(const CanonPath & path) { if (auto parent = path.parent()) diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 9791b4fed8d..e71ec66d26a 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -1,6 +1,7 @@ #include "nix/util/serialise.hh" #include "nix/util/compression.hh" #include "nix/util/signals.hh" +#include "nix/util/socket.hh" #include "nix/util/util.hh" #include @@ -11,7 +12,6 @@ #ifdef _WIN32 # include -# include # include "nix/util/windows-error.hh" #else # include @@ -184,20 +184,20 @@ bool FdSource::hasData() while (true) { fd_set fds; FD_ZERO(&fds); - int fd_ = fromDescriptorReadOnly(fd); - FD_SET(fd_, &fds); + Socket sock = toSocket(fd); + FD_SET(sock, &fds); struct timeval timeout; timeout.tv_sec = 0; timeout.tv_usec = 0; - auto n = select(fd_ + 1, &fds, nullptr, nullptr, &timeout); + auto n = select(sock + 1, &fds, nullptr, nullptr, &timeout); if (n < 0) { if (errno == EINTR) continue; throw SysError("polling file descriptor"); } - return FD_ISSET(fd, &fds); + return FD_ISSET(sock, &fds); } } diff --git a/src/libutil/table.cc b/src/libutil/table.cc index fa1bf110d93..215171dc02f 100644 --- a/src/libutil/table.cc +++ b/src/libutil/table.cc @@ -1,4 +1,5 @@ #include "nix/util/table.hh" +#include "nix/util/terminal.hh" #include #include @@ -7,7 +8,7 @@ namespace nix { -void printTable(std::ostream & out, Table & table) +void printTable(std::ostream & out, Table & table, unsigned int width) { auto nrColumns = table.size() > 0 ? 
table.front().size() : 0; @@ -18,19 +19,31 @@ void printTable(std::ostream & out, Table & table) assert(i.size() == nrColumns); size_t column = 0; for (auto j = i.begin(); j != i.end(); ++j, ++column) - if (j->size() > widths[column]) - widths[column] = j->size(); + // TODO: take ANSI escapes into account when calculating width. + widths[column] = std::max(widths[column], j->content.size()); } for (auto & i : table) { size_t column = 0; + std::string line; for (auto j = i.begin(); j != i.end(); ++j, ++column) { - std::string s = *j; + std::string s = j->content; replace(s.begin(), s.end(), '\n', ' '); - out << s; - if (column < nrColumns - 1) - out << std::string(widths[column] - s.size() + 2, ' '); + + auto padding = std::string(widths[column] - s.size(), ' '); + if (j->alignment == TableCell::Right) { + line += padding; + line += s; + } else { + line += s; + if (column + 1 < nrColumns) + line += padding; + } + + if (column + 1 < nrColumns) + line += " "; } + out << filterANSIEscapes(line, false, width); out << std::endl; } } diff --git a/src/libutil/tee-logger.cc b/src/libutil/tee-logger.cc index 8433168a5a8..889b82ca02b 100644 --- a/src/libutil/tee-logger.cc +++ b/src/libutil/tee-logger.cc @@ -65,6 +65,12 @@ struct TeeLogger : Logger logger->result(act, type, fields); } + void result(ActivityId act, ResultType type, const nlohmann::json & json) override + { + for (auto & logger : loggers) + logger->result(act, type, json); + } + void writeToStdout(std::string_view s) override { for (auto & logger : loggers) { diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index 401ce16043d..c52cc14975b 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -189,6 +189,14 @@ std::pair getWindowSize() return *windowSize.lock(); } +unsigned int getWindowWidth() +{ + unsigned int width = getWindowSize().second; + if (width <= 0) + width = std::numeric_limits::max(); + return width; +} + #ifndef _WIN32 std::string getPtsName(int fd) { diff --git 
a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc index e3b39f14ed2..ea5f77f64f2 100644 --- a/src/libutil/union-source-accessor.cc +++ b/src/libutil/union-source-accessor.cc @@ -35,14 +35,18 @@ struct UnionSourceAccessor : SourceAccessor DirEntries readDirectory(const CanonPath & path) override { DirEntries result; + bool exists = false; for (auto & accessor : accessors) { auto st = accessor->maybeLstat(path); if (!st) continue; + exists = true; for (auto & entry : accessor->readDirectory(path)) // Don't override entries from previous accessors. result.insert(entry); } + if (!exists) + throw FileNotFound("path '%s' does not exist", showPath(path)); return result; } @@ -84,6 +88,12 @@ struct UnionSourceAccessor : SourceAccessor } return {path, std::nullopt}; } + + void invalidateCache(const CanonPath & path) override + { + for (auto & accessor : accessors) + accessor->invalidateCache(path); + } }; ref makeUnionSourceAccessor(std::vector> && accessors) diff --git a/src/nix/app.cc b/src/nix/app.cc index 634db04f3fe..07c7c55cfdb 100644 --- a/src/nix/app.cc +++ b/src/nix/app.cc @@ -74,6 +74,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) std::visit( overloaded{ [&](const NixStringContextElem::DrvDeep & d) -> DerivedPath { + state.waitForPath(d.drvPath); /* We want all outputs of the drv */ return DerivedPath::Built{ .drvPath = makeConstantStorePathRef(d.drvPath), @@ -81,6 +82,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) }; }, [&](const NixStringContextElem::Built & b) -> DerivedPath { + state.waitForPath(*b.drvPath); return DerivedPath::Built{ .drvPath = b.drvPath, .outputs = OutputsSpec::Names{b.output}, @@ -91,6 +93,9 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) .path = o.path, }; }, + [&](const NixStringContextElem::Path & p) -> DerivedPath { + throw Error("'program' attribute of an 'app' output cannot have no context"); + }, }, c.raw)); } diff --git a/src/nix/build.md b/src/nix/build.md index 
5dfdd44a71f..b5964e13dde 100644 --- a/src/nix/build.md +++ b/src/nix/build.md @@ -21,15 +21,15 @@ R""( ```console # nix build nixpkgs#hello nixpkgs#cowsay # ls -l result* - lrwxrwxrwx 1 … result -> /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 - lrwxrwxrwx 1 … result-1 -> /nix/store/rkfrm0z6x6jmi7d3gsmma4j53h15mg33-cowsay-3.03+dfsg2 + lrwxrwxrwx 1 … result -> /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 + lrwxrwxrwx 1 … result-1 -> /nix/store/frzgk3v1ycnarpfc2rkynravng27a86d-cowsay-3.03+dfsg2 ``` * Build GNU Hello and print the resulting store path. ```console # nix build nixpkgs#hello --print-out-paths - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 ``` * Build a specific output: @@ -37,19 +37,19 @@ R""( ```console # nix build nixpkgs#glibc.dev # ls -ld ./result-dev - lrwxrwxrwx 1 … ./result-dev -> /nix/store/dkm3gwl0xrx0wrw6zi5x3px3lpgjhlw4-glibc-2.32-dev + lrwxrwxrwx 1 … ./result-dev -> /nix/store/hb4lb9n3gv855llky72hrs4pglpxq70m-glibc-2.32-dev ``` * Build all outputs: ```console # nix build "nixpkgs#openssl^*" --print-out-paths - /nix/store/gvad6v0cmq1qccmc4wphsazqbj0xzjsl-openssl-3.0.13-bin - /nix/store/a07jqdrc8afnk8r6f3lnhh4gvab7chk4-openssl-3.0.13-debug - /nix/store/yg75achq89wgqn2fi3gglgsd77kjpi03-openssl-3.0.13-dev - /nix/store/bvdcihi8c88fw31cg6gzzmpnwglpn1jv-openssl-3.0.13-doc - /nix/store/gjqcvq47cmxazxga0cirspm3jywkmvfv-openssl-3.0.13-man - /nix/store/7nmrrad8skxr47f9hfl3xc0pfqmwq51b-openssl-3.0.13 + /nix/store/ah1slww3lfsj02w563wjf1xcz5fayj36-openssl-3.0.13-bin + /nix/store/vswlynn75s0bpba3vl6bi3wyzjym95yi-openssl-3.0.13-debug + /nix/store/z71nwwni9dcxdmd3v3a7j24v70c7v7z3-openssl-3.0.13-dev + /nix/store/iabzsa5c73p4f10zfmf5r2qsrn0hl4lk-openssl-3.0.13-doc + /nix/store/zqmfrpxvcll69a2lyawnpvp15zh421v2-openssl-3.0.13-man + /nix/store/l3nlzki957anyy7yb25qvwk6cqrnvb67-openssl-3.0.13 ``` * Build attribute `build.x86_64-linux` from (non-flake) Nix expression @@ -89,7 +89,7 
@@ R""( already exist: ```console - # nix build /nix/store/rkfrm0z6x6jmi7d3gsmma4j53h15mg33-cowsay-3.03+dfsg2 + # nix build /nix/store/frzgk3v1ycnarpfc2rkynravng27a86d-cowsay-3.03+dfsg2 ``` # Description diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc index e11f37b847e..331035eaf2b 100644 --- a/src/nix/bundle.cc +++ b/src/nix/bundle.cc @@ -107,6 +107,8 @@ struct CmdBundle : InstallableValueCommand NixStringContext context2; auto drvPath = evalState->coerceToStorePath(attr1->pos, *attr1->value, context2, ""); + evalState->waitForAllPaths(); + drvPath.requireDerivation(); auto attr2 = vRes->attrs()->get(evalState->s.outPath); @@ -115,6 +117,8 @@ struct CmdBundle : InstallableValueCommand auto outPath = evalState->coerceToStorePath(attr2->pos, *attr2->value, context2, ""); + evalState->waitForAllPaths(); + store->buildPaths({ DerivedPath::Built{ .drvPath = makeConstantStorePathRef(drvPath), diff --git a/src/nix/crash-handler.cc b/src/nix/crash-handler.cc index 17c948dab14..29c4f2027ca 100644 --- a/src/nix/crash-handler.cc +++ b/src/nix/crash-handler.cc @@ -34,7 +34,7 @@ void logFatal(std::string const & s) void onTerminate() { logFatal( - "Nix crashed. This is a bug. Please report this at https://github.com/NixOS/nix/issues with the following information included:\n"); + "Determinate Nix crashed. This is a bug. 
Please report this at https://github.com/DeterminateSystems/nix-src/issues with the following information included:\n"); try { std::exception_ptr eptr = std::current_exception(); if (eptr) { diff --git a/src/nix/develop.cc b/src/nix/develop.cc index 68ff3fcf965..b5f8db26d17 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -1,5 +1,6 @@ #include "nix/util/config-global.hh" #include "nix/expr/eval.hh" +#include "nix/fetchers/fetch-settings.hh" #include "nix/cmd/installable-flake.hh" #include "nix/cmd/command-installable-value.hh" #include "nix/main/common-args.hh" diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc index d36a21d746f..a71efa042e8 100644 --- a/src/nix/diff-closures.cc +++ b/src/nix/diff-closures.cc @@ -54,10 +54,10 @@ GroupedPaths getClosureInfo(ref store, const StorePath & toplevel) std::string showVersions(const StringSet & versions) { if (versions.empty()) - return "∅"; + return "(absent)"; StringSet versions2; for (auto & version : versions) - versions2.insert(version.empty() ? "ε" : version); + versions2.insert(version.empty() ? "(no version)" : version); return concatStringsSep(", ", versions2); } @@ -104,8 +104,13 @@ void printClosureDiff( if (showDelta || !removed.empty() || !added.empty()) { std::vector items; - if (!removed.empty() || !added.empty()) + if (!removed.empty() && !added.empty()) { items.push_back(fmt("%s → %s", showVersions(removed), showVersions(added))); + } else if (!removed.empty()) { + items.push_back(fmt("%s removed", showVersions(removed))); + } else if (!added.empty()) { + items.push_back(fmt("%s added", showVersions(added))); + } if (showDelta) items.push_back(fmt("%s%s" ANSI_NORMAL, sizeDelta > 0 ? 
ANSI_RED : ANSI_GREEN, renderSize(sizeDelta))); logger->cout("%s%s: %s", indent, name, concatStringsSep(", ", items)); diff --git a/src/nix/diff-closures.md b/src/nix/diff-closures.md index 0294c0d8def..6b07af28f95 100644 --- a/src/nix/diff-closures.md +++ b/src/nix/diff-closures.md @@ -11,8 +11,8 @@ R""( baloo-widgets: 20.08.1 → 20.08.2 bluez-qt: +12.6 KiB dolphin: 20.08.1 → 20.08.2, +13.9 KiB - kdeconnect: 20.08.2 → ∅, -6597.8 KiB - kdeconnect-kde: ∅ → 20.08.2, +6599.7 KiB + kdeconnect: 20.08.2 removed, -6597.8 KiB + kdeconnect-kde: 20.08.2 added, +6599.7 KiB … ``` @@ -34,9 +34,9 @@ dolphin: 20.08.1 → 20.08.2, +13.9 KiB No size change is shown if it's below the threshold. If the package does not exist in either the *before* or *after* closures, it is -represented using `∅` (empty set) on the appropriate side of the -arrow. If a package has an empty version string, the version is -rendered as `ε` (epsilon). +represented using `added` or `removed`. +If a package has an empty version string, the version is +rendered as `(no version)`. There may be multiple versions of a package in each closure. In that case, only the changed versions are shown. Thus, diff --git a/src/nix/eval.cc b/src/nix/eval.cc index 584b2122f09..2f1ba63956f 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -116,11 +116,14 @@ struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption logger->stop(); writeFull( getStandardOutput(), - *state->coerceToString(noPos, *v, context, "while generating the eval command output")); + state->devirtualize( + *state->coerceToString(noPos, *v, context, "while generating the eval command output"), context)); } else if (json) { - printJSON(printValueAsJSON(*state, true, *v, pos, context, false)); + // FIXME: use printJSON + auto j = printValueAsJSON(*state, true, *v, pos, context, false); + logger->cout("%s", state->devirtualize(outputPretty ? 
j.dump(2) : j.dump(), context)); } else { diff --git a/src/nix/flake-metadata.md b/src/nix/flake-metadata.md index adfd3dc96bb..b9977004995 100644 --- a/src/nix/flake-metadata.md +++ b/src/nix/flake-metadata.md @@ -9,7 +9,7 @@ R""( Resolved URL: github:edolstra/dwarffs Locked URL: github:edolstra/dwarffs/f691e2c991e75edb22836f1dbe632c40324215c5 Description: A filesystem that fetches DWARF debug info from the Internet on demand - Path: /nix/store/769s05vjydmc2lcf6b02az28wsa9ixh1-source + Path: /nix/store/vdyf2s1pygcl4y3dn3bm9wy7mnl8hxcv-source Revision: f691e2c991e75edb22836f1dbe632c40324215c5 Last modified: 2021-01-21 15:41:26 Inputs: @@ -40,7 +40,7 @@ R""( "type": "indirect" }, "originalUrl": "flake:dwarffs", - "path": "/nix/store/hang3792qwdmm2n0d9nsrs5n6bsws6kv-source", + "path": "/nix/store/l06r23gw4psl1f547il2hbnwnxaplbaz-source", "resolved": { "owner": "edolstra", "repo": "dwarffs", diff --git a/src/nix/flake-prefetch-inputs.cc b/src/nix/flake-prefetch-inputs.cc index 4ea6342c369..19fbb0b574b 100644 --- a/src/nix/flake-prefetch-inputs.cc +++ b/src/nix/flake-prefetch-inputs.cc @@ -43,11 +43,14 @@ struct CmdFlakePrefetchInputs : FlakeCommand return; if (auto lockedNode = dynamic_cast(&node)) { + if (lockedNode->buildTime) + return; try { Activity act(*logger, lvlInfo, actUnknown, fmt("fetching '%s'", lockedNode->lockedRef)); auto accessor = lockedNode->lockedRef.input.getAccessor(fetchSettings, *store).first; - fetchToStore( - fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); + if (!evalSettings.lazyTrees) + fetchToStore( + fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); } catch (Error & e) { printError("%s", e.what()); nrFailed++; diff --git a/src/nix/flake-prefetch-inputs.md b/src/nix/flake-prefetch-inputs.md index a69f7d36791..b571fa34837 100644 --- a/src/nix/flake-prefetch-inputs.md +++ b/src/nix/flake-prefetch-inputs.md @@ -12,6 +12,6 @@ R""( Fetch the inputs of a flake. 
This ensures that they are already available for any subsequent evaluation of the flake. -This operation is recursive: it will fetch not just the direct inputs of the top-level flake, but also transitive inputs. +This operation is recursive: it fetches not just the direct inputs of the top-level flake, but also transitive inputs. It skips build-time inputs, i.e. inputs that have the attribute `buildTime = true`. )"" diff --git a/src/nix/flake-prefetch.md b/src/nix/flake-prefetch.md index 4666aadc4df..a634c502262 100644 --- a/src/nix/flake-prefetch.md +++ b/src/nix/flake-prefetch.md @@ -20,7 +20,7 @@ R""( ```console # nix flake prefetch dwarffs --json {"hash":"sha256-VHg3MYVgQ12LeRSU2PSoDeKlSPD8PYYEFxxwkVVDRd0=" - ,"storePath":"/nix/store/hang3792qwdmm2n0d9nsrs5n6bsws6kv-source"} + ,"storePath":"/nix/store/l06r23gw4psl1f547il2hbnwnxaplbaz-source"} ``` # Description diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 5324e0121d5..dc7e82d98a0 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -17,6 +17,8 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/store/local-fs-store.hh" #include "nix/store/globals.hh" +#include "nix/expr/parallel-eval.hh" +#include "nix/util/exit.hh" #include #include @@ -132,6 +134,7 @@ struct CmdFlakeUpdate : FlakeCommand lockFlags.recreateLockFile = updateAll; lockFlags.writeLockFile = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } @@ -164,6 +167,7 @@ struct CmdFlakeLock : FlakeCommand lockFlags.writeLockFile = true; lockFlags.failOnUnlocked = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } @@ -212,11 +216,17 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON void run(nix::ref store) override { + lockFlags.requireLockable = false; auto lockedFlake = lockFlake(); auto & flake = lockedFlake.flake; - // Currently, all flakes are in the Nix store via the rootFS accessor. 
- auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first); + /* Hack to show the store path if available. */ + std::optional storePath; + if (store->isInStore(flake.path.path.abs())) { + auto path = store->toStorePath(flake.path.path.abs()).first; + if (store->isValidPath(path)) + storePath = path; + } if (json) { nlohmann::json j; @@ -238,7 +248,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON j["revCount"] = *revCount; if (auto lastModified = flake.lockedRef.input.getLastModified()) j["lastModified"] = *lastModified; - j["path"] = storePath; + if (storePath) + j["path"] = store->printStorePath(*storePath); j["locks"] = lockedFlake.lockFile.toJSON().first; if (auto fingerprint = lockedFlake.getFingerprint(*store, fetchSettings)) j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false); @@ -249,7 +260,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON logger->cout(ANSI_BOLD "Locked URL:" ANSI_NORMAL " %s", flake.lockedRef.to_string()); if (flake.description) logger->cout(ANSI_BOLD "Description:" ANSI_NORMAL " %s", *flake.description); - logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", storePath); + if (storePath) + logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", store->printStorePath(*storePath)); if (auto rev = flake.lockedRef.input.getRev()) logger->cout(ANSI_BOLD "Revision:" ANSI_NORMAL " %s", rev->to_string(HashFormat::Base16, false)); if (auto dirtyRev = fetchers::maybeGetStrAttr(flake.lockedRef.toAttrs(), "dirtyRev")) @@ -281,7 +293,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON "%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s%s", prefix + (last ? 
treeLast : treeConn), input.first, - (*lockedNode)->lockedRef, + (*lockedNode)->lockedRef.to_string(true), lastModifiedStr); bool firstVisit = visited.insert(*lockedNode).second; @@ -354,7 +366,7 @@ struct CmdFlakeCheck : FlakeCommand auto flake = lockFlake(); auto localSystem = std::string(settings.thisSystem.get()); - bool hasErrors = false; + std::atomic_bool hasErrors = false; auto reportError = [&](const Error & e) { try { throw e; @@ -369,7 +381,9 @@ struct CmdFlakeCheck : FlakeCommand } }; - StringSet omittedSystems; + Sync> drvPaths_; + Sync> omittedSystems; + Sync>> derivedPathToAttrPaths_; // FIXME: rewrite to use EvalCache. @@ -388,7 +402,7 @@ struct CmdFlakeCheck : FlakeCommand auto checkSystemType = [&](std::string_view system, const PosIdx pos) { if (!checkAllSystems && system != localSystem) { - omittedSystems.insert(std::string(system)); + omittedSystems.lock()->insert(std::string(system)); return false; } else { return true; @@ -418,7 +432,7 @@ struct CmdFlakeCheck : FlakeCommand return std::nullopt; }; - std::map> attrPathsByDrv; + FutureVector futures(*state->executor); auto checkApp = [&](const std::string & attrPath, Value & v, const PosIdx pos) { try { @@ -488,9 +502,9 @@ struct CmdFlakeCheck : FlakeCommand } }; - std::function checkHydraJobs; + std::function checkHydraJobs; - checkHydraJobs = [&](std::string_view attrPath, Value & v, const PosIdx pos) { + checkHydraJobs = [&](const std::string & attrPath, Value & v, const PosIdx pos) { try { Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath)); state->forceAttrs(v, pos, ""); @@ -498,15 +512,16 @@ struct CmdFlakeCheck : FlakeCommand if (state->isDerivation(v)) throw Error("jobset should not be a derivation at top-level"); - for (auto & attr : *v.attrs()) { - state->forceAttrs(*attr.value, attr.pos, ""); - auto attrPath2 = concatStrings(attrPath, ".", state->symbols[attr.name]); - if (state->isDerivation(*attr.value)) { - Activity act(*logger, lvlInfo, 
actUnknown, fmt("checking Hydra job '%s'", attrPath2)); - checkDerivation(attrPath2, *attr.value, attr.pos); - } else - checkHydraJobs(attrPath2, *attr.value, attr.pos); - } + for (auto & attr : *v.attrs()) + futures.spawn(1, [&, attrPath]() { + state->forceAttrs(*attr.value, attr.pos, ""); + auto attrPath2 = concatStrings(attrPath, ".", state->symbols[attr.name]); + if (state->isDerivation(*attr.value)) { + Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath2)); + checkDerivation(attrPath2, *attr.value, attr.pos); + } else + checkHydraJobs(attrPath2, *attr.value, attr.pos); + }); } catch (Error & e) { e.addTrace(resolve(pos), HintFmt("while checking the Hydra jobset '%s'", attrPath)); @@ -574,218 +589,225 @@ struct CmdFlakeCheck : FlakeCommand } }; - { + auto checkFlake = [&]() { Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); auto vFlake = state->allocValue(); flake::callFlake(*state, flake, *vFlake); enumerateOutputs(*state, *vFlake, [&](std::string_view name, Value & vOutput, const PosIdx pos) { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking flake output '%s'", name)); - - try { - evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs"); - - state->forceValue(vOutput, pos); - - std::string_view replacement = name == "defaultPackage" ? "packages..default" - : name == "defaultApp" ? "apps..default" - : name == "defaultTemplate" ? "templates.default" - : name == "defaultBundler" ? "bundlers..default" - : name == "overlay" ? "overlays.default" - : name == "devShell" ? "devShells..default" - : name == "nixosModule" ? 
"nixosModules.default" - : ""; - if (replacement != "") - warn("flake output attribute '%s' is deprecated; use '%s' instead", name, replacement); - - if (name == "checks") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - std::string_view attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - auto drvPath = checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - if (drvPath && attr_name == settings.thisSystem.get()) { - auto path = DerivedPath::Built{ - .drvPath = makeConstantStorePathRef(*drvPath), - .outputs = OutputsSpec::All{}, - }; - - // Build and store the attribute path for error reporting - AttrPath attrPath{state->symbols.create(name), attr.name, attr2.name}; - attrPathsByDrv[path].push_back(std::move(attrPath)); + futures.spawn(2, [&, name, pos]() { + Activity act(*logger, lvlInfo, actUnknown, fmt("checking flake output '%s'", name)); + + try { + evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs"); + + state->forceValue(vOutput, pos); + + std::string_view replacement = name == "defaultPackage" ? "packages..default" + : name == "defaultApp" ? "apps..default" + : name == "defaultTemplate" ? "templates.default" + : name == "defaultBundler" ? "bundlers..default" + : name == "overlay" ? "overlays.default" + : name == "devShell" ? "devShells..default" + : name == "nixosModule" ? 
"nixosModules.default" + : ""; + if (replacement != "") + warn("flake output attribute '%s' is deprecated; use '%s' instead", name, replacement); + + if (name == "checks") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + futures.spawn(3, [&, name]() { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) { + auto drvPath = checkDerivation( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + if (drvPath && attr_name == settings.thisSystem.get()) { + auto derivedPath = DerivedPath::Built{ + .drvPath = makeConstantStorePathRef(*drvPath), + .outputs = OutputsSpec::All{}, + }; + (*derivedPathToAttrPaths_.lock())[derivedPath].push_back( + {state->symbols.create("checks"), attr.name, attr2.name}); + drvPaths_.lock()->push_back(std::move(derivedPath)); + } + } } - } - } + }); } - } - else if (name == "formatter") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + else if (name == "formatter") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "packages" || name == "devShells") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - 
state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; + else if (name == "packages" || name == "devShells") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + futures.spawn(3, [&, name]() { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) + checkDerivation( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + }; + }); } - } - else if (name == "apps") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkApp( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; + else if (name == "apps") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) + checkApp( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + }; + } } - } - else if (name == "defaultPackage" || name == "devShell") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, 
attr.pos); - }; + else if (name == "defaultPackage" || name == "devShell") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "defaultApp") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkApp(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + else if (name == "defaultApp") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkApp(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "legacyPackages") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - checkSystemName(state->symbols[attr.name], attr.pos); - checkSystemType(state->symbols[attr.name], attr.pos); - // FIXME: do getDerivations? + else if (name == "legacyPackages") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + checkSystemName(state->symbols[attr.name], attr.pos); + checkSystemType(state->symbols[attr.name], attr.pos); + // FIXME: do getDerivations? 
+ } } - } - else if (name == "overlay") - checkOverlay(name, vOutput, pos); + else if (name == "overlay") + checkOverlay(name, vOutput, pos); - else if (name == "overlays") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkOverlay(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "overlays") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkOverlay(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "nixosModule") - checkModule(name, vOutput, pos); + else if (name == "nixosModule") + checkModule(name, vOutput, pos); - else if (name == "nixosModules") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkModule(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "nixosModules") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkModule(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "nixosConfigurations") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkNixOSConfiguration( - fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "nixosConfigurations") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkNixOSConfiguration( + fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "hydraJobs") - checkHydraJobs(name, vOutput, pos); + else if (name == "hydraJobs") + checkHydraJobs(std::string(name), vOutput, pos); - else if (name == "defaultTemplate") - checkTemplate(name, vOutput, pos); + else if (name == "defaultTemplate") + checkTemplate(name, vOutput, pos); - else if (name == "templates") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkTemplate(fmt("%s.%s", name, 
state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "templates") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkTemplate(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "defaultBundler") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkBundler(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + else if (name == "defaultBundler") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkBundler(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "bundlers") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - checkBundler( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - } - }; + else if (name == "bundlers") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) { + checkBundler( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + } + }; + } } - } - else if ( - name == "lib" || name == "darwinConfigurations" || name == "darwinModules" - || name == "flakeModule" || 
name == "flakeModules" || name == "herculesCI" - || name == "homeConfigurations" || name == "homeModule" || name == "homeModules" - || name == "nixopsConfigurations") - // Known but unchecked community attribute - ; + else if ( + name == "lib" || name == "darwinConfigurations" || name == "darwinModules" + || name == "flakeModule" || name == "flakeModules" || name == "herculesCI" + || name == "homeConfigurations" || name == "homeModule" || name == "homeModules" + || name == "nixopsConfigurations") + // Known but unchecked community attribute + ; - else - warn("unknown flake output '%s'", name); + else + warn("unknown flake output '%s'", name); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking flake output '%s'", name)); - reportError(e); - } + } catch (Error & e) { + e.addTrace(resolve(pos), HintFmt("while checking flake output '%s'", name)); + reportError(e); + } + }); }); - } + }; - if (build && !attrPathsByDrv.empty()) { - auto keys = std::views::keys(attrPathsByDrv); - std::vector drvPaths(keys.begin(), keys.end()); + futures.spawn(1, checkFlake); + futures.finishAll(); + + auto drvPaths(drvPaths_.lock()); + auto derivedPathToAttrPaths(derivedPathToAttrPaths_.lock()); + + if (build && !drvPaths->empty()) { // TODO: This filtering of substitutable paths is a temporary workaround until // https://github.com/NixOS/nix/issues/5025 (union stores) is implemented. // @@ -797,52 +819,67 @@ struct CmdFlakeCheck : FlakeCommand // For now, we skip building derivations whose outputs are already available // via substitution, as `nix flake check` only needs to verify buildability, // not actually produce the outputs. 
- auto missing = store->queryMissing(drvPaths); + state->waitForAllPaths(); + auto missing = store->queryMissing(*drvPaths); std::vector toBuild; + std::set toBuildSet; for (auto & path : missing.willBuild) { - toBuild.emplace_back( - DerivedPath::Built{ - .drvPath = makeConstantStorePathRef(path), - .outputs = OutputsSpec::All{}, - }); + auto derivedPath = DerivedPath::Built{ + .drvPath = makeConstantStorePathRef(path), + .outputs = OutputsSpec::All{}, + }; + toBuild.emplace_back(derivedPath); + toBuildSet.insert(std::move(derivedPath)); } + for (auto & [derivedPath, attrPaths] : *derivedPathToAttrPaths) + if (!toBuildSet.contains(derivedPath)) + for (auto & attrPath : attrPaths) + notice( + "✅ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_ITALIC ANSI_FAINT " (previously built)" ANSI_NORMAL, + attrPath.to_string(*state)); + + // FIXME: should start building while evaluating. Activity act(*logger, lvlInfo, actUnknown, fmt("running %d flake checks", toBuild.size())); - auto results = store->buildPathsWithResults(toBuild); - - // Report build failures with attribute paths - for (auto & result : results) { - if (auto * failure = result.tryGetFailure()) { - auto it = attrPathsByDrv.find(result.path); - if (it != attrPathsByDrv.end() && !it->second.empty()) { - for (auto & attrPath : it->second) { - reportError(Error( - "failed to build attribute '%s', build of '%s' failed: %s", - attrPath.to_string(*state), - result.path.to_string(*store), - failure->errorMsg)); - } - } else { - // Derivation has no attribute path (e.g., a build dependency) - reportError( - Error("build of '%s' failed: %s", result.path.to_string(*store), failure->errorMsg)); + auto buildResults = store->buildPathsWithResults(toBuild); + assert(buildResults.size() == toBuild.size()); + + // Report successes first. 
+ for (auto & buildResult : buildResults) + if (buildResult.tryGetSuccess()) + for (auto & attrPath : (*derivedPathToAttrPaths)[buildResult.path]) + notice("✅ " ANSI_BOLD "%s" ANSI_NORMAL, attrPath.to_string(*state)); + + // Then cancelled builds. + for (auto & buildResult : buildResults) + if (buildResult.isCancelled()) + for (auto & attrPath : (*derivedPathToAttrPaths)[buildResult.path]) + notice("❓ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_FAINT " (cancelled)", attrPath.to_string(*state)); + + // Then failures. + for (auto & buildResult : buildResults) + if (auto failure = buildResult.tryGetFailure(); failure && !buildResult.isCancelled()) + try { + hasErrors = true; + for (auto & attrPath : (*derivedPathToAttrPaths)[buildResult.path]) + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, attrPath.to_string(*state)); + failure->rethrow(); + } catch (Error & e) { + logError(e.info()); } - } - } } - if (hasErrors) - throw Error("some errors were encountered during the evaluation"); - - logger->log(lvlInfo, ANSI_GREEN "all checks passed!" ANSI_NORMAL); - if (!omittedSystems.empty()) { + if (!omittedSystems.lock()->empty()) { // TODO: empty system is not visible; render all as nix strings? 
warn( "The check omitted these incompatible systems: %s\n" "Use '--all-systems' to check all.", - concatStringsSep(", ", omittedSystems)); - }; + concatStringsSep(", ", *omittedSystems.lock())); + } + + if (hasErrors) + throw Exit(1); }; }; @@ -851,7 +888,7 @@ static Strings defaultTemplateAttrPaths = {"templates.default", "defaultTemplate struct CmdFlakeInitCommon : virtual Args, EvalCommand { - std::string templateUrl = "templates"; + std::string templateUrl = "https://flakehub.com/f/DeterminateSystems/flake-templates/0.1"; Path destDir; const LockFlags lockFlags{.writeLockFile = false}; @@ -1083,7 +1120,8 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs StorePathSet sources; - auto storePath = store->toStorePath(flake.flake.path.path.abs()).first; + auto storePath = dryRun ? flake.flake.lockedRef.input.computeStorePath(*store) + : std::get(flake.flake.lockedRef.input.fetchToStore(fetchSettings, *store)); sources.insert(storePath); @@ -1096,7 +1134,8 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs std::optional storePath; if (!(*inputNode)->lockedRef.input.isRelative()) { storePath = dryRun ? (*inputNode)->lockedRef.input.computeStorePath(*store) - : (*inputNode)->lockedRef.input.fetchToStore(fetchSettings, *store).first; + : std::get( + (*inputNode)->lockedRef.input.fetchToStore(fetchSettings, *store)); sources.insert(*storePath); } if (json) { @@ -1168,124 +1207,57 @@ struct CmdFlakeShow : FlakeCommand, MixJSON auto flake = make_ref(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); - std::function - hasContent; - - // For frameworks it's important that structures are as lazy as possible - // to prevent infinite recursions, performance issues and errors that - // aren't related to the thing to evaluate. As a consequence, they have - // to emit more attributes than strictly (sic) necessary. 
- // However, these attributes with empty values are not useful to the user - // so we omit them. - hasContent = [&](eval_cache::AttrCursor & visitor, const AttrPath & attrPath, const Symbol & attr) -> bool { - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto attrPathS = attrPath2.resolve(*state); - const auto & attrName = state->symbols[attr]; - - auto visitor2 = visitor.getAttr(attrName); - - try { - if ((attrPathS[0] == "apps" || attrPathS[0] == "checks" || attrPathS[0] == "devShells" - || attrPathS[0] == "legacyPackages" || attrPathS[0] == "packages") - && (attrPathS.size() == 1 || attrPathS.size() == 2)) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } - - if ((attrPathS.size() == 1) - && (attrPathS[0] == "formatter" || attrPathS[0] == "nixosConfigurations" - || attrPathS[0] == "nixosModules" || attrPathS[0] == "overlays")) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } + auto cache = openEvalCache(*state, flake); - // If we don't recognize it, it's probably content - return true; - } catch (EvalError & e) { - // Some attrs may contain errors, e.g. legacyPackages of - // nixpkgs. We still want to recurse into it, instead of - // skipping it at all. 
- return true; - } - }; + auto j = nlohmann::json::object(); - std::function - visit; + std::function visit; - visit = [&](eval_cache::AttrCursor & visitor, - const AttrPath & attrPath, - const std::string & headerPrefix, - const std::string & nextPrefix) -> nlohmann::json { - auto j = nlohmann::json::object(); + FutureVector futures(*state->executor); + visit = [&](eval_cache::AttrCursor & visitor, nlohmann::json & j) { + auto attrPath = visitor.getAttrPath(); auto attrPathS = attrPath.resolve(*state); Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", attrPath.to_string(*state))); try { auto recurse = [&]() { - if (!json) - logger->cout("%s", headerPrefix); - std::vector attrs; for (const auto & attr : visitor.getAttrs()) { - if (hasContent(visitor, attrPath, attr)) - attrs.push_back(attr); - } - - for (const auto & [i, attr] : enumerate(attrs)) { const auto & attrName = state->symbols[attr]; - bool last = i + 1 == attrs.size(); auto visitor2 = visitor.getAttr(attrName); - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto j2 = visit( - *visitor2, - attrPath2, - fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, - nextPrefix, - last ? treeLast : treeConn, - attrName), - nextPrefix + (last ? treeNull : treeLine)); - if (json) - j.emplace(attrName, std::move(j2)); + auto & j2 = *j.emplace(attrName, nlohmann::json::object()).first; + futures.spawn(1, [&, visitor2]() { visit(*visitor2, j2); }); } }; auto showDerivation = [&]() { auto name = visitor.getAttr(state->s.name)->getString(); - - if (json) { - std::optional description; - if (auto aMeta = visitor.maybeGetAttr(state->s.meta)) { - if (auto aDescription = aMeta->maybeGetAttr(state->s.description)) - description = aDescription->getString(); - } - j.emplace("type", "derivation"); - j.emplace("name", name); - j.emplace("description", description ? 
*description : ""); - } else { - logger->cout( - "%s: %s '%s'", - headerPrefix, + std::optional description; + if (auto aMeta = visitor.maybeGetAttr(state->s.meta)) { + if (auto aDescription = aMeta->maybeGetAttr(state->s.description)) + description = aDescription->getString(); + } + j.emplace("type", "derivation"); + if (!json) + j.emplace( + "subtype", attrPath.size() == 2 && attrPathS[0] == "devShell" ? "development environment" : attrPath.size() >= 2 && attrPathS[0] == "devShells" ? "development environment" : attrPath.size() == 3 && attrPathS[0] == "checks" ? "derivation" : attrPath.size() >= 1 && attrPathS[0] == "hydraJobs" ? "derivation" - : "package", - name); + : "package"); + j.emplace("name", name); + if (description) + j.emplace("description", *description); + }; + + auto omit = [&](std::string_view flag) { + if (json) + logger->warn(fmt("%s omitted (use '%s' to show)", attrPath.to_string(*state), flag)); + else { + j.emplace("type", "omitted"); + j.emplace("message", fmt(ANSI_WARNING "omitted" ANSI_NORMAL " (use '%s' to show)", flag)); } }; @@ -1307,13 +1279,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON || (attrPath.size() == 3 && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))) { if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--all-systems' to show)", attrPath.to_string(*state))); - } + omit("--all-systems"); } else { try { if (visitor.isDerivation()) @@ -1323,14 +1289,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON logger->warn(fmt("%s is not a derivation", name)); } } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", 
attrPath.to_string(*state))); - } + logger->warn( + fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); } } } @@ -1342,14 +1302,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON else recurse(); } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); - } + logger->warn( + fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); } } @@ -1357,20 +1311,9 @@ struct CmdFlakeShow : FlakeCommand, MixJSON if (attrPath.size() == 1) recurse(); else if (!showLegacy) { - if (!json) - logger->cout(fmt( - "%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--legacy' to show)", headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--legacy' to show)", attrPath.to_string(*state))); - } + omit("--legacy"); } else if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--all-systems' to show)", attrPath.to_string(*state))); - } + omit("--all-systems"); } else { try { if (visitor.isDerivation()) @@ -1379,14 +1322,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON // FIXME: handle recurseIntoAttrs recurse(); } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); - } + logger->warn( + fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); } } } @@ -1402,28 +1339,17 @@ struct CmdFlakeShow : FlakeCommand, MixJSON } if (!aType || aType->getString() != "app") state->error("not an app 
definition").debugThrow(); - if (json) { - j.emplace("type", "app"); - if (description) - j.emplace("description", *description); - } else { - logger->cout( - "%s: app: " ANSI_BOLD "%s" ANSI_NORMAL, - headerPrefix, - description ? *description : "no description"); - } + j.emplace("type", "app"); + if (description) + j.emplace("description", *description); } else if ( (attrPath.size() == 1 && attrPathS[0] == "defaultTemplate") || (attrPath.size() == 2 && attrPathS[0] == "templates")) { auto description = visitor.getAttr("description")->getString(); - if (json) { - j.emplace("type", "template"); - j.emplace("description", description); - } else { - logger->cout("%s: template: " ANSI_BOLD "%s" ANSI_NORMAL, headerPrefix, description); - } + j.emplace("type", "template"); + j.emplace("description", description); } else { @@ -1436,25 +1362,85 @@ struct CmdFlakeShow : FlakeCommand, MixJSON || (attrPath.size() == 2 && attrPathS[0] == "nixosModules") ? std::make_pair("nixos-module", "NixOS module") : std::make_pair("unknown", "unknown"); - if (json) { - j.emplace("type", type); - } else { - logger->cout("%s: " ANSI_WARNING "%s" ANSI_NORMAL, headerPrefix, description); - } + j.emplace("type", type); + j.emplace("description", description); } } catch (EvalError & e) { if (!(attrPath.size() > 0 && attrPathS[0] == "legacyPackages")) throw; } - - return j; }; - auto cache = openEvalCache(*state, ref(flake)); + futures.spawn(1, [&]() { visit(*cache->getRoot(), j); }); + futures.finishAll(); - auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); if (json) printJSON(j); + else { + + // For frameworks it's important that structures are as + // lazy as possible to prevent infinite recursions, + // performance issues and errors that aren't related to + // the thing to evaluate. As a consequence, they have to + // emit more attributes than strictly (sic) necessary. 
+ // However, these attributes with empty values are not + // useful to the user so we omit them. + std::function hasContent; + + hasContent = [&](const nlohmann::json & j) -> bool { + if (j.find("type") != j.end()) + return true; + else { + for (auto & j2 : j) + if (hasContent(j2)) + return true; + return false; + } + }; + + // Render the JSON into a tree representation. + std::function + render; + + render = [&](nlohmann::json j, const std::string & headerPrefix, const std::string & nextPrefix) { + if (j.find("type") != j.end()) { + std::string s; + + std::string type = j["type"]; + if (type == "omitted") { + s = j["message"]; + } else if (type == "derivation") { + s = (std::string) j["subtype"] + " '" + (std::string) j["name"] + "'"; + } else { + s = type; + } + + logger->cout("%s: %s", headerPrefix, s); + return; + } + + logger->cout("%s", headerPrefix); + + auto nonEmpty = nlohmann::json::object(); + for (const auto & j2 : j.items()) { + if (hasContent(j2.value())) + nonEmpty[j2.key()] = j2.value(); + } + + for (const auto & [i, j2] : enumerate(nonEmpty.items())) { + bool last = i + 1 == nonEmpty.size(); + render( + j2.value(), + fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, + nextPrefix, + last ? treeLast : treeConn, + j2.key()), + nextPrefix + (last ? 
treeNull : treeLine)); + } + }; + + render(j, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); + } } }; @@ -1538,12 +1524,6 @@ struct CmdFlake : NixMultiCommand #include "flake.md" ; } - - void run() override - { - experimentalFeatureSettings.require(Xp::Flakes); - NixMultiCommand::run(); - } }; static auto rCmdFlake = registerCommand("flake"); diff --git a/src/nix/get-env.sh b/src/nix/get-env.sh index 39fa6f9ac8f..5b6162b4ba2 100644 --- a/src/nix/get-env.sh +++ b/src/nix/get-env.sh @@ -17,6 +17,7 @@ __functions="$(declare -F)" __dumpEnv() { printf '{\n' + printf ' "version": 1,\n' printf ' "bashFunctions": {\n' local __first=1 diff --git a/src/nix/log.md b/src/nix/log.md index 01e9801df72..9d526bb420c 100644 --- a/src/nix/log.md +++ b/src/nix/log.md @@ -11,7 +11,7 @@ R""( * Get the build log of a specific store path: ```console - # nix log /nix/store/lmngj4wcm9rkv3w4dfhzhcyij3195hiq-thunderbird-52.2.1 + # nix log /nix/store/vaph2hfdmnipqr90v6g5mcdn8h5p5iss-thunderbird-52.2.1 ``` * Get a build log from a specific binary cache: diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 66f52a18afc..012850cc05d 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -4,12 +4,13 @@ #include "nix/main/common-args.hh" #include +#include "ls.hh" + using namespace nix; -struct MixLs : virtual Args, MixJSON +struct MixLs : virtual Args, MixJSON, MixLongListing { bool recursive = false; - bool verbose = false; bool showDirectory = false; MixLs() @@ -21,13 +22,6 @@ struct MixLs : virtual Args, MixJSON .handler = {&recursive, true}, }); - addFlag({ - .longName = "long", - .shortName = 'l', - .description = "Show detailed file information.", - .handler = {&verbose, true}, - }); - addFlag({ .longName = "directory", .shortName = 'd', @@ -41,13 +35,13 @@ struct MixLs : virtual Args, MixJSON std::function doPath; auto showFile = [&](const CanonPath & curPath, std::string_view relPath) { - if (verbose) { + if (longListing) { auto st = accessor->lstat(curPath); std::string tp = st.type 
== SourceAccessor::Type::tRegular ? (st.isExecutable ? "-r-xr-xr-x" : "-r--r--r--") : st.type == SourceAccessor::Type::tSymlink ? "lrwxrwxrwx" : "dr-xr-xr-x"; - auto line = fmt("%s %20d %s", tp, st.fileSize.value_or(0), relPath); + auto line = fmt("%s %9d %s", tp, st.fileSize.value_or(0), relPath); if (st.type == SourceAccessor::Type::tSymlink) line += " -> " + accessor->readLink(curPath); logger->cout(line); diff --git a/src/nix/ls.hh b/src/nix/ls.hh new file mode 100644 index 00000000000..36e61162035 --- /dev/null +++ b/src/nix/ls.hh @@ -0,0 +1,22 @@ +#pragma once + +#include "nix/util/args.hh" + +namespace nix { + +struct MixLongListing : virtual Args +{ + bool longListing = false; + + MixLongListing() + { + addFlag({ + .longName = "long", + .shortName = 'l', + .description = "Show detailed file information.", + .handler = {&longListing, true}, + }); + } +}; + +} // namespace nix diff --git a/src/nix/main.cc b/src/nix/main.cc index 93c1dc42a38..47ef6ea20a1 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -86,6 +86,20 @@ static bool haveInternet() #endif } +static void disableNet() +{ + // FIXME: should check for command line overrides only. + if (!settings.useSubstitutes.overridden) + // FIXME: should not disable local substituters (like file:///). 
+ settings.useSubstitutes = false; + if (!settings.tarballTtl.overridden) + settings.tarballTtl = std::numeric_limits::max(); + if (!fileTransferSettings.tries.overridden) + fileTransferSettings.tries = 0; + if (!fileTransferSettings.connectTimeout.overridden) + fileTransferSettings.connectTimeout = 1; +} + std::string programPath; struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs @@ -119,7 +133,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Print full build logs on standard error.", .category = loggingCategory, .handler = {[&]() { logger->setPrintBuildLogs(true); }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -135,7 +148,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Disable substituters and consider all previously downloaded files up-to-date.", .category = miscCategory, .handler = {[&]() { useNet = false; }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -143,7 +155,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Consider all previously downloaded files out-of-date.", .category = miscCategory, .handler = {[&]() { refresh = true; }}, - .experimentalFeature = Xp::NixCommand, }); aliases = { @@ -441,7 +452,6 @@ void mainWrapped(int argc, char ** argv) if (argc == 2 && std::string(argv[1]) == "__dump-language") { experimentalFeatureSettings.experimentalFeatures = { - Xp::Flakes, Xp::FetchClosure, Xp::DynamicDerivations, Xp::FetchTree, @@ -500,6 +510,12 @@ void mainWrapped(int argc, char ** argv) } }); + if (getEnv("NIX_GET_COMPLETIONS")) + /* Avoid fetching stuff during tab completion. We have to this + early because we haven't checked `haveInternet()` yet + (below). 
*/ + disableNet(); + try { auto isNixCommand = std::regex_search(programName, std::regex("nix$")); auto allowShebang = isNixCommand && argc > 1; @@ -511,6 +527,8 @@ void mainWrapped(int argc, char ** argv) applyJSONLogger(); + printTalkative("Nix %s", version()); + if (args.helpRequested) { std::vector subcommand; MultiCommand * command = &args; @@ -543,17 +561,8 @@ void mainWrapped(int argc, char ** argv) args.useNet = false; } - if (!args.useNet) { - // FIXME: should check for command line overrides only. - if (!settings.useSubstitutes.overridden) - settings.useSubstitutes = false; - if (!settings.tarballTtl.overridden) - settings.tarballTtl = std::numeric_limits::max(); - if (!fileTransferSettings.tries.overridden) - fileTransferSettings.tries = 0; - if (!fileTransferSettings.connectTimeout.overridden) - fileTransferSettings.connectTimeout = 1; - } + if (!args.useNet) + disableNet(); if (args.refresh) { settings.tarballTtl = 0; @@ -579,15 +588,15 @@ void mainWrapped(int argc, char ** argv) int main(int argc, char ** argv) { + using namespace nix; + // The CLI has a more detailed version than the libraries; see nixVersion. - nix::nixVersion = NIX_CLI_VERSION; + nixVersion = NIX_CLI_VERSION; #ifndef _WIN32 // Increase the default stack size for the evaluator and for // libstdc++'s std::regex. - // This used to be 64 MiB, but macOS as deployed on GitHub Actions has a - // hard limit slightly under that, so we round it down a bit. 
- nix::setStackSize(60 * 1024 * 1024); + setStackSize(evalStackSize); #endif - return nix::handleExceptions(argv[0], [&]() { nix::mainWrapped(argc, argv); }); + return handleExceptions(argv[0], [&]() { mainWrapped(argc, argv); }); } diff --git a/src/nix/make-content-addressed.md b/src/nix/make-content-addressed.md index e6a51c83ada..4acbba6f4fb 100644 --- a/src/nix/make-content-addressed.md +++ b/src/nix/make-content-addressed.md @@ -7,7 +7,7 @@ R""( ```console # nix store make-content-addressed nixpkgs#hello … - rewrote '/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10' to '/nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10' + rewrote '/nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10' to '/nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10' ``` Since the resulting paths are content-addressed, they are always @@ -22,7 +22,7 @@ R""( ```console # nix copy --to /tmp/nix --trusted-public-keys '' nixpkgs#hello - cannot add path '/nix/store/zy9wbxwcygrwnh8n2w9qbbcr6zk87m26-libunistring-0.9.10' because it lacks a signature by a trusted key + cannot add path '/nix/store/gs7mh6q22l1ivxazxja2mjlsdwhw8zg9-libunistring-0.9.10' because it lacks a signature by a trusted key ``` * Create a content-addressed representation of the current NixOS diff --git a/src/nix/meson.build b/src/nix/meson.build index 61aac6b4d8f..77e3c05dc5e 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -78,6 +78,7 @@ nix_sources = [ config_priv_h ] + files( 'env.cc', 'eval.cc', 'flake-prefetch-inputs.cc', + 'flake-prefetch-inputs.cc', 'flake.cc', 'formatter.cc', 'hash.cc', @@ -87,11 +88,13 @@ nix_sources = [ config_priv_h ] + files( 'make-content-addressed.cc', 'man-pages.cc', 'nar.cc', + 'nario.cc', 'optimise-store.cc', 'path-from-hash-part.cc', 'path-info.cc', 'prefetch.cc', 'profile.cc', + 'ps.cc', 'realisation.cc', 'registry.cc', 'repl.cc', diff --git a/src/nix/nario-export.md b/src/nix/nario-export.md new file mode 100644 index 00000000000..2480733c1ca --- /dev/null +++ 
b/src/nix/nario-export.md @@ -0,0 +1,29 @@ +R""( + +# Examples + +* Export the closure of the build of `nixpkgs#hello`: + + ```console + # nix nario export --format 2 -r nixpkgs#hello > dump.nario + ``` + + It can be imported into another store: + + ```console + # nix nario import --no-check-sigs < dump.nario + ``` + +# Description + +This command prints to standard output a serialization of the specified store paths in `nario` format. This serialization can be imported into another store using `nix nario import`. + +References of a path are not exported by default; use `-r` to export a complete closure. +Paths are exported in topologically sorted order (i.e. if path `X` refers to `Y`, then `Y` appears before `X`). +You must specify the desired `nario` version. Currently the following versions are supported: + +* `1`: This version is compatible with the legacy `nix-store --export` and `nix-store --import` commands. It should be avoided because it is not memory-efficient on import. It does not support signatures, so you have to use `--no-check-sigs` on import. + +* `2`: The latest version. Recommended. + +)"" diff --git a/src/nix/nario-import.md b/src/nix/nario-import.md new file mode 100644 index 00000000000..9cba60c6220 --- /dev/null +++ b/src/nix/nario-import.md @@ -0,0 +1,15 @@ +R""( + +# Examples + +* Import store paths from the file named `dump`: + + ```console + # nix nario import < dump.nario + ``` + +# Description + +This command reads from standard input a serialization of store paths produced by `nix nario export` and adds them to the Nix store. 
+ +)"" diff --git a/src/nix/nario-list.md b/src/nix/nario-list.md new file mode 100644 index 00000000000..c050457b365 --- /dev/null +++ b/src/nix/nario-list.md @@ -0,0 +1,43 @@ +R""( + +# Examples + +* List the contents of a nario file: + + ```console + # nix nario list < dump.nario + /nix/store/f671jqvjcz37fsprzqn5jjsmyjj69p9b-xgcc-14.2.1.20250322-libgcc: 201856 bytes + /nix/store/n7iwblclbrz20xinvy4cxrvippdhvqll-libunistring-1.3: 2070240 bytes + … + ``` + +* Use `--json` to get detailed information in JSON format: + + ```console + # nix nario list --json < dump.nario + { + "paths": { + "/nix/store/m1r53pnn…-hello-2.12.1": { + "ca": null, + "deriver": "/nix/store/qa8is0vm…-hello-2.12.1.drv", + "narHash": "sha256-KSCYs4J7tFa+oX7W5M4D7ZYNvrWtdcWTdTL5fQk+za8=", + "narSize": 234672, + "references": [ + "/nix/store/g8zyryr9…-glibc-2.40-66", + "/nix/store/m1r53pnn…-hello-2.12.1" + ], + "registrationTime": 1756900709, + "signatures": [ "cache.nixos.org-1:QbG7A…" ], + "ultimate": false + }, + … + }, + "version": 1 + } + ``` + +# Description + +This command lists the contents of a nario file read from standard input. 
+ +)"" diff --git a/src/nix/nario.cc b/src/nix/nario.cc new file mode 100644 index 00000000000..df9ae16340b --- /dev/null +++ b/src/nix/nario.cc @@ -0,0 +1,345 @@ +#include "nix/cmd/command.hh" +#include "nix/main/shared.hh" +#include "nix/store/store-api.hh" +#include "nix/store/export-import.hh" +#include "nix/util/callback.hh" +#include "nix/util/fs-sink.hh" +#include "nix/util/archive.hh" + +#include "ls.hh" + +#include + +using namespace nix; + +struct CmdNario : NixMultiCommand +{ + CmdNario() + : NixMultiCommand("nario", RegisterCommand::getCommandsFor({"nario"})) + { + } + + std::string description() override + { + return "operations for manipulating nario files"; + } + + Category category() override + { + return catUtility; + } +}; + +static auto rCmdNario = registerCommand("nario"); + +struct CmdNarioExport : StorePathsCommand +{ + unsigned int version = 0; + + CmdNarioExport() + { + addFlag({ + .longName = "format", + .description = "Version of the nario format to use. Must be `1` or `2`.", + .labels = {"nario-format"}, + .handler = {&version}, + .required = true, + }); + } + + std::string description() override + { + return "serialize store paths to standard output in nario format"; + } + + std::string doc() override + { + return +#include "nario-export.md" + ; + } + + void run(ref store, StorePaths && storePaths) override + { + auto fd = getStandardOutput(); + if (isatty(fd)) + throw UsageError("refusing to write nario to a terminal"); + FdSink sink(std::move(fd)); + exportPaths(*store, StorePathSet(storePaths.begin(), storePaths.end()), sink, version); + } +}; + +static auto rCmdNarioExport = registerCommand2({"nario", "export"}); + +static FdSource getNarioSource() +{ + auto fd = getStandardInput(); + if (isatty(fd)) + throw UsageError("refusing to read nario from a terminal"); + return FdSource(std::move(fd)); +} + +struct CmdNarioImport : StoreCommand, MixNoCheckSigs +{ + std::string description() override + { + return "import store paths from a 
nario file on standard input"; + } + + std::string doc() override + { + return +#include "nario-import.md" + ; + } + + void run(ref store) override + { + auto source{getNarioSource()}; + importPaths(*store, source, checkSigs); + } +}; + +static auto rCmdNarioImport = registerCommand2({"nario", "import"}); + +nlohmann::json listNar(Source & source) +{ + struct : FileSystemObjectSink + { + nlohmann::json root = nlohmann::json::object(); + + nlohmann::json & makeObject(const CanonPath & path, std::string_view type) + { + auto * cur = &root; + for (auto & c : path) { + assert((*cur)["type"] == "directory"); + auto i = (*cur)["entries"].emplace(c, nlohmann::json::object()).first; + cur = &i.value(); + } + auto inserted = cur->emplace("type", type).second; + assert(inserted); + return *cur; + } + + void createDirectory(const CanonPath & path) override + { + auto & j = makeObject(path, "directory"); + j["entries"] = nlohmann::json::object(); + } + + void createRegularFile(const CanonPath & path, std::function func) override + { + struct : CreateRegularFileSink + { + bool executable = false; + std::optional size; + + void operator()(std::string_view data) override {} + + void preallocateContents(uint64_t s) override + { + size = s; + } + + void isExecutable() override + { + executable = true; + } + } crf; + + crf.skipContents = true; + + func(crf); + + auto & j = makeObject(path, "regular"); + j.emplace("size", crf.size.value()); + if (crf.executable) + j.emplace("executable", true); + } + + void createSymlink(const CanonPath & path, const std::string & target) override + { + auto & j = makeObject(path, "symlink"); + j.emplace("target", target); + } + + } parseSink; + + parseDump(parseSink, source); + + return parseSink.root; +} + +void renderNarListing(const CanonPath & prefix, const nlohmann::json & root, bool longListing) +{ + std::function recurse; + recurse = [&](const nlohmann::json & json, const CanonPath & path) { + auto type = json["type"]; + + if (longListing) { 
+ auto tp = type == "regular" ? (json.find("executable") != json.end() ? "-r-xr-xr-x" : "-r--r--r--") + : type == "symlink" ? "lrwxrwxrwx" + : "dr-xr-xr-x"; + auto line = fmt("%s %9d %s", tp, type == "regular" ? (uint64_t) json["size"] : 0, prefix / path); + if (type == "symlink") + line += " -> " + (std::string) json["target"]; + logger->cout(line); + } else + logger->cout(fmt("%s", prefix / path)); + + if (type == "directory") { + for (auto & entry : json["entries"].items()) { + recurse(entry.value(), path / entry.key()); + } + } + }; + + recurse(root, CanonPath::root); +} + +struct CmdNarioList : Command, MixJSON, MixLongListing +{ + bool listContents = false; + + CmdNarioList() + { + addFlag({ + .longName = "recursive", + .shortName = 'R', + .description = "List the contents of NARs inside the nario.", + .handler = {&listContents, true}, + }); + } + + std::string description() override + { + return "list the contents of a nario file"; + } + + std::string doc() override + { + return +#include "nario-list.md" + ; + } + + void run() override + { + struct Config : StoreConfig + { + Config(const Params & params) + : StoreConfig(params) + { + } + + ref openStore() const override + { + abort(); + } + }; + + struct ListingStore : Store + { + std::optional json; + CmdNarioList & cmd; + + ListingStore(ref config, CmdNarioList & cmd) + : Store{*config} + , cmd(cmd) + { + } + + void queryPathInfoUncached( + const StorePath & path, Callback> callback) noexcept override + { + callback(nullptr); + } + + std::optional isTrustedClient() override + { + return Trusted; + } + + std::optional queryPathFromHashPart(const std::string & hashPart) override + { + return std::nullopt; + } + + void + addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override + { + std::optional contents; + if (cmd.listContents) + contents = listNar(source); + else + source.skip(info.narSize); + + if (json) { + // FIXME: make the JSON format configurable. 
+ auto obj = info.toJSON(this, true, PathInfoJsonFormat::V1); + if (contents) + obj.emplace("contents", *contents); + json->emplace(printStorePath(info.path), std::move(obj)); + } else { + if (contents) + renderNarListing(CanonPath(printStorePath(info.path)), *contents, cmd.longListing); + else + logger->cout(fmt("%s: %d bytes", printStorePath(info.path), info.narSize)); + } + } + + StorePath addToStoreFromDump( + Source & dump, + std::string_view name, + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair) override + { + unsupported("addToStoreFromDump"); + } + + void narFromPath(const StorePath & path, Sink & sink) override + { + unsupported("narFromPath"); + } + + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override + { + callback(nullptr); + } + + ref getFSAccessor(bool requireValidPath) override + { + return makeEmptySourceAccessor(); + } + + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override + { + unsupported("getFSAccessor"); + } + + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + }; + + auto source{getNarioSource()}; + auto config = make_ref(StoreConfig::Params()); + ListingStore lister(config, *this); + if (json) + lister.json = nlohmann::json::object(); + importPaths(lister, source, NoCheckSigs); + if (json) { + auto j = nlohmann::json::object(); + j["version"] = 1; + j["paths"] = std::move(*lister.json); + printJSON(j); + } + } +}; + +static auto rCmdNarioList = registerCommand2({"nario", "list"}); diff --git a/src/nix/nix-build/nix-build.cc b/src/nix/nix-build/nix-build.cc index a21d1a56549..217382ef8ee 100644 --- a/src/nix/nix-build/nix-build.cc +++ b/src/nix/nix-build/nix-build.cc @@ -452,7 +452,9 @@ static void main_nix_build(int argc, char ** argv) throw UsageError("nix-shell requires a single derivation"); auto & 
packageInfo = drvs.front(); - auto drv = evalStore->derivationFromPath(packageInfo.requireDrvPath()); + auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); + auto drv = evalStore->derivationFromPath(drvPath); std::vector pathsToBuild; RealisedPath::Set pathsToCopy; @@ -476,6 +478,7 @@ static void main_nix_build(int argc, char ** argv) throw Error("the 'bashInteractive' attribute in did not evaluate to a derivation"); auto bashDrv = drv->requireDrvPath(); + state->waitForPath(bashDrv); pathsToBuild.push_back( DerivedPath::Built{ .drvPath = makeConstantStorePathRef(bashDrv), @@ -682,6 +685,7 @@ static void main_nix_build(int argc, char ** argv) for (auto & packageInfo : drvs) { auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); auto outputName = packageInfo.queryOutputName(); if (outputName == "") diff --git a/src/nix/nix-channel/nix-channel.cc b/src/nix/nix-channel/nix-channel.cc index 6d9a0ea5898..00723ba2b09 100644 --- a/src/nix/nix-channel/nix-channel.cc +++ b/src/nix/nix-channel/nix-channel.cc @@ -179,6 +179,11 @@ static void update(const StringSet & channelNames) static int main_nix_channel(int argc, char ** argv) { + warn( + "nix-channel is deprecated in favor of flakes in Determinate Nix. \ +See https://zero-to-nix.com for a guide to Nix flakes. \ +For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34."); + { // Figure out the name of the `.nix-channels' file to use auto home = getHome(); diff --git a/src/nix/nix-env/nix-env.cc b/src/nix/nix-env/nix-env.cc index 31aa2b3f2cd..f3e6d2acd81 100644 --- a/src/nix/nix-env/nix-env.cc +++ b/src/nix/nix-env/nix-env.cc @@ -747,6 +747,8 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs) drv.setName(globals.forceName); auto drvPath = drv.queryDrvPath(); + if (drvPath) + globals.state->waitForPath(*drvPath); std::vector paths{ drvPath ? 
(DerivedPath) (DerivedPath::Built{ .drvPath = makeConstantStorePathRef(*drvPath), @@ -1062,7 +1064,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) continue; /* For table output. */ - std::vector columns; + TableRow columns; /* For XML output. */ XMLAttrs attrs; diff --git a/src/nix/nix-env/user-env.cc b/src/nix/nix-env/user-env.cc index 5beed78f724..ac36bf97011 100644 --- a/src/nix/nix-env/user-env.cc +++ b/src/nix/nix-env/user-env.cc @@ -38,8 +38,10 @@ bool createUserEnv( exist already. */ std::vector drvsToBuild; for (auto & i : elems) - if (auto drvPath = i.queryDrvPath()) + if (auto drvPath = i.queryDrvPath()) { + state.waitForPath(*drvPath); drvsToBuild.push_back({*drvPath}); + } debug("building user environment dependencies"); state.store->buildPaths(toDerivedPaths(drvsToBuild), state.repair ? bmRepair : bmNormal); @@ -108,7 +110,7 @@ bool createUserEnv( environment. */ auto manifestFile = ({ std::ostringstream str; - printAmbiguous(manifest, state.symbols, str, nullptr, std::numeric_limits::max()); + printAmbiguous(state, manifest, str, nullptr, std::numeric_limits::max()); StringSource source{str.view()}; state.store->addToStoreFromDump( source, @@ -152,6 +154,7 @@ bool createUserEnv( debug("building user environment"); std::vector topLevelDrvs; topLevelDrvs.push_back({topLevelDrv}); + state.waitForPath(topLevelDrv); state.store->buildPaths(toDerivedPaths(topLevelDrvs), state.repair ? bmRepair : bmNormal); /* Switch the current user environment to the output path. 
*/ diff --git a/src/nix/nix-instantiate/nix-instantiate.cc b/src/nix/nix-instantiate/nix-instantiate.cc index 3d5c3e26a46..f09b4078a24 100644 --- a/src/nix/nix-instantiate/nix-instantiate.cc +++ b/src/nix/nix-instantiate/nix-instantiate.cc @@ -17,6 +17,8 @@ #include #include +#include + using namespace nix; static Path gcRoot; @@ -56,19 +58,23 @@ void processExpr( else state.autoCallFunction(autoArgs, v, vRes); if (output == okRaw) - std::cout << *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"); + std::cout << state.devirtualize( + *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"), + context); // We intentionally don't output a newline here. The default PS1 for Bash in NixOS starts with a newline // and other interactive shells like Zsh are smart enough to print a missing newline before the prompt. - else if (output == okXML) - printValueAsXML(state, strict, location, vRes, std::cout, context, noPos); - else if (output == okJSON) { - printValueAsJSON(state, strict, vRes, v.determinePos(noPos), std::cout, context); - std::cout << std::endl; + else if (output == okXML) { + std::ostringstream s; + printValueAsXML(state, strict, location, vRes, s, context, noPos); + std::cout << state.devirtualize(s.str(), context); + } else if (output == okJSON) { + auto j = printValueAsJSON(state, strict, vRes, v.determinePos(noPos), context); + std::cout << state.devirtualize(j.dump(), context) << std::endl; } else { if (strict) state.forceValueDeep(vRes); std::set seen; - printAmbiguous(vRes, state.symbols, std::cout, &seen, std::numeric_limits::max()); + printAmbiguous(state, vRes, std::cout, &seen, std::numeric_limits::max()); std::cout << std::endl; } } else { diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index a2c0aaf3ff8..74697ade110 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -775,7 +775,7 @@ static void opExport(Strings 
opFlags, Strings opArgs) paths.insert(store->followLinksToStorePath(i)); FdSink sink(getStandardOutput()); - exportPaths(*store, paths, sink); + exportPaths(*store, paths, sink, 1); sink.flush(); } diff --git a/src/nix/nix.md b/src/nix/nix.md index 10a2aaee88c..cc31dabbab4 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -48,11 +48,6 @@ manual](https://nix.dev/manual/nix/stable/). # Installables -> **Warning** \ -> Installables are part of the unstable -> [`nix-command` experimental feature](@docroot@/development/experimental-features.md#xp-feature-nix-command), -> and subject to change without notice. - Many `nix` subcommands operate on one or more *installables*. These are command line arguments that represent something that can be realised in the Nix store. @@ -72,13 +67,6 @@ That is, Nix will operate on the default flake output attribute of the flake in ### Flake output attribute -> **Warning** \ -> Flake output attribute installables depend on both the -> [`flakes`](@docroot@/development/experimental-features.md#xp-feature-flakes) -> and -> [`nix-command`](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> experimental features, and subject to change without notice. - Example: `nixpkgs#hello` These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a @@ -140,7 +128,7 @@ If *attrpath* begins with `.` then no prefixes or defaults are attempted. This a ### Store path -Example: `/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10` +Example: `/nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10` These are paths inside the Nix store, or symlinks that resolve to a path in the Nix store. 
@@ -196,7 +184,7 @@ operate are determined as follows: and likewise, using a store path to a "drv" file to specify the derivation: ```console - # nix build '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev,static' + # nix build '/nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^dev,static' … ``` @@ -219,17 +207,17 @@ operate are determined as follows: ```console # nix path-info --closure-size --eval-store auto --store https://cache.nixos.org 'nixpkgs#glibc^*' - /nix/store/g02b1lpbddhymmcjb923kf0l7s9nww58-glibc-2.33-123 33208200 - /nix/store/851dp95qqiisjifi639r0zzg5l465ny4-glibc-2.33-123-bin 36142896 - /nix/store/kdgs3q6r7xdff1p7a9hnjr43xw2404z7-glibc-2.33-123-debug 155787312 - /nix/store/n4xa8h6pbmqmwnq0mmsz08l38abb06zc-glibc-2.33-123-static 42488328 - /nix/store/q6580lr01jpcsqs4r5arlh4ki2c1m9rv-glibc-2.33-123-dev 44200560 + /nix/store/i2fn2mjgihz960bwa7ldab5ra5fhxznh-glibc-2.33-123 33208200 + /nix/store/n2wnn3i47w6dbylh64hdjzgd5rrprdn8-glibc-2.33-123-bin 36142896 + /nix/store/v7dyz518sbkzl8x2a1sgk1lwsfd3d6gm-glibc-2.33-123-debug 155787312 + /nix/store/z4hv6ybyinqw9a3dwyl5k66a91aggylj-glibc-2.33-123-static 42488328 + /nix/store/lrjirf0j1rjnvif6amyp9pfcqr2km385-glibc-2.33-123-dev 44200560 ``` and likewise, using a store path to a "drv" file to specify the derivation: ```console - # nix path-info --closure-size '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*' + # nix path-info --closure-size '/nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^*' … ``` * If you didn't specify the desired outputs, but the derivation has an diff --git a/src/nix/package.nix b/src/nix/package.nix index 8195e6c6ff5..d362d092309 100644 --- a/src/nix/package.nix +++ b/src/nix/package.nix @@ -18,7 +18,7 @@ let in mkMesonExecutable (finalAttrs: { - pname = "nix"; + pname = "determinate-nix"; inherit version; workDir = ./.; diff --git a/src/nix/path-from-hash-part.md b/src/nix/path-from-hash-part.md index 788e13ab6d4..b646aa57dd1 100644 
--- a/src/nix/path-from-hash-part.md +++ b/src/nix/path-from-hash-part.md @@ -5,8 +5,8 @@ R""( * Return the full store path with the given hash part: ```console - # nix store path-from-hash-part --store https://cache.nixos.org/ 0i2jd68mp5g6h2sa5k9c85rb80sn8hi9 - /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + # nix store path-from-hash-part --store https://cache.nixos.org/ qbhyj3blxpw2i6pb7c6grc9185nbnpvy + /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 ``` # Description @@ -15,6 +15,6 @@ Given the hash part of a store path (that is, the 32 characters following `/nix/store/`), return the full store path. This is primarily useful in the implementation of binary caches, where a request for a `.narinfo` file only supplies the hash part -(e.g. `https://cache.nixos.org/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9.narinfo`). +(e.g. `https://cache.nixos.org/qbhyj3blxpw2i6pb7c6grc9185nbnpvy.narinfo`). )"" diff --git a/src/nix/path-info.md b/src/nix/path-info.md index 2e39225b865..bd4a9311cf9 100644 --- a/src/nix/path-info.md +++ b/src/nix/path-info.md @@ -6,7 +6,7 @@ R""( ```console # nix path-info nixpkgs#hello - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 ``` * Show the closure sizes of every path in the current NixOS system @@ -14,8 +14,8 @@ R""( ```console # nix path-info --recursive --closure-size /run/current-system | sort -nk2 - /nix/store/hl5xwp9kdrd1zkm0idm3kkby9q66z404-empty 96 - /nix/store/27324qvqhnxj3rncazmxc4mwy79kz8ha-nameservers 112 + /nix/store/zlnmjjbpv5pwwv911qp0grqi25y80wbs-empty 96 + /nix/store/v40fjpq45135avrmnfm8klbvdhf0dcp7-nameservers 112 … /nix/store/539jkw9a8dyry7clcv60gk6na816j7y8-etc 5783255504 /nix/store/zqamz3cz4dbzfihki2mk7a63mbkxz9xq-nixos-system-machine-20.09.20201112.3090c65 5887562256 @@ -26,8 +26,8 @@ R""( ```console # nix path-info --recursive --size --closure-size --human-readable nixpkgs#rustc - /nix/store/01rrgsg5zk3cds0xgdsq40zpk6g51dz9-ncurses-6.2-dev 386.7 
KiB 69.1 MiB - /nix/store/0q783wnvixpqz6dxjp16nw296avgczam-libpfm-4.11.0 5.9 MiB 37.4 MiB + /nix/store/klarszqikbvf6n70581w0381zb7rlzri-ncurses-6.2-dev 386.7 KiB 69.1 MiB + /nix/store/30rva1kafnr6fyf8y5xxlpnwixvdpv4w-libpfm-4.11.0 5.9 MiB 37.4 MiB … ``` diff --git a/src/nix/print-dev-env.md b/src/nix/print-dev-env.md index a8ce9d36ae7..fd84b8afe86 100644 --- a/src/nix/print-dev-env.md +++ b/src/nix/print-dev-env.md @@ -25,7 +25,7 @@ R""( "variables": { "src": { "type": "exported", - "value": "/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz" + "value": "/nix/store/8alrpdaasjd1x6g1fczchmzbpqm936a3-hello-2.10.tar.gz" }, "postUnpackHooks": { "type": "array", diff --git a/src/nix/profile-history.md b/src/nix/profile-history.md index f0bfe503791..0c9a340ddf0 100644 --- a/src/nix/profile-history.md +++ b/src/nix/profile-history.md @@ -7,7 +7,7 @@ R""( ```console # nix profile history Version 508 (2020-04-10): - flake:nixpkgs#legacyPackages.x86_64-linux.awscli: ∅ -> 1.17.13 + flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 added Version 509 (2020-05-16) <- 508: flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 -> 1.18.211 @@ -20,7 +20,7 @@ between subsequent versions of a profile. It only shows top-level packages, not dependencies; for that, use [`nix profile diff-closures`](./nix3-profile-diff-closures.md). -The addition of a package to a profile is denoted by the string `∅ ->` -*version*, whereas the removal is denoted by *version* `-> ∅`. +The addition of a package to a profile is denoted by the string +*version* `added`, whereas the removal is denoted by *version* ` removed`. 
)"" diff --git a/src/nix/profile-list.md b/src/nix/profile-list.md index 9811b9ec920..89ac228a393 100644 --- a/src/nix/profile-list.md +++ b/src/nix/profile-list.md @@ -10,13 +10,13 @@ R""( Flake attribute: legacyPackages.x86_64-linux.gdb Original flake URL: flake:nixpkgs Locked flake URL: github:NixOS/nixpkgs/7b38b03d76ab71bdc8dc325e3f6338d984cc35ca - Store paths: /nix/store/indzcw5wvlhx6vwk7k4iq29q15chvr3d-gdb-11.1 + Store paths: /nix/store/i6i08pl20rh0lm46g38wk3bfnvhdl43d-gdb-11.1 Name: blender-bin Flake attribute: packages.x86_64-linux.default Original flake URL: flake:blender-bin Locked flake URL: github:edolstra/nix-warez/91f2ffee657bf834e4475865ae336e2379282d34?dir=blender - Store paths: /nix/store/i798sxl3j40wpdi1rgf391id1b5klw7g-blender-bin-3.1.2 + Store paths: /nix/store/rlgr8vjhcv6v2rv7ljgl0pr6g74r0cg9-blender-bin-3.1.2 ``` Note that you can unambiguously rebuild a package from a profile diff --git a/src/nix/profile-remove.md b/src/nix/profile-remove.md index e7e5e0dfb94..2d32447d49c 100644 --- a/src/nix/profile-remove.md +++ b/src/nix/profile-remove.md @@ -24,7 +24,7 @@ R""( * Remove a package by store path: ```console - # nix profile remove /nix/store/rr3y0c6zyk7kjjl8y19s4lsrhn4aiq1z-hello-2.10 + # nix profile remove /nix/store/xwjlac5ay8hw3djdm5llhjz79isgngbl-hello-2.10 ``` # Description diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 822c8046eb8..ba17b6a6212 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -288,11 +288,11 @@ struct ProfileManifest while (i != prev.elements.end() || j != cur.elements.end()) { if (j != cur.elements.end() && (i == prev.elements.end() || i->first > j->first)) { - logger->cout("%s%s: ∅ -> %s", indent, j->second.identifier(), j->second.versions()); + logger->cout("%s%s: %s added", indent, j->second.identifier(), j->second.versions()); changes = true; ++j; } else if (i != prev.elements.end() && (j == cur.elements.end() || i->first < j->first)) { - logger->cout("%s%s: %s -> ∅", indent, 
i->second.identifier(), i->second.versions()); + logger->cout("%s%s: %s removed", indent, i->second.identifier(), i->second.versions()); changes = true; ++i; } else { @@ -313,11 +313,11 @@ struct ProfileManifest }; static std::map>> -builtPathsPerInstallable(const std::vector, BuiltPathWithResult>> & builtPaths) +builtPathsPerInstallable(const std::vector & builtPaths) { std::map>> res; - for (auto & [installable, builtPath] : builtPaths) { - auto & r = res.insert({&*installable, + for (auto & b : builtPaths) { + auto & r = res.insert({&*b.installable, { {}, make_ref(), @@ -327,6 +327,7 @@ builtPathsPerInstallable(const std::vector, BuiltPath (e.g. meta.priority fields) if the installable returned multiple derivations. So pick one arbitrarily. FIXME: print a warning? */ + auto builtPath = b.getSuccess(); r.first.push_back(builtPath.path); r.second = builtPath.info; } @@ -363,8 +364,10 @@ struct CmdProfileAdd : InstallablesCommand, MixDefaultProfile { ProfileManifest manifest(*getEvalState(), *profile); - auto builtPaths = builtPathsPerInstallable( - Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal)); + auto buildResults = Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal); + Installable::throwBuildErrors(buildResults, *store); + + auto builtPaths = builtPathsPerInstallable(buildResults); for (auto & installable : installables) { ProfileElement element; @@ -766,8 +769,10 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf return; } - auto builtPaths = builtPathsPerInstallable( - Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal)); + auto buildResults = Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal); + Installable::throwBuildErrors(buildResults, *store); + + auto builtPaths = builtPathsPerInstallable(buildResults); for (size_t i = 0; i < installables.size(); ++i) { auto & installable = 
installables.at(i); diff --git a/src/nix/ps.cc b/src/nix/ps.cc new file mode 100644 index 00000000000..9ae9d97bf98 --- /dev/null +++ b/src/nix/ps.cc @@ -0,0 +1,146 @@ +#include "nix/cmd/command.hh" +#include "nix/main/common-args.hh" +#include "nix/main/shared.hh" +#include "nix/store/store-api.hh" +#include "nix/store/store-cast.hh" +#include "nix/store/active-builds.hh" +#include "nix/util/table.hh" +#include "nix/util/terminal.hh" + +#include + +using namespace nix; + +struct CmdPs : MixJSON, StoreCommand +{ + std::string description() override + { + return "list active builds"; + } + + Category category() override + { + return catUtility; + } + + std::string doc() override + { + return +#include "ps.md" + ; + } + + void run(ref store) override + { + auto & tracker = require(*store); + + auto builds = tracker.queryActiveBuilds(); + + if (json) { + printJSON(nlohmann::json(builds)); + return; + } + + if (builds.empty()) { + notice("No active builds."); + return; + } + + /* Helper to format user info: show name if available, else UID */ + auto formatUser = [](const UserInfo & user) -> std::string { + return user.name ? *user.name : std::to_string(user.uid); + }; + + Table table; + + /* Add column headers. */ + table.push_back({{"USER"}, {"PID"}, {"CPU", TableCell::Alignment::Right}, {"DERIVATION/COMMAND"}}); + + for (const auto & build : builds) { + /* Calculate CPU time - use cgroup stats if available, otherwise sum process times. */ + std::chrono::microseconds cpuTime = build.utime && build.stime ? *build.utime + *build.stime : [&]() { + std::chrono::microseconds total{0}; + for (const auto & process : build.processes) + total += process.utime.value_or(std::chrono::microseconds(0)) + + process.stime.value_or(std::chrono::microseconds(0)) + + process.cutime.value_or(std::chrono::microseconds(0)) + + process.cstime.value_or(std::chrono::microseconds(0)); + return total; + }(); + + /* Add build summary row. 
*/ + table.push_back( + {formatUser(build.mainUser), + std::to_string(build.mainPid), + {fmt("%.1fs", + std::chrono::duration_cast>(cpuTime) + .count()), + TableCell::Alignment::Right}, + fmt(ANSI_BOLD "%s" ANSI_NORMAL " (wall=%ds)", + store->printStorePath(build.derivation), + time(nullptr) - build.startTime)}); + + if (build.processes.empty()) { + table.push_back( + {formatUser(build.mainUser), + std::to_string(build.mainPid), + {"", TableCell::Alignment::Right}, + fmt("%s" ANSI_ITALIC "(no process info)" ANSI_NORMAL, treeLast)}); + } else { + /* Recover the tree structure of the processes. */ + std::set pids; + for (auto & process : build.processes) + pids.insert(process.pid); + + using Processes = std::set; + std::map children; + Processes rootProcesses; + for (auto & process : build.processes) { + if (pids.contains(process.parentPid)) + children[process.parentPid].insert(&process); + else + rootProcesses.insert(&process); + } + + /* Render the process tree. */ + [&](this auto const & visit, const Processes & processes, std::string_view prefix) -> void { + for (const auto & [n, process] : enumerate(processes)) { + bool last = n + 1 == processes.size(); + + // Format CPU time if available + std::string cpuInfo; + if (process->utime || process->stime || process->cutime || process->cstime) { + auto totalCpu = process->utime.value_or(std::chrono::microseconds(0)) + + process->stime.value_or(std::chrono::microseconds(0)) + + process->cutime.value_or(std::chrono::microseconds(0)) + + process->cstime.value_or(std::chrono::microseconds(0)); + auto totalSecs = + std::chrono::duration_cast>( + totalCpu) + .count(); + cpuInfo = fmt("%.1fs", totalSecs); + } + + // Format argv with tree structure + auto argv = concatStringsSep( + " ", tokenizeString>(concatStringsSep(" ", process->argv))); + + table.push_back( + {formatUser(process->user), + std::to_string(process->pid), + {cpuInfo, TableCell::Alignment::Right}, + fmt("%s%s%s", prefix, last ? 
treeLast : treeConn, argv)}); + + visit(children[process->pid], last ? prefix + treeNull : prefix + treeLine); + } + }(rootProcesses, ""); + } + } + + auto width = isTTY() && isatty(STDOUT_FILENO) ? getWindowWidth() : std::numeric_limits::max(); + + printTable(std::cout, table, width); + } +}; + +static auto rCmdPs = registerCommand2({"ps"}); diff --git a/src/nix/ps.md b/src/nix/ps.md new file mode 100644 index 00000000000..e48a308e696 --- /dev/null +++ b/src/nix/ps.md @@ -0,0 +1,27 @@ +R"( + +# Examples + +* Show all active builds: + + ```console + # nix ps + USER PID CPU DERIVATION/COMMAND + nixbld11 3534394 110.2s /nix/store/lzvdxlbr6xjd9w8py4nd2y2nnqb9gz7p-nix-util-tests-3.13.2.drv (wall=8s) + nixbld11 3534394 0.8s └───bash -e /nix/store/jwqf79v5p51x9mv8vx20fv9mzm2x7kig-source-stdenv.sh /nix/store/shkw4qm9qcw5sc5n1k5jznc83ny02 + nixbld11 3534751 36.3s └───ninja -j24 + nixbld11 3535637 0.0s ├───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/bin/g++ -fPIC -fstack-clash-protection -O2 -U_ + nixbld11 3535639 0.1s │ └───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/libexec/gcc/x86_64-unknown-linux-gnu/14.3. + nixbld11 3535658 0.0s └───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/bin/g++ -fPIC -fstack-clash-protection -O2 -U_ + nixbld1 3534377 1.8s /nix/store/nh2dx9cqcy9lw4d4rvd0dbsflwdsbzdy-patchelf-0.18.0.drv (wall=5s) + nixbld1 3534377 1.8s └───bash -e /nix/store/xk05lkk4ij6pc7anhdbr81appiqbcb01-default-builder.sh + nixbld1 3535074 0.0s └───/nix/store/21ymxxap3y8hb9ijcfah8ani9cjpv8m6-bash-5.2p37/bin/bash ./configure --disable-dependency-trackin + ``` + +# Description + +This command lists all currently running Nix builds. +For each build, it shows the derivation path and the main process ID. +On Linux and macOS, it also shows the child processes of each build. 
+ +)" diff --git a/src/nix/registry-list.md b/src/nix/registry-list.md index 30b6e29d8aa..a3eb65c89f5 100644 --- a/src/nix/registry-list.md +++ b/src/nix/registry-list.md @@ -7,7 +7,7 @@ R""( ```console # nix registry list user flake:dwarffs github:edolstra/dwarffs/d181d714fd36eb06f4992a1997cd5601e26db8f5 - system flake:nixpkgs path:/nix/store/fxl9mrm5xvzam0lxi9ygdmksskx4qq8s-source?lastModified=1605220118&narHash=sha256-Und10ixH1WuW0XHYMxxuHRohKYb45R%2fT8CwZuLd2D2Q=&rev=3090c65041104931adda7625d37fa874b2b5c124 + system flake:nixpkgs path:/nix/store/jschy88crdk7jqqbk1p2b4l1c9gljl9b-source?lastModified=1605220118&narHash=sha256-Und10ixH1WuW0XHYMxxuHRohKYb45R%2fT8CwZuLd2D2Q=&rev=3090c65041104931adda7625d37fa874b2b5c124 global flake:blender-bin github:edolstra/nix-warez?dir=blender global flake:dwarffs github:edolstra/dwarffs … diff --git a/src/nix/repl.md b/src/nix/repl.md index 32c08e24b24..e608dabf6f9 100644 --- a/src/nix/repl.md +++ b/src/nix/repl.md @@ -36,7 +36,7 @@ R""( Loading Installable ''... Added 1 variables. - # nix repl --extra-experimental-features 'flakes' nixpkgs + # nix repl nixpkgs Loading Installable 'flake:nixpkgs#'... Added 5 variables. 
diff --git a/src/nix/search.cc b/src/nix/search.cc index dac60ceba57..729a505f534 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -11,6 +11,7 @@ #include "nix/expr/attr-path.hh" #include "nix/util/hilite.hh" #include "nix/util/strings-inline.hh" +#include "nix/expr/parallel-eval.hh" #include #include @@ -84,11 +85,13 @@ struct CmdSearch : InstallableValueCommand, MixJSON auto state = getEvalState(); - std::optional jsonOut; + std::optional> jsonOut; if (json) - jsonOut = json::object(); + jsonOut.emplace(json::object()); - uint64_t results = 0; + std::atomic results = 0; + + FutureVector futures(*state->executor); std::function visit; @@ -96,15 +99,21 @@ struct CmdSearch : InstallableValueCommand, MixJSON auto attrPathS = state->symbols.resolve({attrPath}); auto attrPathStr = attrPath.to_string(*state); + /* Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", attrPathStr)); + */ try { auto recurse = [&]() { + std::vector> work; for (const auto & attr : cursor.getAttrs()) { auto cursor2 = cursor.getAttr(state->symbols[attr]); auto attrPath2(attrPath); attrPath2.push_back(attr); - visit(*cursor2, attrPath2, false); + work.emplace_back( + [cursor2, attrPath2, visit]() { visit(*cursor2, attrPath2, false); }, + std::string_view(state->symbols[attr]).find("Packages") != std::string_view::npos ? 0 : 2); } + futures.spawn(std::move(work)); }; if (cursor.isDerivation()) { @@ -147,21 +156,21 @@ struct CmdSearch : InstallableValueCommand, MixJSON if (found) { results++; if (json) { - (*jsonOut)[attrPathStr] = { + (*jsonOut->lock())[attrPathStr] = { {"pname", name.name}, {"version", name.version}, {"description", description}, }; } else { - if (results > 1) - logger->cout(""); - logger->cout( - "* %s%s", - wrap("\e[0;1m", hiliteMatches(attrPathStr, attrPathMatches, ANSI_GREEN, "\e[0;1m")), - optionalBracket(" (", name.version, ")")); + auto out = + fmt("%s* %s%s", + results > 1 ? 
"\n" : "", + wrap("\e[0;1m", hiliteMatches(attrPathStr, attrPathMatches, ANSI_GREEN, "\e[0;1m")), + optionalBracket(" (", name.version, ")")); if (description != "") - logger->cout( - " %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + out += fmt( + "\n %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + logger->cout(out); } } } @@ -186,14 +195,21 @@ struct CmdSearch : InstallableValueCommand, MixJSON } }; - for (auto & cursor : installable->getCursors(*state)) - visit(*cursor, cursor->getAttrPath(), true); + std::vector> work; + for (auto & cursor : installable->getCursors(*state)) { + work.emplace_back([cursor, visit]() { visit(*cursor, cursor->getAttrPath(), true); }, 1); + } + + futures.spawn(std::move(work)); + futures.finishAll(); if (json) - printJSON(*jsonOut); + printJSON(*(jsonOut->lock())); if (!json && !results) throw Error("no results for the given search term(s)!"); + + notice("Found %d matching packages.", results); } }; diff --git a/src/nix/store-cat.md b/src/nix/store-cat.md index da2073473fd..6638be2d54f 100644 --- a/src/nix/store-cat.md +++ b/src/nix/store-cat.md @@ -6,7 +6,7 @@ R""( ```console # nix store cat --store https://cache.nixos.org/ \ - /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10/bin/hello | hexdump -C | head -n1 + /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10/bin/hello | hexdump -C | head -n1 00000000 7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00 |.ELF............| ``` diff --git a/src/nix/store-copy-log.md b/src/nix/store-copy-log.md index 61daa75c178..9d10174bba3 100644 --- a/src/nix/store-copy-log.md +++ b/src/nix/store-copy-log.md @@ -23,7 +23,7 @@ R""( [store derivation]: @docroot@/glossary.md#gloss-store-derivation ```console - # nix store copy-log --to ssh-ng://machine /nix/store/ilgm50plpmcgjhcp33z6n4qbnpqfhxym-glibc-2.33-59.drv + # nix store copy-log --to ssh-ng://machine /nix/store/yaxvykk956vdrwrx9cxyw44mpqr1ml7i-glibc-2.33-59.drv ``` # 
Description diff --git a/src/nix/store-copy-sigs.md b/src/nix/store-copy-sigs.md index 67875622156..25c60966e00 100644 --- a/src/nix/store-copy-sigs.md +++ b/src/nix/store-copy-sigs.md @@ -6,7 +6,7 @@ R""( ```console # nix store copy-sigs --substituter https://cache.nixos.org \ - --recursive /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + --recursive /nix/store/q833p12cmm9qknyp1walqih941msnb9z-hello-2.12.1 ``` * To copy signatures from one binary cache to another: @@ -15,7 +15,7 @@ R""( # nix store copy-sigs --substituter https://cache.nixos.org \ --store file:///tmp/binary-cache \ --recursive -v \ - /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + /nix/store/q833p12cmm9qknyp1walqih941msnb9z-hello-2.12.1 imported 2 signatures ``` diff --git a/src/nix/store-delete.md b/src/nix/store-delete.md index 431bc5f5e3f..026dccd0f19 100644 --- a/src/nix/store-delete.md +++ b/src/nix/store-delete.md @@ -5,7 +5,7 @@ R""( * Delete a specific store path: ```console - # nix store delete /nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10 + # nix store delete /nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10 ``` # Description diff --git a/src/nix/store-dump-path.md b/src/nix/store-dump-path.md index 21467ff329e..4e5c6aeddbe 100644 --- a/src/nix/store-dump-path.md +++ b/src/nix/store-dump-path.md @@ -12,7 +12,7 @@ R""( ```console # nix store dump-path --store https://cache.nixos.org/ \ - /nix/store/7crrmih8c52r8fbnqb933dxrsp44md93-glibc-2.25 > glibc.nar + /nix/store/vyrnv99qi410q82qp7nw7lcl37zmzaxd-glibc-2.25 > glibc.nar ``` # Description diff --git a/src/nix/store-ls.md b/src/nix/store-ls.md index 14c4627c97a..62f6cd0709d 100644 --- a/src/nix/store-ls.md +++ b/src/nix/store-ls.md @@ -5,7 +5,7 @@ R""( * To list the contents of a store path in a binary cache: ```console - # nix store ls --store https://cache.nixos.org/ --long --recursive /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + # nix store ls --store https://cache.nixos.org/ --long 
--recursive /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 dr-xr-xr-x 0 ./bin -r-xr-xr-x 38184 ./bin/hello dr-xr-xr-x 0 ./share @@ -15,7 +15,7 @@ R""( * To show information about a specific file in a binary cache: ```console - # nix store ls --store https://cache.nixos.org/ --long /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10/bin/hello + # nix store ls --store https://cache.nixos.org/ --long /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10/bin/hello -r-xr-xr-x 38184 hello ``` diff --git a/src/nix/store-repair.md b/src/nix/store-repair.md index 180c577acae..a03952714cb 100644 --- a/src/nix/store-repair.md +++ b/src/nix/store-repair.md @@ -5,13 +5,13 @@ R""( * Repair a store path, after determining that it is corrupt: ```console - # nix store verify /nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10 - path '/nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10' was + # nix store verify /nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10 + path '/nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10' was modified! 
expected hash 'sha256:1hd5vnh6xjk388gdk841vflicy8qv7qzj2hb7xlyh8lpb43j921l', got 'sha256:1a25lf78x5wi6pfkrxalf0n13kdaca0bqmjqnp7wfjza2qz5ssgl' - # nix store repair /nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10 + # nix store repair /nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10 ``` # Description diff --git a/src/nix/unix/daemon.cc b/src/nix/unix/daemon.cc index 406258ff821..661488c56ef 100644 --- a/src/nix/unix/daemon.cc +++ b/src/nix/unix/daemon.cc @@ -437,22 +437,23 @@ static void forwardStdioConnection(RemoteStore & store) int from = conn->from.fd; int to = conn->to.fd; - auto nfds = std::max(from, STDIN_FILENO) + 1; + Socket fromSock = toSocket(from), stdinSock = toSocket(getStandardInput()); + auto nfds = std::max(fromSock, stdinSock) + 1; while (true) { fd_set fds; FD_ZERO(&fds); - FD_SET(from, &fds); - FD_SET(STDIN_FILENO, &fds); + FD_SET(fromSock, &fds); + FD_SET(stdinSock, &fds); if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1) throw SysError("waiting for data from client or server"); - if (FD_ISSET(from, &fds)) { + if (FD_ISSET(fromSock, &fds)) { auto res = splice(from, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE); if (res == -1) throw SysError("splicing data from daemon socket to stdout"); else if (res == 0) throw EndOfFile("unexpected EOF from daemon socket"); } - if (FD_ISSET(STDIN_FILENO, &fds)) { + if (FD_ISSET(stdinSock, &fds)) { auto res = splice(STDIN_FILENO, nullptr, to, nullptr, SSIZE_MAX, SPLICE_F_MOVE); if (res == -1) throw SysError("splicing data from stdin to daemon socket"); diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index f26613bf899..f5ca094c6af 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -15,26 +15,6 @@ using namespace nix; struct CmdUpgradeNix : MixDryRun, StoreCommand { - std::filesystem::path profileDir; - - CmdUpgradeNix() - { - addFlag({ - .longName = "profile", - .shortName = 'p', - .description = "The path to the Nix profile to upgrade.", - .labels = 
{"profile-dir"}, - .handler = {&profileDir}, - }); - - addFlag({ - .longName = "nix-store-paths-url", - .description = "The URL of the file that contains the store paths of the latest Nix release.", - .labels = {"url"}, - .handler = {&(std::string &) settings.upgradeNixStorePathUrl}, - }); - } - /** * This command is stable before the others */ @@ -45,7 +25,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand std::string description() override { - return "upgrade Nix to the latest stable version"; + return "deprecated in favor of determinate-nixd upgrade"; } std::string doc() override @@ -62,111 +42,9 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand void run(ref store) override { - evalSettings.pureEval = true; - - if (profileDir == "") - profileDir = getProfileDir(store); - - printInfo("upgrading Nix in profile %s", profileDir); - - auto storePath = getLatestNix(store); - - auto version = DrvName(storePath.name()).version; - - if (dryRun) { - logger->stop(); - warn("would upgrade to version %s", version); - return; - } - - { - Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", store->printStorePath(storePath))); - store->ensurePath(storePath); - } - - { - Activity act( - *logger, lvlInfo, actUnknown, fmt("verifying that '%s' works...", store->printStorePath(storePath))); - auto program = store->printStorePath(storePath) + "/bin/nix-env"; - auto s = runProgram(program, false, {"--version"}); - if (s.find("Nix") == std::string::npos) - throw Error("could not verify that '%s' works", program); - } - - logger->stop(); - - { - Activity act( - *logger, - lvlInfo, - actUnknown, - fmt("installing '%s' into profile %s...", store->printStorePath(storePath), profileDir)); - - // FIXME: don't call an external process. 
- runProgram( - getNixBin("nix-env").string(), - false, - {"--profile", profileDir.string(), "-i", store->printStorePath(storePath), "--no-sandbox"}); - } - - printInfo(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version); - } - - /* Return the profile in which Nix is installed. */ - std::filesystem::path getProfileDir(ref store) - { - auto whereOpt = ExecutablePath::load().findName(OS_STR("nix-env")); - if (!whereOpt) - throw Error("couldn't figure out how Nix is installed, so I can't upgrade it"); - const auto & where = whereOpt->parent_path(); - - printInfo("found Nix in %s", where); - - if (hasPrefix(where.string(), "/run/current-system")) - throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); - - auto profileDir = where.parent_path(); - - // Resolve profile to /nix/var/nix/profiles/ link. - while (canonPath(profileDir.string()).find("/profiles/") == std::string::npos - && std::filesystem::is_symlink(profileDir)) - profileDir = readLink(profileDir.string()); - - printInfo("found profile %s", profileDir); - - Path userEnv = canonPath(profileDir.string(), true); - - if (std::filesystem::exists(profileDir / "manifest.json")) - throw Error( - "directory %s is managed by 'nix profile' and currently cannot be upgraded by 'nix upgrade-nix'", - profileDir); - - if (!std::filesystem::exists(profileDir / "manifest.nix")) - throw Error("directory %s does not appear to be part of a Nix profile", profileDir); - - if (!store->isValidPath(store->parseStorePath(userEnv))) - throw Error("directory '%s' is not in the Nix store", userEnv); - - return profileDir; - } - - /* Return the store path of the latest stable Nix. */ - StorePath getLatestNix(ref store) - { - Activity act(*logger, lvlInfo, actUnknown, "querying latest Nix version"); - - // FIXME: use nixos.org? 
- auto req = FileTransferRequest(parseURL(settings.upgradeNixStorePathUrl.get())); - auto res = getFileTransfer()->download(req); - - auto state = std::make_unique(LookupPath{}, store, fetchSettings, evalSettings); - auto v = state->allocValue(); - state->eval(state->parseExprFromString(res.data, state->rootPath(CanonPath("/no-such-path"))), *v); - Bindings & bindings = Bindings::emptyBindings; - auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v).first; - - return store->parseStorePath( - state->forceString(*v2, noPos, "while evaluating the path tho latest nix version")); + throw Error( + "The upgrade-nix command isn't available in Determinate Nix; use %s instead", + "sudo determinate-nixd upgrade"); } }; diff --git a/src/nix/upgrade-nix.md b/src/nix/upgrade-nix.md index 3a3bf61b9b0..bb515717582 100644 --- a/src/nix/upgrade-nix.md +++ b/src/nix/upgrade-nix.md @@ -1,33 +1,11 @@ R""( -# Examples - -* Upgrade Nix to the stable version declared in Nixpkgs: - - ```console - # nix upgrade-nix - ``` - -* Upgrade Nix in a specific profile: - - ```console - # nix upgrade-nix --profile ~alice/.local/state/nix/profiles/profile - ``` - # Description -This command upgrades Nix to the stable version. - -By default, the latest stable version is defined by Nixpkgs, in -[nix-fallback-paths.nix](https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix) -and updated manually. It may not always be the latest tagged release. - -By default, it locates the directory containing the `nix` binary in the `$PATH` -environment variable. If that directory is a Nix profile, it will -upgrade the `nix` package in that profile to the latest stable binary -release. +This command isn't available in Determinate Nix but is present in order to guide +users to the new upgrade path. -You cannot use this command to upgrade Nix in the system profile of a -NixOS system (that is, if `nix` is found in `/run/current-system`). 
+Use `sudo determinate-nixd upgrade` to upgrade Determinate Nix on systems that manage it imperatively. +In practice, this is any system that isn't running NixOS. )"" diff --git a/src/nix/verify.md b/src/nix/verify.md index ae0b0acd68a..0c18449e2c0 100644 --- a/src/nix/verify.md +++ b/src/nix/verify.md @@ -19,7 +19,7 @@ R""( ```console # nix store verify --store https://cache.nixos.org/ \ - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 ``` # Description diff --git a/src/nix/why-depends.md b/src/nix/why-depends.md index dc13619e13a..ac8adeb7e85 100644 --- a/src/nix/why-depends.md +++ b/src/nix/why-depends.md @@ -7,9 +7,9 @@ R""( ```console # nix why-depends nixpkgs#hello nixpkgs#glibc - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 - └───bin/hello: …...................../nix/store/9l06v7fc38c1x3r2iydl15ksgz0ysb82-glibc-2.32/lib/ld-linux-x86-64.… - → /nix/store/9l06v7fc38c1x3r2iydl15ksgz0ysb82-glibc-2.32 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 + └───bin/hello: …...................../nix/store/kmmr0ggkywxvnad4z1chqb6lsxi6pqgc-glibc-2.32/lib/ld-linux-x86-64.… + → /nix/store/kmmr0ggkywxvnad4z1chqb6lsxi6pqgc-glibc-2.32 ``` * Show all files and paths in the dependency graph leading from @@ -17,13 +17,13 @@ R""( ```console # nix why-depends --all nixpkgs#thunderbird nixpkgs#xorg.libX11 - /nix/store/qfc8729nzpdln1h0hvi1ziclsl3m84sr-thunderbird-78.5.1 - ├───lib/thunderbird/libxul.so: …6wrw-libxcb-1.14/lib:/nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0/lib:/nix/store/ssf… - │ → /nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0 - ├───lib/thunderbird/libxul.so: …pxyc-libXt-1.2.0/lib:/nix/store/1qj29ipxl2fyi2b13l39hdircq17gnk0-libXdamage-1.1.5/lib:/nix/store… - │ → /nix/store/1qj29ipxl2fyi2b13l39hdircq17gnk0-libXdamage-1.1.5 - │ ├───lib/libXdamage.so.1.1.0: …-libXfixes-5.0.3/lib:/nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0/lib:/nix/store/9l0… - │ │ → 
/nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0 + /nix/store/0my2p7psgdzqc5pq6dyl4ld9w6g0np58-thunderbird-78.5.1 + ├───lib/thunderbird/libxul.so: …6wrw-libxcb-1.14/lib:/nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0/lib:/nix/store/ssf… + │ → /nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0 + ├───lib/thunderbird/libxul.so: …pxyc-libXt-1.2.0/lib:/nix/store/l1sv43bafhkf2iikmdw9y62aybjdhcmm-libXdamage-1.1.5/lib:/nix/store… + │ → /nix/store/l1sv43bafhkf2iikmdw9y62aybjdhcmm-libXdamage-1.1.5 + │ ├───lib/libXdamage.so.1.1.0: …-libXfixes-5.0.3/lib:/nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0/lib:/nix/store/9l0… + │ │ → /nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0 … ``` @@ -31,9 +31,9 @@ R""( ```console # nix why-depends nixpkgs#glibc nixpkgs#glibc - /nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31 - └───lib/ld-2.31.so: …che Do not use /nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31/etc/ld.so.cache. --… - → /nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31 + /nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31 + └───lib/ld-2.31.so: …che Do not use /nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31/etc/ld.so.cache. --… + → /nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31 ``` * Show why Geeqie has a build-time dependency on `systemd`: @@ -54,7 +54,7 @@ R""( Nix automatically determines potential runtime dependencies between store paths by scanning for the *hash parts* of store paths. For instance, if there exists a store path -`/nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31`, and a file +`/nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31`, and a file inside another store path contains the string `9df65igw…`, then the latter store path *refers* to the former, and thus might need it at runtime. 
Nix always maintains the existence of the transitive closure diff --git a/src/perl/lib/Nix/Store.xs b/src/perl/lib/Nix/Store.xs index 93e9f0f9541..6de26f0d284 100644 --- a/src/perl/lib/Nix/Store.xs +++ b/src/perl/lib/Nix/Store.xs @@ -234,7 +234,7 @@ StoreWrapper::exportPaths(int fd, ...) StorePathSet paths; for (int n = 2; n < items; ++n) paths.insert(THIS->store->parseStorePath(SvPV_nolen(ST(n)))); FdSink sink(fd); - exportPaths(*THIS->store, paths, sink); + exportPaths(*THIS->store, paths, sink, 1); } catch (Error & e) { croak("%s", e.what()); } diff --git a/src/perl/package.nix b/src/perl/package.nix index 864558ec855..b2a1f697583 100644 --- a/src/perl/package.nix +++ b/src/perl/package.nix @@ -18,7 +18,7 @@ in perl.pkgs.toPerlModule ( mkMesonDerivation (finalAttrs: { - pname = "nix-perl"; + pname = "determinate-nix-perl"; inherit version; workDir = ./.; diff --git a/tests/functional/build.sh b/tests/functional/build.sh index 0b06dcd917d..51f2e2423f3 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -160,21 +160,18 @@ printf "%s\n" "$drv^*" | nix build --no-link --stdin --json | jq --exit-status ' # --keep-going and FOD out="$(nix build -f fod-failing.nix -L 2>&1)" && status=0 || status=$? test "$status" = 1 -# one "hash mismatch" error, one "build of ... 
failed" -test "$(<<<"$out" grep -cE '^error:')" = 2 -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" -<<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x3\\.drv'" -<<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x2\\.drv'" -<<<"$out" grepQuiet -E "error: build of '.*-x[1-4]\\.drv\\^out', '.*-x[1-4]\\.drv\\^out', '.*-x[1-4]\\.drv\\^out', '.*-x[1-4]\\.drv\\^out' failed" +# one "hash mismatch" error, one cancelled build +test "$(<<<"$out" grep -cE '^error:')" = 1 +test "$(<<<"$out" grep -cE '(cancelled)')" = 3 +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation" out="$(nix build -f fod-failing.nix -L x1 x2 x3 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -# three "hash mismatch" errors - for each failing fod, one "build of ... failed" -test "$(<<<"$out" grep -cE '^error:')" = 4 +# three "hash mismatch" errors - for each failing fod +test "$(<<<"$out" grep -cE '^error:')" = 3 <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x3\\.drv'" <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" -<<<"$out" grepQuiet -E "error: build of '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out' failed" out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? 
test "$status" = 1 diff --git a/tests/functional/ca/build-with-garbage-path.sh b/tests/functional/ca/build-with-garbage-path.sh index 298cd469a92..b610f8e2b1c 100755 --- a/tests/functional/ca/build-with-garbage-path.sh +++ b/tests/functional/ca/build-with-garbage-path.sh @@ -9,7 +9,7 @@ requireDaemonNewerThan "2.4pre20210621" # Get the output path of `rootCA`, and put some garbage instead outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)" # shellcheck disable=SC2046 # Multiple store paths need to become individual args -nix-store --delete $(nix-store -q --referrers-closure "$outPath") +nix-store --delete $(nix-store -q --referrers-closure "$outPath") --ignore-liveness touch "$outPath" # The build should correctly remove the garbage and put the expected path instead diff --git a/tests/functional/ca/derivation-json.sh b/tests/functional/ca/derivation-json.sh index f94c5a72c3f..eb1d949676a 100644 --- a/tests/functional/ca/derivation-json.sh +++ b/tests/functional/ca/derivation-json.sh @@ -19,7 +19,7 @@ drvPath3=$(nix derivation add --dry-run < "$TEST_HOME"/foo.json) [[ ! -e "$drvPath3" ]] # But the JSON is rejected without the experimental feature -expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features '' | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" # Without --dry-run it is actually written drvPath4=$(nix derivation add < "$TEST_HOME"/foo.json) diff --git a/tests/functional/ca/selfref-gc.sh b/tests/functional/ca/selfref-gc.sh index 7ac9ec9f78d..fdee6b07ab0 100755 --- a/tests/functional/ca/selfref-gc.sh +++ b/tests/functional/ca/selfref-gc.sh @@ -4,7 +4,7 @@ source common.sh requireDaemonNewerThan "2.4pre20210626" -enableFeatures "ca-derivations nix-command flakes" +enableFeatures "ca-derivations" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. 
diff --git a/tests/functional/common/functions.sh b/tests/functional/common/functions.sh index 1b2ec8fe0e8..3e3aeef3ddc 100644 --- a/tests/functional/common/functions.sh +++ b/tests/functional/common/functions.sh @@ -73,6 +73,7 @@ startDaemon() { fi # Start the daemon, wait for the socket to appear. rm -f "$NIX_DAEMON_SOCKET_PATH" + # TODO: remove the nix-command feature when we're no longer testing against old daemons. PATH=$DAEMON_PATH nix --extra-experimental-features 'nix-command' daemon & _NIX_TEST_DAEMON_PID=$! export _NIX_TEST_DAEMON_PID @@ -132,11 +133,11 @@ restartDaemon() { } isDaemonNewer () { - [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 - local requiredVersion="$1" - local daemonVersion - daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | cut -d' ' -f3) - [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] + [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 + local requiredVersion="$1" + local daemonVersion + daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | sed 's/.*) //') + [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] } skipTest () { @@ -360,4 +361,25 @@ execUnshare () { exec unshare --mount --map-root-user "$SHELL" "$@" } +initGitRepo() { + local repo="$1" + local extraArgs="${2-}" + + # shellcheck disable=SC2086 # word splitting of extraArgs is intended + git -C "$repo" init $extraArgs + git -C "$repo" config user.email "foobar@example.com" + git -C "$repo" config user.name "Foobar" +} + +createGitRepo() { + local repo="$1" + local extraArgs="${2-}" + + rm -rf "$repo" "$repo".tmp + mkdir -p "$repo" + + # shellcheck disable=SC2086 # word splitting of extraArgs is intended + initGitRepo "$repo" $extraArgs +} + fi # COMMON_FUNCTIONS_SH_SOURCED diff --git a/tests/functional/common/init.sh b/tests/functional/common/init.sh index 66b44c76f69..7f28a09d753 100755 --- a/tests/functional/common/init.sh +++ 
b/tests/functional/common/init.sh @@ -12,7 +12,7 @@ if isTestOnNixOS; then ! test -e "$test_nix_conf" cat > "$test_nix_conf" < "$NIX_CONF_DIR"/nix.conf < "$NIX_CONF_DIR"/nix.conf.extra <"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +xpFeature=auto-allocate-uids +gatedSetting=auto-allocate-uids + +# Experimental feature is disabled before, ignore and warn. +NIX_CONFIG=" + experimental-features = + $gatedSetting = true +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is disabled after, ignore and warn -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command -' expect 1 nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is disabled after, ignore and warn. +NIX_CONFIG=" + $gatedSetting = true + experimental-features = +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled before, process -NIX_CONFIG=' - experimental-features = nix-command flakes - accept-flake-config = true -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled before, process. 
+NIX_CONFIG=" + experimental-features = $xpFeature + $gatedSetting = true +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled after, process -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command flakes -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled after, process. +NIX_CONFIG=" + $gatedSetting = true + experimental-features = $xpFeature +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuietInverse "Ignoring setting '$gatedSetting'" "$TEST_ROOT/stderr" function exit_code_both_ways { - expect 1 nix --experimental-features 'nix-command' "$@" 1>/dev/null - nix --experimental-features 'nix-command flakes' "$@" 1>/dev/null + expect 1 nix --experimental-features '' "$@" 1>/dev/null + nix --experimental-features "$xpFeature" "$@" 1>/dev/null # Also, the order should not matter - expect 1 nix "$@" --experimental-features 'nix-command' 1>/dev/null - nix "$@" --experimental-features 'nix-command flakes' 1>/dev/null + expect 1 nix "$@" --experimental-features '' 1>/dev/null + nix "$@" --experimental-features "$xpFeature" 1>/dev/null } -exit_code_both_ways show-config --flake-registry 'https://no' +exit_code_both_ways config show --auto-allocate-uids # Double check these are stable nix --experimental-features '' --help 1>/dev/null nix --experimental-features '' doctor --help 1>/dev/null nix --experimental-features '' repl --help 1>/dev/null nix --experimental-features '' upgrade-nix --help 1>/dev/null - -# These 3 arguments are currently given to all commands, which is wrong (as not -# all care). 
To deal with fixing later, we simply make them require the -# nix-command experimental features --- it so happens that the commands we wish -# stabilizing to do not need them anyways. -for arg in '--print-build-logs' '--offline' '--refresh'; do - nix --experimental-features 'nix-command' "$arg" --help 1>/dev/null - expect 1 nix --experimental-features '' "$arg" --help 1>/dev/null -done diff --git a/tests/functional/export.sh b/tests/functional/export.sh index 53bbdd9ac39..a74efa91d80 100755 --- a/tests/functional/export.sh +++ b/tests/functional/export.sh @@ -7,18 +7,24 @@ TODO_NixOS clearStore outPath=$(nix-build dependencies.nix --no-out-link) +drvPath=$(nix path-info --json "$outPath" | jq -r .\""$outPath"\".deriver) nix-store --export "$outPath" > "$TEST_ROOT"/exp +expectStderr 1 nix nario export "$outPath" | grepQuiet "required argument.*missing" +nix nario export --format 1 "$outPath" > "$TEST_ROOT/exp2" +cmp "$TEST_ROOT/exp" "$TEST_ROOT/exp2" # shellcheck disable=SC2046 nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all +nix nario export --format 1 -r "$outPath" > "$TEST_ROOT"/exp_all2 +cmp "$TEST_ROOT/exp_all" "$TEST_ROOT/exp_all2" + if nix-store --export "$outPath" >/dev/full ; then echo "exporting to a bad file descriptor should fail" exit 1 fi - clearStore if nix-store --import < "$TEST_ROOT"/exp; then @@ -26,7 +32,6 @@ if nix-store --import < "$TEST_ROOT"/exp; then exit 1 fi - clearStore nix-store --import < "$TEST_ROOT"/exp_all @@ -34,9 +39,42 @@ nix-store --import < "$TEST_ROOT"/exp_all # shellcheck disable=SC2046 nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all2 - clearStore # Regression test: the derivers in exp_all2 are empty, which shouldn't # cause a failure. nix-store --import < "$TEST_ROOT"/exp_all2 + +# Test `nix nario import` on files created by `nix-store --export`. 
+clearStore +expectStderr 1 nix nario import < "$TEST_ROOT"/exp_all | grepQuiet "lacks a signature" +nix nario import --no-check-sigs < "$TEST_ROOT"/exp_all +nix path-info "$outPath" + +# Test `nix nario list`. +nix nario list < "$TEST_ROOT"/exp_all +nix nario list < "$TEST_ROOT"/exp_all | grepQuiet ".*dependencies-input-0.*bytes" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet "dr-xr-xr-x .*0 $outPath" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet "lrwxrwxrwx .*0 $outPath/self -> $outPath" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet -- "-r--r--r-- .*7 $outPath/foobar" + +# Test format 2 (including signatures). +nix key generate-secret --key-name my-key > "$TEST_ROOT"/secret +public_key=$(nix key convert-secret-to-public < "$TEST_ROOT"/secret) +nix store sign --key-file "$TEST_ROOT/secret" -r "$outPath" +nix nario export --format 2 -r "$outPath" > "$TEST_ROOT"/exp_all +clearStore +expectStderr 1 nix nario import < "$TEST_ROOT"/exp_all | grepQuiet "lacks a signature" +nix nario import --trusted-public-keys "$public_key" < "$TEST_ROOT"/exp_all +[[ $(nix path-info --json "$outPath" | jq -r .[].signatures[]) =~ my-key: ]] + +# Test json listing. 
+json=$(nix nario list --json -R < "$TEST_ROOT/exp_all") +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".deriver") = "$drvPath" ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.type") = directory ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.entries.foobar.type") = regular ]] +[[ $(printf "%s" "$json" | jq ".paths.\"$outPath\".contents.entries.foobar.size") = 7 ]] + +json=$(nix nario list --json < "$TEST_ROOT/exp_all") +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".deriver") = "$drvPath" ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.type") = null ]] diff --git a/tests/functional/fetchGit.sh b/tests/functional/fetchGit.sh index e3135328f40..e6d47e7f33e 100755 --- a/tests/functional/fetchGit.sh +++ b/tests/functional/fetchGit.sh @@ -12,11 +12,9 @@ repo=$TEST_ROOT/./git export _NIX_FORCE_HTTP=1 -rm -rf "$repo" "${repo}"-tmp "$TEST_HOME"/.cache/nix "$TEST_ROOT"/worktree "$TEST_ROOT"/minimal +rm -rf "${repo}"-tmp "$TEST_HOME"/.cache/nix "$TEST_ROOT"/worktree "$TEST_ROOT"/minimal -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" echo utrecht > "$repo"/hello touch "$repo"/.gitignore @@ -213,8 +211,7 @@ path5=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; ref = # Fetching from a repo with only a specific revision and no branches should # not fall back to copying files and record correct revision information. See: #5302 -mkdir "$TEST_ROOT"/minimal -git -C "$TEST_ROOT"/minimal init +createGitRepo "$TEST_ROOT"/minimal git -C "$TEST_ROOT"/minimal fetch "$repo" "$rev2" git -C "$TEST_ROOT"/minimal checkout "$rev2" [[ $(nix eval --impure --raw --expr "(builtins.fetchGit { url = $TEST_ROOT/minimal; }).rev") = "$rev2" ]] @@ -267,7 +264,7 @@ rm -rf "$TEST_HOME"/.cache/nix (! 
nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") # should succeed for a repo without commits -git init "$repo" +initGitRepo "$repo" git -C "$repo" add hello # need to add at least one file to cause the root of the repo to be visible # shellcheck disable=SC2034 path10=$(nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") @@ -275,9 +272,7 @@ path10=$(nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").ou # should succeed for a path with a space # regression test for #7707 repo="$TEST_ROOT/a b" -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" echo utrecht > "$repo/hello" touch "$repo/.gitignore" @@ -289,7 +284,7 @@ path11=$(nix eval --impure --raw --expr "(builtins.fetchGit ./.).outPath") # Test a workdir with no commits. empty="$TEST_ROOT/empty" -git init "$empty" +createGitRepo "$empty" emptyAttrs="{ lastModified = 0; lastModifiedDate = \"19700101000000\"; narHash = \"sha256-pQpattmS9VmO3ZIQUFn66az8GSmB4IvYhTTCFn6SUmo=\"; rev = \"0000000000000000000000000000000000000000\"; revCount = 0; shortRev = \"0000000\"; submodules = false; }" result=$(nix eval --impure --expr "builtins.removeAttrs (builtins.fetchGit $empty) [\"outPath\"]") @@ -314,3 +309,56 @@ git -C "$empty" config user.name "Foobar" git -C "$empty" commit --allow-empty --allow-empty-message --message "" nix eval --impure --expr "let attrs = builtins.fetchGit $empty; in assert attrs.lastModified != 0; assert attrs.rev != \"0000000000000000000000000000000000000000\"; assert attrs.revCount == 1; true" + +# Test backward compatibility hack for Nix < 2.20 locks / fetchTree calls that expect Git filters to be applied. 
+eol="$TEST_ROOT/git-eol" +createGitRepo "$eol" +printf "Hello\nWorld\n" > "$eol/crlf" +printf "ignore me" > "$eol/ignored" +git -C "$eol" add crlf ignored +git -C "$eol" commit -a -m Initial +echo "Version: \$Format:%s\$" > "$eol/version" +printf "crlf text eol=crlf\nignored export-ignore\nversion export-subst\n" > "$eol/.gitattributes" +git -C "$eol" add .gitattributes version +git -C "$eol" commit -a -m 'Apply gitattributes' + +rev="$(git -C "$eol" rev-parse HEAD)" + +export _NIX_TEST_BARF_ON_UNCACHEABLE=1 + +oldHash="sha256-cOuYSqDjvOBmKCuH5nXEfHRIAUVJZlictW0raF+3ynk=" +newHash="sha256-WZ5VePvmUcbRbkWLlNtCywWrAcr7EvVeJP8xKdZR7pc=" + +expectStderr 0 nix eval --expr \ + "let tree = builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"$oldHash\"; }; in assert builtins.readFile \"\${tree}/crlf\" == \"Hello\r\nWorld\r\n\"; assert !builtins.pathExists \"\${tree}/ignored\"; assert builtins.readFile \"\${tree}/version\" == \"Version: Apply gitattributes\n\"; true" \ + | grepQuiet "Please update the NAR hash to '$newHash'" + +nix eval --expr \ + "let tree = builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"$newHash\"; }; in assert builtins.readFile \"\${tree}/crlf\" == \"Hello\nWorld\n\"; assert builtins.pathExists \"\${tree}/ignored\"; assert builtins.readFile \"\${tree}/version\" == \"Version: \$Format:%s\$\n\"; true" + +expectStderr 102 nix eval --expr \ + "builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"sha256-DLDvcwdcwCxnuPTxSQ6gLAyopB20lD0bOQoQB3i2hsA=\"; }" \ + | grepQuiet "NAR hash mismatch" + +mkdir -p "$TEST_ROOT"/flake +cat > "$TEST_ROOT"/flake/flake.nix << EOF +{ + inputs.eol = { type = "git"; url = "file://$eol"; rev = "$rev"; flake = false; }; + outputs = { self, eol }: rec { + crlf = builtins.readFile "\${eol}/crlf"; + isLegacy = assert crlf == "Hello\r\nWorld\r\n"; true; + isModern = assert crlf == "Hello\nWorld\n"; true; + }; +} +EOF + +# 
Test locking with Nix < 2.20 semantics (i.e. using `git archive`). +nix eval --nix-219-compat "path:$TEST_ROOT/flake"#isLegacy +nix eval "path:$TEST_ROOT/flake"#isLegacy +[[ $(jq -r .nodes.eol.locked.narHash < "$TEST_ROOT"/flake/flake.lock) = "$oldHash" ]] + +# Test locking with Nix >= 2.20 semantics (i.e. using libgit2). +rm "$TEST_ROOT"/flake/flake.lock +nix eval "path:$TEST_ROOT/flake"#isModern +nix eval --nix-219-compat "path:$TEST_ROOT/flake"#isModern +[[ $(jq -r .nodes.eol.locked.narHash < "$TEST_ROOT"/flake/flake.lock) = "$newHash" ]] diff --git a/tests/functional/fetchGitRefs.sh b/tests/functional/fetchGitRefs.sh index a7d1a2a2931..9c7fb323eb7 100755 --- a/tests/functional/fetchGitRefs.sh +++ b/tests/functional/fetchGitRefs.sh @@ -8,11 +8,9 @@ clearStoreIfPossible repo="$TEST_ROOT/git" -rm -rf "$repo" "${repo}-tmp" "$TEST_HOME/.cache/nix" +rm -rf "${repo}-tmp" "$TEST_HOME/.cache/nix" -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" echo utrecht > "$repo/hello" git -C "$repo" add hello diff --git a/tests/functional/fetchGitShallow.sh b/tests/functional/fetchGitShallow.sh index 4c21bd7ac80..6b91d60cd9e 100644 --- a/tests/functional/fetchGitShallow.sh +++ b/tests/functional/fetchGitShallow.sh @@ -6,9 +6,7 @@ source common.sh requireGit # Create a test repo with multiple commits for all our tests -git init "$TEST_ROOT/shallow-parent" -git -C "$TEST_ROOT/shallow-parent" config user.email "foobar@example.com" -git -C "$TEST_ROOT/shallow-parent" config user.name "Foobar" +createGitRepo "$TEST_ROOT/shallow-parent" # Add several commits to have history echo "{ outputs = _: {}; }" > "$TEST_ROOT/shallow-parent/flake.nix" diff --git a/tests/functional/fetchGitSubmodules.sh b/tests/functional/fetchGitSubmodules.sh index 2a25245be75..bf5fe5df387 100755 --- a/tests/functional/fetchGitSubmodules.sh +++ b/tests/functional/fetchGitSubmodules.sh @@ -22,22 +22,16 @@ rm -rf "${rootRepo}" 
"${subRepo}" "$TEST_HOME"/.cache/nix export XDG_CONFIG_HOME=$TEST_HOME/.config git config --global protocol.file.allow always -initGitRepo() { - git init "$1" - git -C "$1" config user.email "foobar@example.com" - git -C "$1" config user.name "Foobar" -} - addGitContent() { echo "lorem ipsum" > "$1"/content git -C "$1" add content git -C "$1" commit -m "Initial commit" } -initGitRepo "$subRepo" +createGitRepo "$subRepo" addGitContent "$subRepo" -initGitRepo "$rootRepo" +createGitRepo "$rootRepo" git -C "$rootRepo" submodule init git -C "$rootRepo" submodule add "$subRepo" sub @@ -199,19 +193,19 @@ test_submodule_nested() { local repoB=$TEST_ROOT/submodule_nested/b local repoC=$TEST_ROOT/submodule_nested/c - rm -rf "$repoA" "$repoB" "$repoC" "$TEST_HOME"/.cache/nix + rm -rf "$TEST_HOME"/.cache/nix - initGitRepo "$repoC" + createGitRepo "$repoC" touch "$repoC"/inside-c git -C "$repoC" add inside-c addGitContent "$repoC" - initGitRepo "$repoB" + createGitRepo "$repoB" git -C "$repoB" submodule add "$repoC" c git -C "$repoB" add c addGitContent "$repoB" - initGitRepo "$repoA" + createGitRepo "$repoA" git -C "$repoA" submodule add "$repoB" b git -C "$repoA" add b addGitContent "$repoA" diff --git a/tests/functional/fetchGitVerification.sh b/tests/functional/fetchGitVerification.sh index 79c78d0c9f6..3b5f9b866b9 100755 --- a/tests/functional/fetchGitVerification.sh +++ b/tests/functional/fetchGitVerification.sh @@ -21,9 +21,7 @@ ssh-keygen -f "$keysDir/testkey2" -t rsa -P "" -C "test key 2" key2File="$keysDir/testkey2.pub" publicKey2=$(awk '{print $2}' "$key2File") -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" git -C "$repo" config gpg.format ssh echo 'hello' > "$repo"/text diff --git a/tests/functional/fetchPath.sh b/tests/functional/fetchPath.sh index 1df895b6166..2784afb0388 100755 --- a/tests/functional/fetchPath.sh +++ b/tests/functional/fetchPath.sh @@ -3,9 +3,9 @@ source 
common.sh touch "$TEST_ROOT/foo" -t 202211111111 -# We only check whether 2022-11-1* **:**:** is the last modified date since -# `lastModified` is transformed into UTC in `builtins.fetchTarball`. -[[ "$(nix eval --impure --raw --expr "(builtins.fetchTree \"path://$TEST_ROOT/foo\").lastModifiedDate")" =~ 2022111.* ]] + +# The path fetcher does not return lastModified. +[[ "$(nix eval --impure --expr "(builtins.fetchTree \"path://$TEST_ROOT/foo\") ? lastModifiedDate")" = false ]] # Check that we can override lastModified for "path:" inputs. [[ "$(nix eval --impure --expr "(builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; lastModified = 123; }).lastModified")" = 123 ]] diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh index c25ac321668..96d46abf468 100755 --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -71,7 +71,7 @@ echo "$outPath" | grepQuiet 'xyzzy' test -x "$outPath/fetchurl.sh" test -L "$outPath/symlink" -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness # Test unpacking a compressed NAR. 
narxz="$TEST_ROOT/archive.nar.xz" diff --git a/tests/functional/flakes/build-time-flake-inputs.sh b/tests/functional/flakes/build-time-flake-inputs.sh new file mode 100644 index 00000000000..d1fc1c45360 --- /dev/null +++ b/tests/functional/flakes/build-time-flake-inputs.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +source ./common.sh + +TODO_NixOS +enableFeatures "build-time-fetch-tree" +restartDaemon +requireGit + +lazy="$TEST_ROOT/lazy" +createGitRepo "$lazy" +echo world > "$lazy/who" +git -C "$lazy" add who +git -C "$lazy" commit -a -m foo + +repo="$TEST_ROOT/repo" + +createGitRepo "$repo" + +cat > "$repo/flake.nix" < "$lazy/who" +git -C "$lazy" commit -a -m foo + +nix flake update --flake "$repo" + +clearStore + +nix build --out-link "$TEST_ROOT/result" -L "$repo" +[[ $(cat "$TEST_ROOT/result") = utrecht ]] + +rm -rf "$lazy" + +clearStore + +expectStderr 1 nix build --out-link "$TEST_ROOT/result" -L "$repo" | grepQuiet "Cannot build.*source.drv" + +# `nix flake prefetch-inputs` should ignore build-time inputs. 
+depDir=$TEST_ROOT/dep +createGitRepo "$depDir" +createSimpleGitFlake "$depDir" + +cat > "$repo/flake.nix" <&1 | grepQuiet 'error: breaks' -expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' # Stack overflow error must not be cached -expect 1 nix build --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ +expect 1 nix build --no-link --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ | grepQuiet 'error: stack overflow; max-call-depth exceeded' # If the SO is cached, the following invocation will produce a cached failure; we expect it to succeed nix build --no-link "$flake1Dir#stack-depth" @@ -48,3 +48,11 @@ nix build --no-link "$flake1Dir#stack-depth" expect 1 nix build "$flake1Dir#ifd" --option allow-import-from-derivation false 2>&1 \ | grepQuiet 'error: cannot build .* during evaluation because the option '\''allow-import-from-derivation'\'' is disabled' nix build --no-link "$flake1Dir#ifd" + +# Test that a store derivation is recreated when it has been deleted +# but the corresponding attribute is still cached. +if ! isTestOnNixOS; then + nix build --no-link "$flake1Dir#drv" + clearStore + nix build --no-link "$flake1Dir#drv" +fi diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index d9e187251f9..c33b2a64ae1 100755 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -69,7 +69,9 @@ nix flake metadata "$flake1Dir" | grepQuiet 'URL:.*flake1.*' # Test 'nix flake metadata --json'. json=$(nix flake metadata flake1 --json | jq .) 
[[ $(echo "$json" | jq -r .description) = 'Bla bla' ]] -[[ -d $(echo "$json" | jq -r .path) ]] +if [[ $(nix config show lazy-trees) = false ]]; then + [[ -d $(echo "$json" | jq -r .path) ]] +fi [[ $(echo "$json" | jq -r .lastModified) = $(git -C "$flake1Dir" log -n1 --format=%ct) ]] hash1=$(echo "$json" | jq -r .revision) [[ -n $(echo "$json" | jq -r .fingerprint) ]] @@ -77,6 +79,7 @@ hash1=$(echo "$json" | jq -r .revision) echo foo > "$flake1Dir/foo" git -C "$flake1Dir" add "$flake1Dir"/foo [[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "$hash1-dirty" ]] +[[ $(_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake metadata flake1 --json --refresh --warn-large-path-threshold 1 --lazy-trees | jq -r .dirtyRevision) == "$hash1-dirty" ]] [[ "$(nix flake metadata flake1 --json | jq -r .fingerprint)" != null ]] echo -n '# foo' >> "$flake1Dir/flake.nix" @@ -110,6 +113,11 @@ nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir#default" nix build -o "$TEST_ROOT/result" "$flake1Dir?ref=HEAD#default" nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" +# Check that the fetcher cache works. +if [[ $(nix config show lazy-trees) = false ]]; then + nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" -vvvvv 2>&1 | grepQuiet "source path.*cache hit" +fi + # Check that relative paths are allowed for git flakes. # This may change in the future once git submodule support is refined. # See: https://discourse.nixos.org/t/57783 and #9708. @@ -161,7 +169,12 @@ expect 1 nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --no-update-lock-file nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --commit-lock-file [[ -e "$flake2Dir/flake.lock" ]] [[ -z $(git -C "$flake2Dir" diff main || echo failed) ]] -[[ $(jq --indent 0 --compact-output . 
< "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +if [[ $(nix config show lazy-trees) = true ]]; then + # Test that `lazy-locks` causes NAR hashes to be omitted from the lock file. + nix flake update --flake "$flake2Dir" --commit-lock-file --lazy-locks + [[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +fi # Rerunning the build should not change the lockfile. nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" @@ -264,6 +277,9 @@ nix registry remove flake1 [[ $(nix registry list | wc -l) == 4 ]] [[ $(nix registry resolve flake1) = "git+file://$flake1Dir" ]] +# Test the builtin fallback registry. +[[ $(nix registry resolve nixpkgs --flake-registry http://fail.invalid.org/sdklsdklsd --download-attempts 1) = github:NixOS/nixpkgs/nixpkgs-unstable ]] + # Test 'nix registry list' with a disabled global registry. 
nix registry add user-flake1 git+file://"$flake1Dir" nix registry add user-flake2 "git+file://$percentEncodedFlake2Dir" @@ -420,7 +436,7 @@ nix flake metadata "$flake3Dir" --json --eval-store "dummy://?read-only=false" | rm -rf "$badFlakeDir" mkdir "$badFlakeDir" echo INVALID > "$badFlakeDir"/flake.nix -nix store delete "$(nix store add-path "$badFlakeDir")" +nix store delete --ignore-liveness "$(nix store add-path "$badFlakeDir")" [[ $(nix path-info "$(nix store add-path "$flake1Dir")") =~ flake1 ]] [[ $(nix path-info path:"$(nix store add-path "$flake1Dir")") =~ simple ]] diff --git a/tests/functional/flakes/follow-paths.sh b/tests/functional/flakes/follow-paths.sh index f658a0847f7..3d668d687ab 100755 --- a/tests/functional/flakes/follow-paths.sh +++ b/tests/functional/flakes/follow-paths.sh @@ -131,7 +131,7 @@ EOF git -C "$flakeFollowsA" add flake.nix expect 1 nix flake lock "$flakeFollowsA" 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep "'flakeB' is too short to be a valid store path" +expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep "error: 'flakeB' is too short to be a valid store path" # Test relative non-flake inputs. cat > "$flakeFollowsA"/flake.nix < "$flakeDir/a" -(cd "$flakeDir" && nix flake init) # check idempotence +(cd "$flakeDir" && nix flake init --template "git+file://$templatesDir") # check idempotence # Test 'nix flake init' with conflicts createGitRepo "$flakeDir" echo b > "$flakeDir/a" pushd "$flakeDir" -(! nix flake init) |& grep "refusing to overwrite existing file '$flakeDir/a'" +(! 
nix flake init --template "git+file://$templatesDir") |& grep "refusing to overwrite existing file '$flakeDir/a'" popd git -C "$flakeDir" commit -a -m 'Changed' diff --git a/tests/functional/flakes/meson.build b/tests/functional/flakes/meson.build index de76a55804a..dba4035610f 100644 --- a/tests/functional/flakes/meson.build +++ b/tests/functional/flakes/meson.build @@ -34,6 +34,9 @@ suites += { 'source-paths.sh', 'old-lockfiles.sh', 'trace-ifd.sh', + 'build-time-flake-inputs.sh', + 'substitution.sh', + 'shallow.sh', ], 'workdir' : meson.current_source_dir(), } diff --git a/tests/functional/flakes/relative-paths.sh b/tests/functional/flakes/relative-paths.sh index 7480cd50458..323e97ba9cc 100644 --- a/tests/functional/flakes/relative-paths.sh +++ b/tests/functional/flakes/relative-paths.sh @@ -135,10 +135,8 @@ EOF # https://github.com/NixOS/nix/issues/13164 mkdir -p "$TEST_ROOT/issue-13164/nested-flake1/nested-flake2" ( + initGitRepo "$TEST_ROOT/issue-13164" cd "$TEST_ROOT/issue-13164" - git init - git config --global user.email "you@example.com" - git config --global user.name "Your Name" cat >flake.nix <> "$flake1Dir/flake.nix" +git -C "$flake1Dir" commit -a -m bla + +cat > "$repoDir"/flake.nix <flake.nix < show-output.json -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output == { }; -true -' +[[ $(nix flake show --all-systems --legacy | wc -l) = 1 ]] # Test that attributes with errors are handled correctly. # nixpkgs.legacyPackages is a particularly prominent instance of this. 
diff --git a/tests/functional/flakes/source-paths.sh b/tests/functional/flakes/source-paths.sh index 3aa3683c27c..e4f4ec1cda8 100644 --- a/tests/functional/flakes/source-paths.sh +++ b/tests/functional/flakes/source-paths.sh @@ -30,10 +30,10 @@ expectStderr 1 nix eval "$repo#y" | grepQuiet "at $repo/flake.nix:" git -C "$repo" commit -a -m foo -expectStderr 1 nix eval "git+file://$repo?ref=master#y" | grepQuiet "at «git+file://$repo?ref=master&rev=.*»/flake.nix:" +expectStderr 1 nix eval "git+file://$repo?ref=master#y" | grepQuiet "at «git+file://$repo?rev=.*»/flake.nix:" expectStderr 1 nix eval "$repo#z" | grepQuiet "error: Path 'foo' does not exist in Git repository \"$repo\"." -expectStderr 1 nix eval "git+file://$repo?ref=master#z" | grepQuiet "error: '«git+file://$repo?ref=master&rev=.*»/foo' does not exist" +expectStderr 1 nix eval "git+file://$repo?ref=master#z" | grepQuiet "error: '«git+file://$repo?rev=.*»/foo' does not exist" expectStderr 1 nix eval "$repo#a" | grepQuiet "error: Path 'foo' does not exist in Git repository \"$repo\"." echo 123 > "$repo/foo" diff --git a/tests/functional/flakes/substitution.sh b/tests/functional/flakes/substitution.sh new file mode 100644 index 00000000000..97a04931abf --- /dev/null +++ b/tests/functional/flakes/substitution.sh @@ -0,0 +1,31 @@ +#! /usr/bin/env bash + +# Test that inputs are substituted if they cannot be fetched from their original location. + +source ./common.sh + +if [[ $(nix config show lazy-trees) = true ]]; then + exit 0 +fi + +TODO_NixOS + +createFlake1 +createFlake2 + +nix build --no-link "$flake2Dir#bar" + +path1="$(nix flake metadata --json "$flake1Dir" | jq -r .path)" + +# Building after an input disappeared should succeed, because it's still in the Nix store. +mv "$flake1Dir" "$flake1Dir-tmp" +nix build --no-link "$flake2Dir#bar" --no-eval-cache + +# Check that Nix will fall back to fetching the input from a substituter. 
+cache="file://$TEST_ROOT/binary-cache" +nix copy --to "$cache" "$path1" +clearStore +nix build --no-link "$flake2Dir#bar" --no-eval-cache --substitute --substituters "$cache" + +clearStore +expectStderr 1 nix build --no-link "$flake2Dir#bar" --no-eval-cache | grepQuiet "Git repository.*does not exist" diff --git a/tests/functional/flakes/unlocked-override.sh b/tests/functional/flakes/unlocked-override.sh index ed05440de03..ed4d131b7ad 100755 --- a/tests/functional/flakes/unlocked-override.sh +++ b/tests/functional/flakes/unlocked-override.sh @@ -36,6 +36,7 @@ expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/f grepQuiet "Not writing lock file.*because it has an unlocked input" nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks +_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks --warn-large-path-threshold 1 --lazy-trees # Using a lock file with a dirty lock does not require --allow-dirty-locks, but should print a warning. expectStderr 0 nix eval "$flake2Dir#x" | diff --git a/tests/functional/gc-runtime.nix b/tests/functional/gc-runtime.nix index ee5980bdff9..df7f8ad1647 100644 --- a/tests/functional/gc-runtime.nix +++ b/tests/functional/gc-runtime.nix @@ -9,6 +9,7 @@ mkDerivation { cat > $out/program < \$TEST_ROOT/fifo sleep 10000 EOF diff --git a/tests/functional/gc-runtime.sh b/tests/functional/gc-runtime.sh index 0cccaaf16ab..34e99415d5c 100755 --- a/tests/functional/gc-runtime.sh +++ b/tests/functional/gc-runtime.sh @@ -21,11 +21,16 @@ nix-env -p "$profiles/test" -f ./gc-runtime.nix -i gc-runtime outPath=$(nix-env -p "$profiles/test" -q --no-name --out-path gc-runtime) echo "$outPath" +fifo="$TEST_ROOT/fifo" +mkfifo "$fifo" + echo "backgrounding program..." -"$profiles"/test/program & -sleep 2 # hack - wait for the program to get started +"$profiles"/test/program "$fifo" & child=$! 
echo PID=$child +cat "$fifo" + +expectStderr 1 nix-store --delete "$outPath" | grepQuiet "Cannot delete path.*because it's referenced by the GC root '/proc/" nix-env -p "$profiles/test" -e gc-runtime nix-env -p "$profiles/test" --delete-generations old diff --git a/tests/functional/gc.sh b/tests/functional/gc.sh index c58f47021f8..3ade6e4f582 100755 --- a/tests/functional/gc.sh +++ b/tests/functional/gc.sh @@ -13,7 +13,7 @@ outPath=$(nix-store -rvv "$drvPath") rm -f "$NIX_STATE_DIR/gcroots/foo" ln -sf "$outPath" "$NIX_STATE_DIR/gcroots/foo" -[ "$(nix-store -q --roots "$outPath")" = "$NIX_STATE_DIR/gcroots/foo -> $outPath" ] +expectStderr 0 nix-store -q --roots "$outPath" | grepQuiet "$NIX_STATE_DIR/gcroots/foo -> $outPath" nix-store --gc --print-roots | grep "$outPath" nix-store --gc --print-live | grep "$outPath" @@ -23,10 +23,10 @@ if nix-store --gc --print-dead | grep -E "$outPath"$; then false; fi nix-store --gc --print-dead inUse=$(readLink "$outPath/reference-to-input-2") -if nix-store --delete "$inUse"; then false; fi +expectStderr 1 nix-store --delete "$inUse" | grepQuiet "Cannot delete path.*because it's referenced by path '" test -e "$inUse" -if nix-store --delete "$outPath"; then false; fi +expectStderr 1 nix-store --delete "$outPath" | grepQuiet "Cannot delete path.*because it's referenced by the GC root " test -e "$outPath" for i in "$NIX_STORE_DIR"/*; do diff --git a/tests/functional/git-hashing/simple-common.sh b/tests/functional/git-hashing/simple-common.sh index a776ec43e00..1c5b0bf6552 100644 --- a/tests/functional/git-hashing/simple-common.sh +++ b/tests/functional/git-hashing/simple-common.sh @@ -7,13 +7,6 @@ source common.sh repo="$TEST_ROOT/scratch" -initRepo () { - git init "$repo" --object-format="$hashAlgo" - - git -C "$repo" config user.email "you@example.com" - git -C "$repo" config user.name "Your Name" -} - # Compare Nix's and git's implementation of git hashing try () { local expected="$1" diff --git 
a/tests/functional/git-hashing/simple-sha1.sh b/tests/functional/git-hashing/simple-sha1.sh index a883ea84808..f8024f80aab 100755 --- a/tests/functional/git-hashing/simple-sha1.sh +++ b/tests/functional/git-hashing/simple-sha1.sh @@ -4,7 +4,7 @@ hashAlgo=sha1 source simple-common.sh -initRepo +createGitRepo "$repo" "--object-format=$hashAlgo" # blob test0 diff --git a/tests/functional/git-hashing/simple-sha256.sh b/tests/functional/git-hashing/simple-sha256.sh index c7da71e00c7..0f2a3a2e6c8 100755 --- a/tests/functional/git-hashing/simple-sha256.sh +++ b/tests/functional/git-hashing/simple-sha256.sh @@ -6,7 +6,7 @@ source simple-common.sh requireDaemonNewerThan 2.31pre20250724 -initRepo +createGitRepo "$repo" "--object-format=$hashAlgo" # blob test0 diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh index f887ca408f7..89392ce3071 100755 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -21,7 +21,7 @@ drvPath2=$(nix derivation add < "$TEST_HOME"/impure-drv.json) [[ "$drvPath" = "$drvPath2" ]] # But only with the experimental feature! 
-expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features '' | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) diff --git a/tests/functional/lang.sh b/tests/functional/lang.sh index e64663d3064..63264ec2277 100755 --- a/tests/functional/lang.sh +++ b/tests/functional/lang.sh @@ -27,7 +27,7 @@ nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2> expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' | grepQuiet Hello expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello %" (throw "Foo")' | grepQuiet 'Hello %' # Relies on parsing the expression derivation as a derivation, can't use --eval -expectStderr 1 nix-instantiate --show-trace lang/non-eval-fail-bad-drvPath.nix | grepQuiet "store path '8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin' is not a valid derivation path" +expectStderr 1 nix-instantiate --show-trace lang/non-eval-fail-bad-drvPath.nix | grepQuiet "store path '2chwzswhhmpxbgc981i2vcz7xj4d1in9-cachix-1.7.3-bin' is not a valid derivation path" nix-instantiate --eval -E 'let x = builtins.trace { x = x; } true; in x' \ diff --git a/tests/functional/lang/eval-fail-blackhole.err.exp b/tests/functional/lang/eval-fail-blackhole.err.exp index 95e33a5fe45..d11eb338f9a 100644 --- a/tests/functional/lang/eval-fail-blackhole.err.exp +++ b/tests/functional/lang/eval-fail-blackhole.err.exp @@ -7,8 +7,8 @@ error: 3| x = y; error: infinite recursion encountered - at /pwd/lang/eval-fail-blackhole.nix:3:7: + at /pwd/lang/eval-fail-blackhole.nix:2:3: + 1| let { 2| body = x; + | ^ 3| x = y; - 
| ^ - 4| y = x; diff --git a/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp b/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp index 4cc43ca095e..f142b5c4d45 100644 --- a/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp +++ b/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp @@ -23,8 +23,3 @@ error: 7| in error: stack overflow; max-call-depth exceeded - at /pwd/lang/eval-fail-deepseq-stack-overflow.nix:5:28: - 4| let - 5| long = builtins.genList (x: x) 100000; - | ^ - 6| reverseLinkedList = builtins.foldl' (tail: head: { inherit head tail; }) null long; diff --git a/tests/functional/lang/eval-fail-readDir-nonexistent-1.err.exp b/tests/functional/lang/eval-fail-readDir-nonexistent-1.err.exp new file mode 100644 index 00000000000..eedce081807 --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-nonexistent-1.err.exp @@ -0,0 +1,16 @@ +error: + … while evaluating the attribute 'absolutePath' + at /pwd/lang/eval-fail-readDir-nonexistent-1.nix:2:3: + 1| { + 2| absolutePath = builtins.readDir /this/path/really/should/not/exist; + | ^ + 3| } + + … while calling the 'readDir' builtin + at /pwd/lang/eval-fail-readDir-nonexistent-1.nix:2:18: + 1| { + 2| absolutePath = builtins.readDir /this/path/really/should/not/exist; + | ^ + 3| } + + error: path '/this/path/really/should/not/exist' does not exist diff --git a/tests/functional/lang/eval-fail-readDir-nonexistent-1.nix b/tests/functional/lang/eval-fail-readDir-nonexistent-1.nix new file mode 100644 index 00000000000..4b9a53c2d7d --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-nonexistent-1.nix @@ -0,0 +1,3 @@ +{ + absolutePath = builtins.readDir /this/path/really/should/not/exist; +} diff --git a/tests/functional/lang/eval-fail-readDir-nonexistent-2.err.exp b/tests/functional/lang/eval-fail-readDir-nonexistent-2.err.exp new file mode 100644 index 00000000000..0be546b27bc --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-nonexistent-2.err.exp 
@@ -0,0 +1,16 @@ +error: + … while evaluating the attribute 'relativePath' + at /pwd/lang/eval-fail-readDir-nonexistent-2.nix:2:3: + 1| { + 2| relativePath = builtins.readDir ./this/path/really/should/not/exist; + | ^ + 3| } + + … while calling the 'readDir' builtin + at /pwd/lang/eval-fail-readDir-nonexistent-2.nix:2:18: + 1| { + 2| relativePath = builtins.readDir ./this/path/really/should/not/exist; + | ^ + 3| } + + error: path '/pwd/lang/this/path/really/should/not/exist' does not exist diff --git a/tests/functional/lang/eval-fail-readDir-nonexistent-2.nix b/tests/functional/lang/eval-fail-readDir-nonexistent-2.nix new file mode 100644 index 00000000000..14be5671c8a --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-nonexistent-2.nix @@ -0,0 +1,3 @@ +{ + relativePath = builtins.readDir ./this/path/really/should/not/exist; +} diff --git a/tests/functional/lang/eval-fail-readDir-not-a-directory-1.err.exp b/tests/functional/lang/eval-fail-readDir-not-a-directory-1.err.exp new file mode 100644 index 00000000000..f94a7ed7452 --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-not-a-directory-1.err.exp @@ -0,0 +1,16 @@ +error: + … while evaluating the attribute 'regularFile' + at /pwd/lang/eval-fail-readDir-not-a-directory-1.nix:2:3: + 1| { + 2| regularFile = builtins.readDir ./readDir/bar; + | ^ + 3| } + + … while calling the 'readDir' builtin + at /pwd/lang/eval-fail-readDir-not-a-directory-1.nix:2:17: + 1| { + 2| regularFile = builtins.readDir ./readDir/bar; + | ^ + 3| } + + error: cannot read directory "/pwd/lang/readDir/bar": Not a directory diff --git a/tests/functional/lang/eval-fail-readDir-not-a-directory-1.nix b/tests/functional/lang/eval-fail-readDir-not-a-directory-1.nix new file mode 100644 index 00000000000..02b4f8551f3 --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-not-a-directory-1.nix @@ -0,0 +1,3 @@ +{ + regularFile = builtins.readDir ./readDir/bar; +} diff --git 
a/tests/functional/lang/eval-fail-readDir-not-a-directory-2.err.exp b/tests/functional/lang/eval-fail-readDir-not-a-directory-2.err.exp new file mode 100644 index 00000000000..f5e6775545a --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-not-a-directory-2.err.exp @@ -0,0 +1,16 @@ +error: + … while evaluating the attribute 'symlinkedRegularFile' + at /pwd/lang/eval-fail-readDir-not-a-directory-2.nix:2:3: + 1| { + 2| symlinkedRegularFile = builtins.readDir ./readDir/linked; + | ^ + 3| } + + … while calling the 'readDir' builtin + at /pwd/lang/eval-fail-readDir-not-a-directory-2.nix:2:26: + 1| { + 2| symlinkedRegularFile = builtins.readDir ./readDir/linked; + | ^ + 3| } + + error: cannot read directory "/pwd/lang/readDir/foo/git-hates-directories": Not a directory diff --git a/tests/functional/lang/eval-fail-readDir-not-a-directory-2.nix b/tests/functional/lang/eval-fail-readDir-not-a-directory-2.nix new file mode 100644 index 00000000000..1756684a747 --- /dev/null +++ b/tests/functional/lang/eval-fail-readDir-not-a-directory-2.nix @@ -0,0 +1,3 @@ +{ + symlinkedRegularFile = builtins.readDir ./readDir/linked; +} diff --git a/tests/functional/lang/eval-fail-recursion.err.exp b/tests/functional/lang/eval-fail-recursion.err.exp index ee41ff46bea..21bf7a695bd 100644 --- a/tests/functional/lang/eval-fail-recursion.err.exp +++ b/tests/functional/lang/eval-fail-recursion.err.exp @@ -1,14 +1,14 @@ error: … in the right operand of the update (//) operator - at /pwd/lang/eval-fail-recursion.nix:2:14: + at /pwd/lang/eval-fail-recursion.nix:2:11: 1| let 2| a = { } // a; - | ^ + | ^ 3| in error: infinite recursion encountered - at /pwd/lang/eval-fail-recursion.nix:2:14: - 1| let - 2| a = { } // a; - | ^ + at /pwd/lang/eval-fail-recursion.nix:4:1: 3| in + 4| a.foo + | ^ + 5| diff --git a/tests/functional/lang/eval-fail-scope-5.err.exp b/tests/functional/lang/eval-fail-scope-5.err.exp index 6edc85f4f16..557054b5354 100644 --- 
a/tests/functional/lang/eval-fail-scope-5.err.exp +++ b/tests/functional/lang/eval-fail-scope-5.err.exp @@ -21,8 +21,8 @@ error: 8| x ? y, error: infinite recursion encountered - at /pwd/lang/eval-fail-scope-5.nix:8:11: - 7| { - 8| x ? y, - | ^ - 9| y ? x, + at /pwd/lang/eval-fail-scope-5.nix:13:3: + 12| + 13| body = f { }; + | ^ + 14| diff --git a/tests/functional/lang/eval-okay-filterattrs-names.exp b/tests/functional/lang/eval-okay-filterattrs-names.exp new file mode 100644 index 00000000000..3f07d6e1a02 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs-names.exp @@ -0,0 +1 @@ +{ a = 3; } diff --git a/tests/functional/lang/eval-okay-filterattrs-names.nix b/tests/functional/lang/eval-okay-filterattrs-names.nix new file mode 100644 index 00000000000..94108fbefda --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs-names.nix @@ -0,0 +1,5 @@ +builtins.filterAttrs (name: value: name == "a") { + a = 3; + b = 6; + c = 10; +} diff --git a/tests/functional/lang/eval-okay-filterattrs.exp b/tests/functional/lang/eval-okay-filterattrs.exp new file mode 100644 index 00000000000..74b9825e9c4 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs.exp @@ -0,0 +1 @@ +{ b = 6; c = 10; } diff --git a/tests/functional/lang/eval-okay-filterattrs.nix b/tests/functional/lang/eval-okay-filterattrs.nix new file mode 100644 index 00000000000..28d37bbe784 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs.nix @@ -0,0 +1,5 @@ +builtins.filterAttrs (name: value: value > 5) { + a = 3; + b = 6; + c = 10; +} diff --git a/tests/functional/lang/eval-okay-readDir-symlinked-directory.exp b/tests/functional/lang/eval-okay-readDir-symlinked-directory.exp new file mode 100644 index 00000000000..f9a314482df --- /dev/null +++ b/tests/functional/lang/eval-okay-readDir-symlinked-directory.exp @@ -0,0 +1 @@ +{ git-hates-directories = "regular"; } diff --git a/tests/functional/lang/eval-okay-readDir-symlinked-directory.nix 
b/tests/functional/lang/eval-okay-readDir-symlinked-directory.nix new file mode 100644 index 00000000000..a052b259774 --- /dev/null +++ b/tests/functional/lang/eval-okay-readDir-symlinked-directory.nix @@ -0,0 +1 @@ +builtins.readDir ./readDir/ldir diff --git a/tests/functional/lang/eval-okay-regex-match2.nix b/tests/functional/lang/eval-okay-regex-match2.nix index 31a94423d86..2345b265535 100644 --- a/tests/functional/lang/eval-okay-regex-match2.nix +++ b/tests/functional/lang/eval-okay-regex-match2.nix @@ -155,7 +155,7 @@ builtins.map ] [ ''.*pypy.*'' - ''/nix/store/8w718rm43x7z73xhw9d6vh8s4snrq67h-python3-3.12.10/bin/python3.12'' + ''/nix/store/iqlzcyc1z7nv804n9wc5k5i0l180wnbs-python3-3.12.10/bin/python3.12'' ] [ ''(.*/)?\.\.(/.*)?'' @@ -199,19 +199,19 @@ builtins.map ] [ ''.*-polly.*'' - ''/nix/store/0yxfdnfxbzczjxhgdpac81jnas194wfj-gnu-install-dirs.patch'' + ''/nix/store/21yv6cysn8axxjyh7dbsnnmbp9nprg9i-gnu-install-dirs.patch'' ] [ ''.*-polly.*'' - ''/nix/store/jh2pda7psaasq85b2rrigmkjdbl8d0a1-llvm-lit-cfg-add-libs-to-dylib-path.patch'' + ''/nix/store/96dqnv9v20fi7glzsah6qx5zypbkrwsh-llvm-lit-cfg-add-libs-to-dylib-path.patch'' ] [ ''.*-polly.*'' - ''/nix/store/x868j4ih7wqiivf6wr9m4g424jav0hpq-gnu-install-dirs-polly.patch'' + ''/nix/store/hjlgp59nhxjj2y8ghf7mmqgbirqarccy-gnu-install-dirs-polly.patch'' ] [ ''.*-polly.*'' - ''/nix/store/gr73nf6sca9nyzl88x58y3qxrav04yhd-polly-lit-cfg-add-libs-to-dylib-path.patch'' + ''/nix/store/ybagzhw2933fvgi95qgbyw6i4avahyzr-polly-lit-cfg-add-libs-to-dylib-path.patch'' ] [ ''(.*/)?\.\.(/.*)?'' @@ -367,7 +367,7 @@ builtins.map ] [ ''.*pypy.*'' - ''/nix/store/8w718rm43x7z73xhw9d6vh8s4snrq67h-python3-3.12.10/bin/python3.12'' + ''/nix/store/iqlzcyc1z7nv804n9wc5k5i0l180wnbs-python3-3.12.10/bin/python3.12'' ] [ ''(.*)\.git'' @@ -453,11 +453,11 @@ builtins.map ] [ ''.*llvm-tblgen.*'' - ''-DLLVM_TABLEGEN:STRING=/nix/store/xp9hkw8nsw9p81d69yvcg1yr6f7vh71c-llvm-tblgen-18.1.8/bin/llvm-tblgen'' + 
''-DLLVM_TABLEGEN:STRING=/nix/store/sclapmhdj6i9h02y7s5a630kfy55v9h1-llvm-tblgen-18.1.8/bin/llvm-tblgen'' ] [ ''.*llvm-tblgen.*'' - ''-DLLVM_TABLEGEN_EXE:STRING=/nix/store/xp9hkw8nsw9p81d69yvcg1yr6f7vh71c-llvm-tblgen-18.1.8/bin/llvm-tblgen'' + ''-DLLVM_TABLEGEN_EXE:STRING=/nix/store/sclapmhdj6i9h02y7s5a630kfy55v9h1-llvm-tblgen-18.1.8/bin/llvm-tblgen'' ] [ ''(.+)-b(.+)'' diff --git a/tests/functional/lang/non-eval-fail-bad-drvPath.nix b/tests/functional/lang/non-eval-fail-bad-drvPath.nix index 23639bc5465..327a2cb2c9f 100644 --- a/tests/functional/lang/non-eval-fail-bad-drvPath.nix +++ b/tests/functional/lang/non-eval-fail-bad-drvPath.nix @@ -5,9 +5,9 @@ let system = builtins.currentSystem; outputs = [ "out" ]; # Illegal, because does not end in `.drv` - drvPath = "${builtins.storeDir}/8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin"; + drvPath = "${builtins.storeDir}/2chwzswhhmpxbgc981i2vcz7xj4d1in9-cachix-1.7.3-bin"; outputName = "out"; - outPath = "${builtins.storeDir}/8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin"; + outPath = "${builtins.storeDir}/2chwzswhhmpxbgc981i2vcz7xj4d1in9-cachix-1.7.3-bin"; out = package; }; in diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh index c3ddf6ce65f..484ad1d2b68 100755 --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -96,3 +96,9 @@ nix-sandbox-build symlink-derivation.nix -A test_sandbox_paths \ --option extra-sandbox-paths "/dir=$TEST_ROOT" \ --option extra-sandbox-paths "/symlinkDir=$symlinkDir" \ --option extra-sandbox-paths "/symlink=$symlinkcert" + +# Nonexistent sandbox paths should error early in the build process +# shellcheck disable=SC2016 +expectStderr 1 nix-sandbox-build --option extra-sandbox-paths '/does-not-exist' \ + -E 'with import '"${config_nix}"'; mkDerivation { name = "trivial"; buildCommand = "echo > $out"; }' | + grepQuiet "path '/does-not-exist' is configured as part of the \`sandbox-paths\` option, but is inaccessible" diff 
--git a/tests/functional/local-overlay-store/delete-refs-inner.sh b/tests/functional/local-overlay-store/delete-refs-inner.sh index f54ef2bb6b4..708e8c5a8df 100644 --- a/tests/functional/local-overlay-store/delete-refs-inner.sh +++ b/tests/functional/local-overlay-store/delete-refs-inner.sh @@ -23,14 +23,14 @@ input2=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg input3=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg withFinalRefs true --arg seed 2 -A passthru.input3 -j0) # Can't delete because referenced -expectStderr 1 nix-store --delete "$input1" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete "$input2" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete "$input3" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --delete "$input1" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete "$input2" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete "$input3" | grepQuiet "Cannot delete path.*because it's referenced by path" # These same paths are referenced in the lower layer (by the seed 1 # build done in `initLowerStore`). 
-expectStderr 1 nix-store --store "$storeA" --delete "$input2" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --store "$storeA" --delete "$input3" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --store "$storeA" --delete "$input2" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --store "$storeA" --delete "$input3" | grepQuiet "Cannot delete path.*because it's referenced by path" # Can delete nix-store --delete "$hermetic" diff --git a/tests/functional/logging.sh b/tests/functional/logging.sh index 600fce43e94..ffb1e6d9621 100755 --- a/tests/functional/logging.sh +++ b/tests/functional/logging.sh @@ -40,5 +40,6 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then nix build -vv --file dependencies.nix --no-link --json-log-path "$TEST_ROOT/log.json" 2>&1 | grepQuiet 'building.*dependencies-top.drv' jq < "$TEST_ROOT/log.json" grep '{"action":"start","fields":\[".*-dependencies-top.drv","",1,1\],"id":.*,"level":3,"parent":0' "$TEST_ROOT/log.json" >&2 + grep -E '{"action":"result","id":[^,]+,"payload":{"builtOutputs":{"out":{"dependentRealisations":\{\},"id":"[^"]+","outPath":"[^-]+-dependencies-top".*"status":"Built".*"success":true' "$TEST_ROOT/log.json" >&2 (( $(grep -c '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" ) == 5 )) fi diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh index 131b63323e5..b8bbb74dddd 100755 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -23,11 +23,11 @@ expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. 
eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) -echo "$eval_arg_res" | grep "at «string»:1:15:" +echo "$eval_arg_res" | grep "at «string»:1:12:" echo "$eval_arg_res" | grep "infinite recursion encountered" eval_stdin_res=$(echo 'let a = {} // a; in a.foo' | nix-instantiate --eval -E - 2>&1 || true) -echo "$eval_stdin_res" | grep "at «stdin»:1:15:" +echo "$eval_stdin_res" | grep "at «stdin»:1:12:" echo "$eval_stdin_res" | grep "infinite recursion encountered" # Attribute path errors diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh index cf84088d761..a27a32c6efd 100755 --- a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -4,6 +4,8 @@ source common.sh TODO_NixOS +requireGit + clearStore clearProfiles @@ -12,7 +14,7 @@ restartDaemon # Make a flake. flake1Dir=$TEST_ROOT/flake1 -mkdir -p "$flake1Dir" +createGitRepo "$flake1Dir" # shellcheck disable=SC2154,SC1039 cat > "$flake1Dir"/flake.nix < "$flake1Dir"/ca.nix cp "${config_nix}" "$flake1Dir"/ +git -C "$flake1Dir" add flake.nix config.nix who version ca.nix +git -C "$flake1Dir" commit -m 'Initial' + # Test upgrading from nix-env. nix-env -f ./user-envs.nix -i foo-1.0 nix profile list | grep -A2 'Name:.*foo' | grep 'Store paths:.*foo-1.0' nix profile add "$flake1Dir" -L -nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' +#nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' [[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] [ -e "$TEST_HOME"/.nix-profile/share/man ] # shellcheck disable=SC2235 (! 
[ -e "$TEST_HOME"/.nix-profile/include ]) nix profile history -nix profile history | grep "packages.$system.default: ∅ -> 1.0" -nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' +nix profile history | grep "packages.$system.default: 1.0, 1.0-man added" +nix profile diff-closures | grep 'env-manifest.nix: (no version) removed' # Test XDG Base Directories support export NIX_CONFIG="use-xdg-base-directories = true" @@ -96,6 +101,7 @@ printf 1.0 > "$flake1Dir"/version # Test --all exclusivity. assertStderr nix --offline profile upgrade --all foo << EOF error: --all cannot be used with package names or regular expressions. + Try 'nix --help' for more information. EOF @@ -130,9 +136,8 @@ nix profile rollback [ -e "$TEST_HOME"/.nix-profile/bin/foo ] # shellcheck disable=SC2235 nix profile remove foo 2>&1 | grep 'removed 1 packages' -# shellcheck disable=SC2235 -(! [ -e "$TEST_HOME"/.nix-profile/bin/foo ]) -nix profile history | grep 'foo: 1.0 -> ∅' +[[ ! -e "$TEST_HOME"/.nix-profile/bin/foo ]] +nix profile history | grep 'foo: 1.0 removed' nix profile diff-closures | grep 'Version 3 -> 4' # Test installing a non-flake package. @@ -224,11 +229,11 @@ error: An existing package already provides the following file: The conflicting packages have a priority of 5. To prioritise the new package: - nix profile add path:${flake2Dir}#packages.${system}.default --priority 4 + nix profile add git+file://${flake2Dir}#packages.${system}.default --priority 4 To prioritise the existing package: - nix profile add path:${flake2Dir}#packages.${system}.default --priority 6 + nix profile add git+file://${flake2Dir}#packages.${system}.default --priority 6 EOF ) [[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] diff --git a/tests/functional/package.nix b/tests/functional/package.nix index b3b314a50d7..2c1146ec4e8 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -26,6 +26,9 @@ # For running the functional tests against a different pre-built Nix. 
test-daemon ? null, + + # Whether to run tests with lazy trees enabled. + lazyTrees ? false, }: let @@ -95,6 +98,8 @@ mkMesonDerivation ( mkdir $out ''; + _NIX_TEST_EXTRA_CONFIG = lib.optionalString lazyTrees "lazy-trees = true"; + meta = { platforms = lib.platforms.unix; }; diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh index 712b5267878..3d6041914ed 100755 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -13,7 +13,7 @@ barBase=$(basename "$bar") echo baz > "$TEST_ROOT"/baz baz=$(nix store add-file "$TEST_ROOT"/baz) bazBase=$(basename "$baz") -nix-store --delete "$baz" +nix-store --delete --ignore-liveness "$baz" diff --unified --color=always \ <(nix path-info --json --json-format 2 "$foo" "$bar" "$baz" | diff --git a/tests/functional/recursive.nix b/tests/functional/recursive.nix index be9e55da37e..aa2aa26c549 100644 --- a/tests/functional/recursive.nix +++ b/tests/functional/recursive.nix @@ -17,7 +17,7 @@ mkDerivation rec { buildCommand = '' mkdir $out - opts="--experimental-features nix-command ${ + opts="${ if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else "" }" diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh index 9115aa77583..16c3fdab4df 100755 --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -14,7 +14,7 @@ rm -f "$TEST_ROOT"/result unreachable=$(nix store add-path ./recursive.sh) export unreachable -NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'nix-command recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix +NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix [[ $(cat "$TEST_ROOT"/result/inner1) =~ blaat ]] diff --git a/tests/functional/repl.sh b/tests/functional/repl.sh index 7023f2b8a0d..0e84a3d1438 100755 --- a/tests/functional/repl.sh +++ 
b/tests/functional/repl.sh @@ -162,7 +162,7 @@ EOF testReplResponse ' foo + baz ' "3" \ - ./flake ./flake\#bar --experimental-features 'flakes' + ./flake ./flake\#bar testReplResponse $' :a { a = 1; b = 2; longerName = 3; "with spaces" = 4; } @@ -197,7 +197,7 @@ testReplResponseNoRegex $' # - Check that the result has changed mkfifo repl_fifo touch repl_output -nix repl ./flake --experimental-features 'flakes' < repl_fifo >> repl_output 2>&1 & +nix repl ./flake < repl_fifo >> repl_output 2>&1 & repl_pid=$! exec 3>repl_fifo # Open fifo for writing echo "changingThing" >&3 @@ -321,7 +321,7 @@ import $testDir/lang/parse-fail-eof-pos.nix badDiff=0 badExitCode=0 -nixVersion="$(nix eval --impure --raw --expr 'builtins.nixVersion' --extra-experimental-features nix-command)" +nixVersion="$(nix --version | sed 's/nix //')" # TODO: write a repl interacter for testing. Papering over the differences between readline / editline and between platforms is a pain. diff --git a/tests/functional/simple.sh b/tests/functional/simple.sh index c1f2eef411e..e54ad860ca9 100755 --- a/tests/functional/simple.sh +++ b/tests/functional/simple.sh @@ -21,7 +21,7 @@ TODO_NixOS # Directed delete: $outPath is not reachable from a root, so it should # be deleteable. -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness [[ ! 
-e $outPath/hello ]] outPath="$(NIX_REMOTE='local?store=/foo&real='"$TEST_ROOT"'/real-store' nix-instantiate --readonly-mode hash-check.nix)" diff --git a/tests/functional/store-info.sh b/tests/functional/store-info.sh index adaee5dfecf..ee896929ae3 100755 --- a/tests/functional/store-info.sh +++ b/tests/functional/store-info.sh @@ -65,7 +65,7 @@ check_human_readable "$STORE_INFO" check_human_readable "$LEGACY_STORE_INFO" if [[ -v NIX_DAEMON_PACKAGE ]] && isDaemonNewer "2.7.0pre20220126"; then - DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | cut -d' ' -f3) + DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | sed 's/.*) //') echo "$STORE_INFO" | grep "Version: $DAEMON_VERSION" [[ "$(echo "$STORE_INFO_JSON" | jq -r ".version")" == "$DAEMON_VERSION" ]] fi diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh index 6b09cf6a5ce..451ee879a5b 100755 --- a/tests/functional/tarball.sh +++ b/tests/functional/tarball.sh @@ -38,6 +38,9 @@ test_tarball() { [[ $(nix eval --impure --expr "(fetchTree file://$tarball).lastModified") = 1000000000 ]] + # fetchTree with a narHash is implicitly final, so it doesn't return attributes like lastModified. + [[ $(nix eval --impure --expr "(fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; }) ? lastModified") = false ]] + nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? submodules)" >&2 nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? 
submodules)" 2>&1 | grep 'true' diff --git a/tests/installer/default.nix b/tests/installer/default.nix index d48537dd0d0..dc831cc97b1 100644 --- a/tests/installer/default.nix +++ b/tests/installer/default.nix @@ -232,7 +232,7 @@ let source /etc/bashrc || true nix-env --version - nix --extra-experimental-features nix-command store info + nix store info out=\$(nix-build --no-substitute -E 'derivation { name = "foo"; system = "x86_64-linux"; builder = "/bin/sh"; args = ["-c" "echo foobar > \$out"]; }') [[ \$(cat \$out) = foobar ]] diff --git a/tests/nixos/authorization.nix b/tests/nixos/authorization.nix index 6540e9fa337..944e5925925 100644 --- a/tests/nixos/authorization.nix +++ b/tests/nixos/authorization.nix @@ -13,8 +13,6 @@ users.users.alice.isNormalUser = true; users.users.bob.isNormalUser = true; users.users.mallory.isNormalUser = true; - - nix.settings.experimental-features = "nix-command"; }; testScript = diff --git a/tests/nixos/cgroups/default.nix b/tests/nixos/cgroups/default.nix index a6b4bca8c76..4161aba2ca2 100644 --- a/tests/nixos/cgroups/default.nix +++ b/tests/nixos/cgroups/default.nix @@ -9,7 +9,7 @@ { virtualisation.additionalPaths = [ pkgs.stdenvNoCC ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.settings.use-cgroups = true; diff --git a/tests/nixos/chroot-store.nix b/tests/nixos/chroot-store.nix index 0a4fff99222..ecac371e152 100644 --- a/tests/nixos/chroot-store.nix +++ b/tests/nixos/chroot-store.nix @@ -25,7 +25,6 @@ in virtualisation.writableStore = true; virtualisation.additionalPaths = [ pkgA ]; environment.systemPackages = [ pkgB ]; - nix.extraOptions = "experimental-features = nix-command"; }; }; diff --git a/tests/nixos/containers/containers.nix b/tests/nixos/containers/containers.nix index b590dc8498f..8d07c80b6a3 100644 --- a/tests/nixos/containers/containers.nix +++ 
b/tests/nixos/containers/containers.nix @@ -23,7 +23,7 @@ virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.nixPath = [ "nixpkgs=${nixpkgs}" ]; diff --git a/tests/nixos/content-encoding.nix b/tests/nixos/content-encoding.nix index debee377bdf..1e188cb060b 100644 --- a/tests/nixos/content-encoding.nix +++ b/tests/nixos/content-encoding.nix @@ -131,6 +131,7 @@ in start_all() machine.wait_for_unit("nginx.service") + machine.wait_for_open_port(80) # Original test: zstd archive with gzip content-encoding # Make sure that the file is properly compressed as the test would be meaningless otherwise diff --git a/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix new file mode 100644 index 00000000000..a241c877d21 --- /dev/null +++ b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix @@ -0,0 +1,49 @@ +{ config, ... 
}: +{ + description = "build-time fetching"; + script = '' + import json + + # add a file to the repo + client.succeed(f""" + echo ${config.name # to make the git tree and store path unique + } > {repo.path}/test-case \ + && echo chiang-mai > {repo.path}/thailand \ + && {repo.git} add test-case thailand \ + && {repo.git} commit -m 'commit1' \ + && {repo.git} push origin main + """) + + # get the NAR hash + nar_hash = json.loads(client.succeed(f""" + nix flake prefetch --flake-registry "" git+{repo.remote} --json + """))['hash'] + + # construct the derivation + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "git"; + url = "{repo.remote}"; + ref = "main"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + + # do the build-time fetch + out_path = client.succeed(f""" + nix build --print-out-paths --store /run/store --flake-registry "" --extra-experimental-features build-time-fetch-tree --expr '{expr}' + """).strip() + + # check if the committed file is there + client.succeed(f""" + test -f /run/store/{out_path}/thailand + """) + ''; +} diff --git a/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix b/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix index f635df1f879..a204caedd57 100644 --- a/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix +++ b/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix @@ -1,5 +1,5 @@ { - description = "fetchTree fetches git repos shallowly by default"; + description = "fetchTree fetches git repos shallowly if possible"; script = '' # purge nix git cache to make sure we start with a clean slate client.succeed("rm -rf ~/.cache/nix") @@ -28,6 +28,7 @@ type = "git"; url = "{repo.remote}"; rev = "{commit2_rev}"; + revCount = 1234; }} """ diff --git a/tests/nixos/fetch-git/testsupport/setup.nix b/tests/nixos/fetch-git/testsupport/setup.nix index 
c13386c7223..3c9f4bddea1 100644 --- a/tests/nixos/fetch-git/testsupport/setup.nix +++ b/tests/nixos/fetch-git/testsupport/setup.nix @@ -81,10 +81,6 @@ in environment.variables = { _NIX_FORCE_HTTP = "1"; }; - nix.settings.experimental-features = [ - "nix-command" - "flakes" - ]; }; setupScript = ''''; testScript = '' diff --git a/tests/nixos/fetchers-substitute.nix b/tests/nixos/fetchers-substitute.nix index bfe15c5c36e..a26748dca65 100644 --- a/tests/nixos/fetchers-substitute.nix +++ b/tests/nixos/fetchers-substitute.nix @@ -150,28 +150,5 @@ content = importer.succeed(f"cat {result_path}/hello.txt").strip() assert content == "Hello from tarball!", f"Content mismatch: {content}" print("✓ fetchTarball content verified!") - - ########################################## - # Test 3: Verify fetchTree does NOT substitute (preserves metadata) - ########################################## - - print("Testing that fetchTree without __final does NOT use substitution...") - - # fetchTree with just narHash (not __final) should try to download, which will fail - # since the file doesn't exist on the importer - exit_code = importer.fail(f""" - nix-instantiate --eval --json --read-write-mode --expr ' - builtins.fetchTree {{ - type = "tarball"; - url = "file:///only-on-substituter.tar.gz"; - narHash = "{tarball_hash_sri}"; - }} - ' 2>&1 - """) - - # Should fail with "does not exist" since it tries to download instead of substituting - assert "does not exist" in exit_code or "Couldn't open file" in exit_code, f"Expected download failure, got: {exit_code}" - print("✓ fetchTree correctly does NOT substitute non-final inputs!") - print(" (This preserves metadata like lastModified from the actual fetch)") ''; } diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix index e8663debbcd..d75cc2017de 100644 --- a/tests/nixos/fetchurl.nix +++ b/tests/nixos/fetchurl.nix @@ -64,8 +64,6 @@ in ]; virtualisation.writableStore = true; - - nix.settings.experimental-features = "nix-command"; 
}; }; diff --git a/tests/nixos/fsync.nix b/tests/nixos/fsync.nix index e215e5b3c25..50105f1ccd9 100644 --- a/tests/nixos/fsync.nix +++ b/tests/nixos/fsync.nix @@ -23,7 +23,6 @@ in { virtualisation.emptyDiskImages = [ 1024 ]; environment.systemPackages = [ pkg1 ]; - nix.settings.experimental-features = [ "nix-command" ]; nix.settings.fsync-store-paths = true; nix.settings.require-sigs = false; boot.supportedFilesystems = [ diff --git a/tests/nixos/functional/common.nix b/tests/nixos/functional/common.nix index 4d32b757324..72b7b61d12c 100644 --- a/tests/nixos/functional/common.nix +++ b/tests/nixos/functional/common.nix @@ -24,6 +24,7 @@ in ]; nix.settings.substituters = lib.mkForce [ ]; + systemd.services.nix-daemon.environment._NIX_IN_TEST = "1"; environment.systemPackages = let diff --git a/tests/nixos/git-submodules.nix b/tests/nixos/git-submodules.nix index c6f53ada2dc..9105eb79bd7 100644 --- a/tests/nixos/git-submodules.nix +++ b/tests/nixos/git-submodules.nix @@ -24,7 +24,6 @@ { programs.ssh.extraConfig = "ConnectTimeout 30"; environment.systemPackages = [ pkgs.git ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index d14cd9d0c75..3a72c669162 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -17,7 +17,7 @@ let openssl req -newkey rsa:2048 -nodes -keyout $out/server.key \ -subj "/C=CN/ST=Denial/L=Springfield/O=Dis/CN=github.com" -out server.csr - openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org") \ + openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org,DNS:install.determinate.systems") \ -days 36500 -in server.csr -CA $out/ca.crt -CAkey ca.key -CAcreateserial -out $out/server.crt ''; @@ -107,13 +107,13 @@ in services.httpd.extraConfig = '' ErrorLog syslog:local6 ''; - 
services.httpd.virtualHosts."channels.nixos.org" = { + services.httpd.virtualHosts."install.determinate.systems" = { forceSSL = true; sslServerKey = "${cert}/server.key"; sslServerCert = "${cert}/server.crt"; servedDirs = [ { - urlPath = "/"; + urlPath = "/flake-registry/stable/"; dir = registry; } ]; @@ -163,9 +163,9 @@ in ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; networking.hosts.${(builtins.head nodes.github.networking.interfaces.eth1.ipv4.addresses).address} = [ + "install.determinate.systems" "channels.nixos.org" "api.github.com" "github.com" @@ -204,14 +204,53 @@ in assert info["revision"] == "${nixpkgs.rev}", f"revision mismatch: {info['revision']} != ${nixpkgs.rev}" cat_log() + out = client.succeed("nix flake prefetch nixpkgs --json") + nar_hash = json.loads(out)['hash'] + + # Test build-time fetching of public flakes. + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "NixOS"; + repo = "nixpkgs"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree -L --expr '{expr}'") + # ... otherwise it should use the API - out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0") + out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0 --no-trust-tarballs-from-git-forges") print(out) info = json.loads(out) assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}" assert info["fingerprint"] cat_log() + # Test build-time fetching of private flakes. 
+ expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "fancy-enterprise"; + repo = "private-flake"; + }}; + outputHashMode = "recursive"; + outputHash = "{info['locked']['narHash']}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree --access-tokens github.com=ghp_000000000000000000000000000000000000 -L --expr '{expr}'") + # Fetching with the resolved URL should produce the same result. info2 = json.loads(client.succeed(f"nix flake metadata {info['url']} --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")) print(info["fingerprint"], info2["fingerprint"]) @@ -225,6 +264,10 @@ in hash = client.succeed(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree {info['url']}).narHash'") assert hash == info['locked']['narHash'] + # Fetching with an incorrect NAR hash should fail. + out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree \"github:fancy-enterprise/private-flake/{info['revision']}?narHash=sha256-HsrRFZYg69qaVe/wDyWBYLeS6ca7ACEJg2Z%2BGpEFw4A%3D\").narHash' 2>&1") + assert "mismatch in field 'narHash'" in out, "NAR hash check did not fail with the expected error" + # Fetching without a narHash should succeed if trust-github is set and fail otherwise. 
client.succeed(f"nix eval --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}'") out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}' 2>&1") diff --git a/tests/nixos/nix-copy.nix b/tests/nixos/nix-copy.nix index 64de622de76..a7f0a6a326f 100644 --- a/tests/nixos/nix-copy.nix +++ b/tests/nixos/nix-copy.nix @@ -39,7 +39,6 @@ in pkgD.drvPath ]; nix.settings.substituters = lib.mkForce [ ]; - nix.settings.experimental-features = [ "nix-command" ]; services.getty.autologinUser = "root"; programs.ssh.extraConfig = '' Host * diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index a2ba1dae6c7..33e869d1954 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -39,7 +39,6 @@ in environment.systemPackages = [ pkgs.minio-client ]; nix.nixPath = [ "nixpkgs=${pkgs.path}" ]; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; services.minio = { @@ -59,7 +58,6 @@ in virtualisation.writableStore = true; virtualisation.cores = 2; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; }; diff --git a/tests/nixos/sourcehut-flakes.nix b/tests/nixos/sourcehut-flakes.nix index 3f05130d6aa..5b40866d1fa 100644 --- a/tests/nixos/sourcehut-flakes.nix +++ b/tests/nixos/sourcehut-flakes.nix @@ -119,7 +119,6 @@ in virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - experimental-features = nix-command flakes flake-registry = https://git.sr.ht/~NixOS/flake-registry/blob/master/flake-registry.json ''; environment.systemPackages = [ pkgs.jq ]; diff --git a/tests/nixos/tarball-flakes.nix b/tests/nixos/tarball-flakes.nix index 26c20cb1aef..ab9b200db26 100644 --- a/tests/nixos/tarball-flakes.nix +++ b/tests/nixos/tarball-flakes.nix @@ -61,7 +61,6 @@ in ]; 
virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; @@ -99,7 +98,6 @@ in # Check that fetching fails if we provide incorrect attributes. machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?rev=493300eb13ae6fb387fbd47bf54a85915acc31c0") - machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?revCount=789") machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?narHash=sha256-tbudgBSg+bHWHiHnlteNzN8TUvI80ygS9IULh4rklEw=") ''; diff --git a/tests/repl-completion.nix b/tests/repl-completion.nix index 07406e969cd..9ae37796bf5 100644 --- a/tests/repl-completion.nix +++ b/tests/repl-completion.nix @@ -15,7 +15,7 @@ runCommand "repl-completion" ]; expectScript = '' # Regression https://github.com/NixOS/nix/pull/10778 - spawn nix repl --offline --extra-experimental-features nix-command + spawn nix repl --offline expect "nix-repl>" send "foo = import ./does-not-exist.nix\n" expect "nix-repl>"