Make nix flake metadata|update|lock lazy #12432

Open · wants to merge 7 commits into master (changes shown from all commits)
2 changes: 1 addition & 1 deletion src/libexpr/eval.cc
@@ -2441,7 +2441,7 @@ StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringCon
auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned();
if (auto storePath = store->maybeParseStorePath(path))
return *storePath;
error<EvalError>("path '%1%' is not in the Nix store", path).withTrace(pos, errorCtx).debugThrow();
error<EvalError>("cannot coerce '%s' to a store path because it does not denote a subpath of the Nix store", path).withTrace(pos, errorCtx).debugThrow();
}


7 changes: 7 additions & 0 deletions src/libfetchers/github.cc
@@ -299,6 +299,13 @@ struct GitArchiveInputScheme : InputScheme
false,
"«" + input.to_string() + "»");

if (!input.settings->trustTarballsFromGitForges)
// FIXME: computing the NAR hash here is wasteful if
// copyInputToStore() is just going to hash/copy it as
// well.
input.attrs.insert_or_assign("narHash",
accessor->hashPath(CanonPath::root).to_string(HashFormat::SRI, true));

return {accessor, input};
}

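A standalone sketch (simplified stand-ins, not Nix's fetcher API) of the condition added above: when tarballs from a git forge are not trusted, the fetcher records the tree's NAR hash as a `narHash` input attribute, so the input still carries a verifiable hash (and can satisfy the `RequireLockable` copy mode introduced below) even when it is never copied to the store.

```cpp
// Illustrative stand-ins only; the attribute map and hash value are examples.
#include <iostream>
#include <map>
#include <string>

using Attrs = std::map<std::string, std::string>;

void recordNarHashIfUntrusted(Attrs & attrs, bool trustTarballsFromGitForges,
                              const std::string & computedSriHash)
{
    // Only pay for hashing the tree when we cannot trust the forge's tarball.
    if (!trustTarballsFromGitForges)
        attrs.insert_or_assign("narHash", computedSriHash);
}

int main()
{
    Attrs attrs;
    // Example SRI value (SHA-256 of the empty string), not a real flake hash.
    recordNarHashIfUntrusted(attrs, false, "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=");
    std::cout << attrs.count("narHash") << "\n"; // 1: the input is now lockable
}
```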
50 changes: 36 additions & 14 deletions src/libflake/flake/flake.cc
@@ -100,6 +100,20 @@ static StorePath copyInputToStore(
return storePath;
}

static SourcePath maybeCopyInputToStore(
EvalState & state,
fetchers::Input & input,
const fetchers::Input & originalInput,
ref<SourceAccessor> accessor,
CopyMode copyMode)
{
return copyMode == CopyMode::Lazy || (copyMode == CopyMode::RequireLockable && (input.isLocked() || input.getNarHash()))
? SourcePath(accessor)
: state.rootPath(
state.store->toRealPath(
copyInputToStore(state, input, originalInput, accessor)));
}

static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos)
{
if (value.isThunk() && value.isTrivial())
@@ -397,7 +411,8 @@ static Flake getFlake(
const FlakeRef & originalRef,
bool useRegistries,
FlakeCache & flakeCache,
const InputAttrPath & lockRootAttrPath)
const InputAttrPath & lockRootAttrPath,
CopyMode copyMode)
{
// Fetch a lazy tree first.
auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree(
@@ -419,17 +434,17 @@
lockedRef = lockedRef2;
}

// Copy the tree to the store.
auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, accessor);

// Re-parse flake.nix from the store.
return readFlake(state, originalRef, resolvedRef, lockedRef, state.rootPath(state.store->toRealPath(storePath)), lockRootAttrPath);
return readFlake(
state, originalRef, resolvedRef, lockedRef,
maybeCopyInputToStore(state, lockedRef.input, originalRef.input, accessor, copyMode),
lockRootAttrPath);
}

Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries)
Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries, CopyMode copyMode)
{
FlakeCache flakeCache;
return getFlake(state, originalRef, useRegistries, flakeCache, {});
return getFlake(state, originalRef, useRegistries, flakeCache, {}, copyMode);
}

static LockFile readLockFile(
Expand All @@ -455,7 +470,7 @@ LockedFlake lockFlake(

auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);

auto flake = getFlake(state, topRef, useRegistries, flakeCache, {});
auto flake = getFlake(state, topRef, useRegistries, flakeCache, {}, lockFlags.copyMode);

if (lockFlags.applyNixConfig) {
flake.config.apply(settings);
@@ -500,6 +515,13 @@ LockedFlake lockFlake(
explicitCliOverrides.insert(i.first);
}

/* For locking of inputs, we require at least a NAR
hash. I.e. we can't be fully lazy. */
auto inputCopyMode =
lockFlags.copyMode == CopyMode::Lazy
? CopyMode::RequireLockable
: lockFlags.copyMode;

LockFile newLockFile;

std::vector<FlakeRef> parents;
@@ -630,7 +652,7 @@ LockedFlake lockFlake(
if (auto resolvedPath = resolveRelativePath()) {
return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath);
} else {
return getFlake(state, ref, useRegistries, flakeCache, inputAttrPath);
return getFlake(state, ref, useRegistries, flakeCache, inputAttrPath, inputCopyMode);
}
};

@@ -781,10 +803,10 @@ LockedFlake lockFlake(
auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, useRegistries, flakeCache);

// FIXME: allow input to be lazy.
auto storePath = copyInputToStore(state, lockedRef.input, input.ref->input, accessor);

return {state.rootPath(state.store->toRealPath(storePath)), lockedRef};
return {
maybeCopyInputToStore(state, lockedRef.input, input.ref->input, accessor, inputCopyMode),
lockedRef
};
}
}();

@@ -894,7 +916,7 @@ LockedFlake lockFlake(
repo, so we should re-read it. FIXME: we could
also just clear the 'rev' field... */
auto prevLockedRef = flake.lockedRef;
flake = getFlake(state, topRef, useRegistries);
flake = getFlake(state, topRef, useRegistries, lockFlags.copyMode);

if (lockFlags.commitLockFile &&
flake.lockedRef.input.getRev() &&
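A standalone sketch of the copy decision added in flake.cc: `maybeCopyInputToStore()` keeps an input as a lazy source accessor when the requested copy mode allows it (the `CopyMode` enum is declared in flake.hh below), and `lockFlake()` downgrades a fully lazy request to `RequireLockable` for child inputs, since locking needs at least a NAR hash. Simplified stand-ins, not the PR's actual types.

```cpp
#include <cassert>

enum struct CopyMode { RequireStorePath, RequireLockable, Lazy };

// True if the input may stay a lazy source accessor instead of a store path;
// mirrors the condition in maybeCopyInputToStore().
bool staysLazy(CopyMode mode, bool isLocked, bool hasNarHash)
{
    return mode == CopyMode::Lazy
        || (mode == CopyMode::RequireLockable && (isLocked || hasNarHash));
}

// Locking inputs requires at least a NAR hash, so a fully lazy request is
// tightened to RequireLockable (the inputCopyMode computed in lockFlake()).
CopyMode inputCopyModeFor(CopyMode requested)
{
    return requested == CopyMode::Lazy ? CopyMode::RequireLockable : requested;
}

int main()
{
    assert(staysLazy(CopyMode::Lazy, false, false));
    assert(staysLazy(CopyMode::RequireLockable, false, true));
    assert(!staysLazy(CopyMode::RequireLockable, false, false));
    assert(!staysLazy(CopyMode::RequireStorePath, true, true));
    assert(inputCopyModeFor(CopyMode::Lazy) == CopyMode::RequireLockable);
}
```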
20 changes: 19 additions & 1 deletion src/libflake/flake/flake.hh
@@ -123,7 +123,20 @@ struct Flake
}
};

Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool useRegistries);
enum struct CopyMode {
//! Copy the input to the store.
RequireStorePath,
//! Ensure that the input is locked or has a NAR hash.
RequireLockable,
//! Just return a lazy source accessor.
Lazy,
};

Flake getFlake(
EvalState & state,
const FlakeRef & flakeRef,
bool useRegistries,
CopyMode copyMode = CopyMode::RequireStorePath);

/**
* Fingerprint of a locked flake; used as a cache key.
@@ -221,6 +234,11 @@ struct LockFlags
* for those inputs will be ignored.
*/
std::set<InputAttrPath> inputUpdates;

/**
* If set, do not copy the flake to the Nix store.
*/
CopyMode copyMode = CopyMode::RequireStorePath;
};

LockedFlake lockFlake(
8 changes: 6 additions & 2 deletions src/libstore/store-api.cc
@@ -214,8 +214,12 @@ StorePath Store::addToStore(
auto sink = sourceToSink([&](Source & source) {
LengthSource lengthSource(source);
storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold)
warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) {
static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1";
if (failOnLargePath)
throw Error("won't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
}
});
dumpPath(path, *sink, fsm, filter);
sink->finish();
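A standalone sketch of the test hook added to `Store::addToStore()` above: when the test-only environment variable `_NIX_TEST_FAIL_ON_LARGE_PATH` is `1`, crossing the large-path threshold becomes a hard error instead of a warning, which lets the functional tests assert that the now-lazy code paths never copy a large tree into the store. Names are illustrative, not Nix's real helpers.

```cpp
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>

void checkLargePathCopy(const std::string & path, uint64_t copiedBytes, uint64_t threshold)
{
    if (threshold && copiedBytes >= threshold) {
        // Evaluated once: promote the warning to an error under the test env var.
        static bool failOnLargePath = [] {
            const char * v = std::getenv("_NIX_TEST_FAIL_ON_LARGE_PATH");
            return v && std::string(v) == "1";
        }();
        if (failOnLargePath)
            throw std::runtime_error("won't copy large path '" + path + "' to the store");
        std::cerr << "warning: copied large path '" << path << "' to the store\n";
    }
}

int main()
{
    checkLargePathCopy("/tmp/small", 10, 1024);   // below threshold: silent
    checkLargePathCopy("/tmp/big", 4096, 1024);   // warns, or throws under the env var
}
```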
18 changes: 12 additions & 6 deletions src/nix/flake.cc
@@ -134,6 +134,7 @@ struct CmdFlakeUpdate : FlakeCommand
lockFlags.recreateLockFile = updateAll;
lockFlags.writeLockFile = true;
lockFlags.applyNixConfig = true;
lockFlags.copyMode = CopyMode::Lazy;

lockFlake();
}
@@ -166,6 +167,7 @@ struct CmdFlakeLock : FlakeCommand
lockFlags.writeLockFile = true;
lockFlags.failOnUnlocked = true;
lockFlags.applyNixConfig = true;
lockFlags.copyMode = CopyMode::Lazy;

lockFlake();
}
@@ -212,11 +214,13 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON

void run(nix::ref<nix::Store> store) override
{
lockFlags.copyMode = CopyMode::Lazy;
auto lockedFlake = lockFlake();
auto & flake = lockedFlake.flake;

// Currently, all flakes are in the Nix store via the rootFS accessor.
auto storePath = store->printStorePath(sourcePathToStorePath(store, flake.path).first);
std::optional<StorePath> storePath;
if (flake.lockedRef.input.getNarHash())
storePath = flake.lockedRef.input.computeStorePath(*store);

if (json) {
nlohmann::json j;
@@ -238,7 +242,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
j["revCount"] = *revCount;
if (auto lastModified = flake.lockedRef.input.getLastModified())
j["lastModified"] = *lastModified;
j["path"] = storePath;
if (storePath)
j["path"] = store->printStorePath(*storePath);
j["locks"] = lockedFlake.lockFile.toJSON().first;
if (auto fingerprint = lockedFlake.getFingerprint(store, fetchSettings))
j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false);
@@ -255,9 +260,10 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
logger->cout(
ANSI_BOLD "Description:" ANSI_NORMAL " %s",
*flake.description);
logger->cout(
ANSI_BOLD "Path:" ANSI_NORMAL " %s",
storePath);
if (storePath)
logger->cout(
ANSI_BOLD "Path:" ANSI_NORMAL " %s",
store->printStorePath(*storePath));
if (auto rev = flake.lockedRef.input.getRev())
logger->cout(
ANSI_BOLD "Revision:" ANSI_NORMAL " %s",
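A sketch of the behaviour change in `nix flake metadata` above: the store path is now derived from the locked input's NAR hash when one is available, and the JSON `path` field (and the `Path:` line) is simply omitted for lazy flakes that were never copied. The helper below is an illustrative stand-in; it uses nlohmann::json, which the real code also uses.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <nlohmann/json.hpp>

nlohmann::json metadataJson(const std::optional<std::string> & storePath)
{
    nlohmann::json j;
    j["description"] = "Bla bla";
    if (storePath)
        j["path"] = *storePath;   // omitted entirely for lazy, uncopied flakes
    return j;
}

int main()
{
    std::cout << metadataJson(std::nullopt).dump() << "\n";
    // Hypothetical store path, for illustration only.
    std::cout << metadataJson("/nix/store/abc123-source").dump() << "\n";
}
```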
3 changes: 1 addition & 2 deletions tests/functional/flakes/flakes.sh
@@ -69,14 +69,13 @@ nix flake metadata "$flake1Dir" | grepQuiet 'URL:.*flake1.*'
# Test 'nix flake metadata --json'.
json=$(nix flake metadata flake1 --json | jq .)
[[ $(echo "$json" | jq -r .description) = 'Bla bla' ]]
[[ -d $(echo "$json" | jq -r .path) ]]
[[ $(echo "$json" | jq -r .lastModified) = $(git -C "$flake1Dir" log -n1 --format=%ct) ]]
hash1=$(echo "$json" | jq -r .revision)
[[ -n $(echo "$json" | jq -r .fingerprint) ]]

echo foo > "$flake1Dir/foo"
git -C "$flake1Dir" add $flake1Dir/foo
[[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "$hash1-dirty" ]]
[[ $(_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake metadata flake1 --json --refresh --warn-large-path-threshold 1 | jq -r .dirtyRevision) == "$hash1-dirty" ]]
[[ "$(nix flake metadata flake1 --json | jq -r .fingerprint)" != null ]]

echo -n '# foo' >> "$flake1Dir/flake.nix"
9 changes: 6 additions & 3 deletions tests/functional/flakes/follow-paths.sh
@@ -118,20 +118,23 @@ nix flake lock $flakeFollowsA
jq -r -c '.nodes | keys | .[]' $flakeFollowsA/flake.lock | grep "^foobar$"

# Check that path: inputs cannot escape from their root.
# FIXME: this test is wonky because with lazy trees, ../flakeB at the root is equivalent to /flakeB and not an error.
cat > $flakeFollowsA/flake.nix <<EOF
{
description = "Flake A";
inputs = {
B.url = "path:../flakeB";
};
outputs = { ... }: {};
outputs = { ... }: {
x = 123;
};
}
EOF

git -C $flakeFollowsA add flake.nix

expect 1 nix flake lock $flakeFollowsA 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode'
expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist'
expect 1 nix eval $flakeFollowsA#x 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode'
expect 1 nix eval --impure $flakeFollowsA#x 2>&1 | grep '/flakeB.*does not exist'

# Test relative non-flake inputs.
cat > $flakeFollowsA/flake.nix <<EOF
2 changes: 1 addition & 1 deletion tests/functional/flakes/unlocked-override.sh
@@ -35,7 +35,7 @@ echo 456 > "$flake1Dir"/x.nix
expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" |
grepQuiet "Will not write lock file.*because it has an unlocked input"

nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks
_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks --warn-large-path-threshold 1

# Using a lock file with a dirty lock does not require --allow-dirty-locks, but should print a warning.
expectStderr 0 nix eval "$flake2Dir#x" |
6 changes: 5 additions & 1 deletion tests/nixos/github-flakes.nix
@@ -205,7 +205,7 @@ in
cat_log()

# ... otherwise it should use the API
out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")
out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0 --no-trust-tarballs-from-git-forges")
print(out)
info = json.loads(out)
assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}"
@@ -224,6 +224,10 @@ in
hash = client.succeed(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree {info['url']}).narHash'")
assert hash == info['locked']['narHash']

# Fetching with an incorrect NAR hash should fail.
out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree \"github:fancy-enterprise/private-flake/{info['revision']}?narHash=sha256-HsrRFZYg69qaVe/wDyWBYLeS6ca7ACEJg2Z%2BGpEFw4A%3D\").narHash' 2>&1")
assert "NAR hash mismatch in input" in out, "NAR hash check did not fail with the expected error"

# Fetching without a narHash should succeed if trust-github is set and fail otherwise.
client.succeed(f"nix eval --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}'")
out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}' 2>&1")
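A standalone sketch of the check the new NixOS test above exercises: fetching an input whose computed NAR hash does not match the `narHash` given in the fetchTree attributes must fail with a "NAR hash mismatch in input" error. Simplified stand-ins, not Nix's real fetcher code; the hash values are just examples (the first is the URL-decoded value from the test).

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

void checkNarHash(const std::string & inputUrl,
                  const std::string & expectedSri,
                  const std::string & computedSri)
{
    // Refuse the fetched tree if it does not match the hash the caller pinned.
    if (computedSri != expectedSri)
        throw std::runtime_error(
            "NAR hash mismatch in input '" + inputUrl + "', expected '"
            + expectedSri + "' but got '" + computedSri + "'");
}

int main()
{
    try {
        checkNarHash("github:fancy-enterprise/private-flake",
                     "sha256-HsrRFZYg69qaVe/wDyWBYLeS6ca7ACEJg2Z+GpEFw4A=",
                     "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=");
    } catch (const std::runtime_error & e) {
        std::cerr << e.what() << "\n";
    }
}
```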