Skip to content

Commit

Permalink
Merge master into staging-next
Browse files Browse the repository at this point in the history
  • Loading branch information
github-actions[bot] authored Nov 30, 2024
2 parents 7eddd28 + e383460 commit aa33135
Show file tree
Hide file tree
Showing 58 changed files with 4,454 additions and 5,214 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/check-shell.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ on:
pull_request_target:
paths:
- 'shell.nix'
- './ci/**'
- 'ci/**'

permissions: {}

Expand Down
7 changes: 0 additions & 7 deletions maintainers/maintainer-list.nix
Original file line number Diff line number Diff line change
Expand Up @@ -13743,7 +13743,6 @@
github = "matthewpi";
githubId = 26559841;
name = "Matthew Penner";
keys = [ { fingerprint = "5118 F1CC B7B0 6C17 4DD1 5267 3131 1906 AD4C F6D6"; } ];
};
matthiasbenaets = {
email = "matthias.benaets@gmail.com";
Expand Down Expand Up @@ -21203,12 +21202,6 @@
githubId = 1694705;
name = "Sam Stites";
};
stnley = {
email = "michael@stnley.io";
github = "stnley";
githubId = 64174376;
name = "Michael Stanley";
};
strager = {
email = "strager.nds@gmail.com";
github = "strager";
Expand Down
4 changes: 4 additions & 0 deletions nixos/doc/manual/release-notes/rl-2505.section.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@

- [Kimai](https://www.kimai.org/), a web-based multi-user time-tracking application. Available as [services.kimai](option.html#opt-services.kimai).

- [Omnom](https://github.com/asciimoo/omnom), a webpage bookmarking and snapshotting service. Available as [services.omnom](options.html#opt-services.omnom.enable).

- [Amazon CloudWatch Agent](https://github.com/aws/amazon-cloudwatch-agent), the official telemetry collector for AWS CloudWatch and AWS X-Ray. Available as [services.amazon-cloudwatch-agent](#opt-services.amazon-cloudwatch-agent.enable).

- [agorakit](https://github.com/agorakit/agorakit), an organization tool for citizens' collectives. Available with [services.agorakit](#opt-services.agorakit.enable).
Expand Down Expand Up @@ -49,6 +51,8 @@
[official website](https://www.nerdfonts.com/font-downloads) as the titles in preview images, with the "Nerd Font"
suffix and any whitespaces trimmed.

- `gkraken` software and `hardware.gkraken.enable` option have been removed, use `coolercontrol` via `programs.coolercontrol.enable` option instead.

- the notmuch vim plugin now lives in a separate output of the `notmuch`
package. Installing `notmuch` will not bring the notmuch vim package anymore,
add `vimPlugins.notmuch-vim` to your (Neo)vim configuration if you want the
Expand Down
15 changes: 0 additions & 15 deletions nixos/modules/hardware/gkraken.nix

This file was deleted.

1 change: 0 additions & 1 deletion nixos/modules/module-list.nix
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,6 @@
./hardware/digitalbitbox.nix
./hardware/flipperzero.nix
./hardware/flirc.nix
./hardware/gkraken.nix
./hardware/glasgow.nix
./hardware/gpgsmartcards.nix
./hardware/graphics.nix
Expand Down
4 changes: 4 additions & 0 deletions nixos/modules/rename.nix
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,10 @@ in
systemd-logind API). Instead of using the module you can now
simply add the brightnessctl package to environment.systemPackages.
'')
(mkRemovedOptionModule [ "hardware" "gkraken" "enable" ] ''
gkraken was deprecated by coolercontrol and thus removed from nixpkgs.
Consider using programs.coolercontrol instead.
'')
(mkRemovedOptionModule [ "hardware" "u2f" ] ''
The U2F modules module was removed, as all it did was adding the
udev rules from libu2f-host to the system. Udev gained native support
Expand Down
8 changes: 8 additions & 0 deletions nixos/modules/system/boot/networkd.nix
Original file line number Diff line number Diff line change
Expand Up @@ -672,10 +672,12 @@ let
"IPv6AcceptRA"
"IPv6DuplicateAddressDetection"
"IPv6HopLimit"
"IPv6RetransmissionTimeSec"
"IPv4ReversePathFilter"
"IPv4AcceptLocal"
"IPv4RouteLocalnet"
"IPv4ProxyARP"
"IPv4ProxyARPPrivateVLAN"
"IPv6ProxyNDP"
"IPv6ProxyNDPAddress"
"IPv6SendRA"
Expand Down Expand Up @@ -726,10 +728,12 @@ let
(assertMinimum "IPv6DuplicateAddressDetection" 0)
(assertInt "IPv6HopLimit")
(assertMinimum "IPv6HopLimit" 0)
(assertInt "IPv6RetransmissionTimeSec")
(assertValueOneOf "IPv4ReversePathFilter" ["no" "strict" "loose"])
(assertValueOneOf "IPv4AcceptLocal" boolValues)
(assertValueOneOf "IPv4RouteLocalnet" boolValues)
(assertValueOneOf "IPv4ProxyARP" boolValues)
(assertValueOneOf "IPv4ProxyARPPrivateVLAN" boolValues)
(assertValueOneOf "IPv6ProxyNDP" boolValues)
(assertValueOneOf "IPv6SendRA" boolValues)
(assertValueOneOf "DHCPPrefixDelegation" boolValues)
Expand Down Expand Up @@ -776,6 +780,7 @@ let
"Priority"
"IncomingInterface"
"OutgoingInterface"
"L3MasterDevice"
"SourcePort"
"DestinationPort"
"IPProtocol"
Expand All @@ -790,6 +795,7 @@ let
(assertRange "TypeOfService" 0 255)
(assertRangeWithOptionalMask "FirewallMark" 1 4294967295)
(assertInt "Priority")
(assertValueOneOf "L3MasterDevice" boolValues)
(assertPortOrPortRange "SourcePort")
(assertPortOrPortRange "DestinationPort")
(assertValueOneOf "InvertRule" boolValues)
Expand Down Expand Up @@ -1033,6 +1039,7 @@ let
"BootServerName"
"BootFilename"
"IPv6OnlyPreferredSec"
"PersistLeases"
])
(assertInt "PoolOffset")
(assertMinimum "PoolOffset" 0)
Expand All @@ -1047,6 +1054,7 @@ let
(assertValueOneOf "EmitRouter" boolValues)
(assertValueOneOf "EmitTimezone" boolValues)
(assertValueOneOf "BindToInterface" boolValues)
(assertValueOneOf "PersistLeases" boolValues)
];

sectionIPv6SendRA = checkUnitConfig "IPv6SendRA" [
Expand Down
10 changes: 5 additions & 5 deletions pkgs/applications/networking/browsers/librewolf/src.json
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
{
"packageVersion": "132.0.2-1",
"packageVersion": "133.0-1",
"source": {
"rev": "132.0.2-1",
"sha256": "7DB0QSQHNRw991yRR5+/Oo4fpXCR/Zklxb7ILRIH0WM="
"rev": "133.0-1",
"sha256": "1xf7gx3xm3c7dhch9gwpb0xp11lcyim1nrbm8sjljxdcs7iq9jy4"
},
"firefox": {
"version": "132.0.2",
"sha512": "nqldn7GpQaxaW1DaZ+Ik88z4xAHybLYbt0rX9OHocG1GnEtjJXFPLLnN9QwycQN31ryhjdZbVdssOe8rJ6V/rg=="
"version": "133.0",
"sha512": "b16f9898bee4121914caef48d4f7f44bf9d69aee168586b02bf1b4f4197844fd10179e1b63b273f52929fb348030df36328f24993cd666969da4ddc82562a90c"
}
}
93 changes: 30 additions & 63 deletions pkgs/build-support/docker/default.nix
Original file line number Diff line number Diff line change
Expand Up @@ -919,10 +919,19 @@ rec {
, includeStorePaths ? true
, includeNixDB ? false
, passthru ? {}
,
, # Pipeline used to produce docker layers. If not set, popularity contest
# algorithm is used. If set, maxLayers is ignored as the author of the
# pipeline can use one of the available functions (like "limit_layers")
# to control the amount of layers.
# See: pkgs/build-support/flatten-references-graph/src/flatten_references_graph/pipe.py
# for available functions, and its tests for how to use them.
# WARNING!! this interface is highly experimental and subject to change.
layeringPipeline ? null
, # Enables debug logging for the layering pipeline.
debug ? false
}:
assert
(lib.assertMsg (maxLayers > 1)
(lib.assertMsg (layeringPipeline == null -> maxLayers > 1)
"the maxLayers argument of dockerTools.buildLayeredImage function must be greater than 1 (current value: ${toString maxLayers})");
assert
(lib.assertMsg (enableFakechroot -> !stdenv.hostPlatform.isDarwin) ''
Expand Down Expand Up @@ -999,26 +1008,30 @@ rec {
'';
};

closureRoots = lib.optionals includeStorePaths /* normally true */ (
[ baseJson customisationLayer ]
);
overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);

# These derivations are only created as implementation details of docker-tools,
# so they'll be excluded from the created images.
unnecessaryDrvs = [ baseJson overallClosure customisationLayer ];
layersJsonFile = buildPackages.dockerMakeLayers {
inherit debug;
closureRoots = optionals includeStorePaths [ baseJson customisationLayer ];
excludePaths = [ baseJson customisationLayer ];
pipeline =
if layeringPipeline != null
then layeringPipeline
else import
./popularity-contest-layering-pipeline.nix
{ inherit lib jq runCommand; }
{ inherit fromImage maxLayers; }
;
};

conf = runCommand "${baseName}-conf.json"
{
inherit fromImage maxLayers created mtime uid gid uname gname;
inherit fromImage created mtime uid gid uname gname layersJsonFile;
imageName = lib.toLower name;
preferLocalBuild = true;
passthru.imageTag =
if tag != null
then tag
else
lib.head (lib.strings.splitString "-" (baseNameOf (builtins.unsafeDiscardStringContext conf.outPath)));
paths = buildPackages.referencesByPopularity overallClosure;
nativeBuildInputs = [ jq ];
} ''
${if (tag == null) then ''
Expand All @@ -1038,54 +1051,7 @@ rec {
mtime="$(date -Iseconds -d "$mtime")"
fi
paths() {
cat $paths ${lib.concatMapStringsSep " "
(path: "| (grep -v ${path} || true)")
unnecessaryDrvs}
}
# Compute the number of layers that are already used by a potential
# 'fromImage' as well as the customization layer. Ensure that there is
# still at least one layer available to store the image contents.
usedLayers=0
# subtract number of base image layers
if [[ -n "$fromImage" ]]; then
(( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
fi
# one layer will be taken up by the customisation layer
(( usedLayers += 1 ))
if ! (( $usedLayers < $maxLayers )); then
echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
"'extraCommands', but only maxLayers=$maxLayers were" \
"allowed. At least 1 layer is required to store contents."
exit 1
fi
availableLayers=$(( maxLayers - usedLayers ))
# Create $maxLayers worth of Docker Layers, one layer per store path
# unless there are more paths than $maxLayers. In that case, create
# $maxLayers-1 for the most popular layers, and smush the remainaing
# store paths in to one final layer.
#
# The following code is fiddly w.r.t. ensuring every layer is
# created, and that no paths are missed. If you change the
# following lines, double-check that your code behaves properly
# when the number of layers equals:
# maxLayers-1, maxLayers, and maxLayers+1, 0
paths |
jq -sR '
rtrimstr("\n") | split("\n")
| (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
| map(select(length > 0))
' \
--argjson maxLayers "$availableLayers" > store_layers.json
# The index on $store_layers is necessary because the --slurpfile
# automatically reads the file as an array.
cat ${baseJson} | jq '
jq '
. + {
"store_dir": $store_dir,
"from_image": $from_image,
Expand All @@ -1101,16 +1067,17 @@ rec {
}
' --arg store_dir "${storeDir}" \
--argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
--slurpfile store_layers store_layers.json \
--slurpfile store_layers "$layersJsonFile" \
--arg customisation_layer ${customisationLayer} \
--arg repo_tag "$imageName:$imageTag" \
--arg created "$created" \
--arg mtime "$mtime" \
--arg uid "$uid" \
--arg gid "$gid" \
--arg uname "$uname" \
--arg gname "$gname" |
tee $out
--arg gname "$gname" \
${baseJson} \
| tee $out
'';

result = runCommand "stream-${baseName}"
Expand Down
50 changes: 50 additions & 0 deletions pkgs/build-support/docker/make-layers.nix
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
{
coreutils,
flattenReferencesGraph,
lib,
jq,
runCommand,
}:
{
closureRoots,
excludePaths ? [ ],
# This could be a path to (or a derivation producing a path to)
# a json file containing the pipeline
pipeline ? [ ],
debug ? false,
}:
if closureRoots == [ ] then
builtins.toFile "docker-layers-empty" "[]"
else
runCommand "docker-layers"
{
__structuredAttrs = true;
# graph, exclude_paths and pipeline are expected by the
# flatten_references_graph executable.
exportReferencesGraph.graph = closureRoots;
exclude_paths = excludePaths;
inherit pipeline;
nativeBuildInputs = [
coreutils
flattenReferencesGraph
jq
];
}
''
. .attrs.sh
flatten_references_graph_arg=.attrs.json
echo "pipeline: $pipeline"
if jq -e '.pipeline | type == "string"' .attrs.json; then
jq '. + { "pipeline": $pipeline[0] }' \
--slurpfile pipeline "$pipeline" \
.attrs.json > flatten_references_graph_arg.json
flatten_references_graph_arg=flatten_references_graph_arg.json
fi
${lib.optionalString debug "export DEBUG=True"}
flatten_references_graph "$flatten_references_graph_arg" > ''${outputs[out]}
''
34 changes: 34 additions & 0 deletions pkgs/build-support/docker/popularity-contest-layering-pipeline.nix
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
{
lib,
runCommand,
jq,
}:
{
maxLayers,
fromImage ? null,
}:
runCommand "popularity-contest-layering-pipeline.json" { inherit maxLayers; } ''
# Compute the number of layers that are already used by a potential
# 'fromImage' as well as the customization layer. Ensure that there is
# still at least one layer available to store the image contents.
# one layer will be taken up by the customisation layer
usedLayers=1
${lib.optionalString (fromImage != null) ''
# subtract number of base image layers
baseImageLayersCount=$(tar -xOf "${fromImage}" manifest.json | ${lib.getExe jq} '.[0].Layers | length')
(( usedLayers += baseImageLayersCount ))
''}
if ! (( $usedLayers < $maxLayers )); then
echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
"'extraCommands', but only maxLayers=$maxLayers were" \
"allowed. At least 1 layer is required to store contents."
exit 1
fi
availableLayers=$(( maxLayers - usedLayers ))
# Produce pipeline which uses popularity_contest algo.
echo '[["popularity_contest"],["limit_layers",'$availableLayers']]' > $out
''
Loading

0 comments on commit aa33135

Please sign in to comment.