Compare commits: main...secrix-iss (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| Felix Stupp | fff0c9a8a7 | 2 months ago |
@ -1,17 +0,0 @@
{
  flake,
  inputs,
  lib,
  ...
}@flakeArg:
{ system, ... }@sysArg:
{

  # shortcut to fully configured secrix
  secrix =
    assert lib.assertMsg (system == "x86_64-linux") ''
      secrix is currently only compatible with x86_64-linux
    '';
    inputs.secrix.secrix flake;

}
@ -1,27 +0,0 @@
{ outputs, ... }@flakeArg:
{ pkgs_unstable, system, ... }@sysArg:
let
  pkgs = pkgs_unstable;
in
{
  default = pkgs.mkShell {
    packages =
      (with pkgs; [
        curl
        mkpasswd
        rsync
        opentofu
        terranix
        # tooling for services
        wireguard-tools
      ])
      ++ [
        # flake stuff
        outputs.packages.${system}.secrix-wrapper
      ];
    # TODO magic
    shellHook = ''
      export SECRIX_ID=~/".ssh/id_ed25519"
    '';
  };
}
@ -1,7 +0,0 @@
{
  imports = [
    # files
    ./gpg-agent.nix
    ./zsh.nix
  ];
}
@ -1,24 +0,0 @@
{
  config,
  lib,
  osConfig ? null,
  ...
}:
let
  cfg = config.programs.zsh;
in
{
  config = lib.mkIf cfg.enable {

    assertions = lib.mkIf (!builtins.isNull osConfig) [
      # see https://github.com/nix-community/home-manager/blob/e1391fb22e18a36f57e6999c7a9f966dc80ac073/modules/programs/zsh.nix#L353
      {
        assertion = cfg.enableCompletion -> builtins.elem "/share/zsh" osConfig.environment.pathsToLink;
        message = ''
          for useful ZSH completion, add "/share/zsh" to NixOS environment.pathsToLink
        '';
      }
    ];

  };
}
@ -1,15 +0,0 @@
{ lib, self, ... }@flakeArg:
{

  assertions.imports = lib.singleton ./assertions;

  # combination of all my custom modules
  # these should not change anything until you enable their custom options
  default.imports = [
    # flake
    self.assertions
    # directories
    ./extends
  ];

}
@ -1,8 +0,0 @@
{
  imports = [
    # files
    ./kdeconnect.nix
    ./retroarch.nix
    ./vscode.nix
  ];
}
@ -1,106 +0,0 @@
{
  config,
  lib,
  osConfig ? null,
  pkgs,
  ...
}:
let
  cfg = config.services.kdeconnect;
  configDescPreamble = ''
    Configuring KDE Connect using these options is probably not endorsed by upstream,
    as no user documentation for these configuration files exists.
  '';
  optionDescPreamble = ''
    ${configDescPreamble}
    {option}`services.kdeconnect.enableSettings` must be enabled
    for this option to be applied.
  '';
  typeConfig = pkgs.formats.ini { };
in
{

  options.services.kdeconnect = {

    enableSettings = lib.mkEnableOption ''
      KDE Connect settings defined in this module.

      ${configDescPreamble}

      This option operates independently of {option}`services.kdeconnect.enable`
      and so can also be used when KDE Connect is already installed by other means,
      e.g. using the NixOS module option `programs.kdeconnect.enable`.
    '';

    settings = {
      name = lib.mkOption {
        description = ''
          Name of this device, advertised to other KDE Connect devices.

          ${optionDescPreamble}
        '';
        type = lib.types.str;
        default = osConfig.networking.hostName;
        defaultText = lib.literalExpression "osConfig.networking.hostName";
      };
      customDevices = lib.mkOption {
        description = ''
          List of IPs & hostnames KDE Connect should try to connect to.
          Useful in scenarios where auto-discovery does not work,
          e.g. when combined with a VPN.

          ${optionDescPreamble}
        '';
        # TODO limit to IPs
        # TODO check if hostname works now
        type = with lib.types; listOf str;
        default = [ ];
        example = [
          "192.168.12.10"
          "192.168.12.11"
        ];
      };
    };

    config = lib.mkOption {
      description = ''
        Arbitrary settings for KDE Connect.

        ${optionDescPreamble}
        This will then overwrite the file {file}`$XDG_CONFIG_DIR/kdeconnect/config`.
      '';
      type = typeConfig.type;
      default = { };
    };

  };

  config = {

    warnings = [
      (lib.mkIf cfg.enableSettings "services.kdeconnect.enableSettings is experimental, be aware")
    ];

    services.kdeconnect.config =
      let
        sets = cfg.settings;
        optConcat = sep: list: lib.mkIf (list != [ ]) (lib.concatStringsSep sep list);
      in
      {
        General = {
          customDevices = optConcat "," sets.customDevices;
          name = sets.name;
        };
      };

    xdg.configFile."kdeconnect/config" = lib.mkIf cfg.enableSettings {
      # TODO make compatible with more systems
      onChange = ''
        ${pkgs.systemd}/bin/systemctl --user reload-or-restart app-org.kde.kdeconnect.daemon@autostart.service
      '';
      source = typeConfig.generate "kdeconnect-config" cfg.config;
    };

  };

}
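
A minimal usage sketch for the Home-Manager module above (not part of the commit; device name and address are made up):

```nix
{
  services.kdeconnect = {
    enable = true;
    enableSettings = true; # experimental, see warning above
    settings = {
      name = "my-laptop"; # hypothetical device name
      customDevices = [ "192.168.12.10" ]; # peer only reachable over VPN
    };
  };
}
```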
@ -1,43 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.programs.retroarch;
in
{
  options.programs.retroarch = {

    enable = lib.mkEnableOption "RetroArch as user program";

    package = lib.mkPackageOption pkgs "retroarch" {
      example = lib.literalExpression "pkgs.retroarchFull";
    };

    cores = lib.mkOption {
      description = "List of cores to install.";
      type = lib.types.listOf lib.types.package;
      default = [ ];
      example = lib.literalExpression "with pkgs.libretro; [ twenty-fortyeight ]";
    };

    finalPackage = lib.mkOption {
      description = "RetroArch package with the selected cores.";
      type = lib.types.package;
      readOnly = true;
      default = if cfg.cores == [ ] then cfg.package else cfg.package.override { inherit (cfg) cores; };
      defaultText = ''
        with config.programs.retroarch;
        package.override { inherit cores; }
      '';
    };

  };
  config = {

    home.packages = lib.singleton cfg.finalPackage;

  };
}
@ -1,71 +0,0 @@
{
  config,
  lib,
  options,
  ...
}:
let
  cfg = config.programs.vscode;
in
{

  options.programs.vscode = {
    keybindingsNext = lib.mkOption {
      description = ''
        More expressive and ordered way to set {option}`programs.vscode.keybindings`.
        Both options can be used simultaneously.

        - key bindings are grouped by their key combination
        - you can shortcut commands without further options (see example)
      '';
      type =
        let
          bindsType = options.programs.vscode.keybindings.type;
          bindModule = bindsType.nestedTypes.elemType;
          bindOpts = bindModule.getSubOptions;
          inhOpts =
            prefix:
            builtins.removeAttrs (bindOpts prefix) [
              "_module"
              "key"
            ];
          inhMod = lib.types.submodule { options = inhOpts [ ]; };
          commType = (bindOpts [ ]).command.type;
          bindsNextType = lib.types.either inhMod commType;
          bindsListNext = lib.types.listOf bindsNextType;
        in
        lib.types.attrsOf bindsListNext;
      default = { };
      example = {
        "ctrl+tab" = [
          {
            command = "-workbench.action.quickOpenNavigateNextInEditorPicker";
            when = "inEditorsPicker && inQuickOpen";
          }
          "-workbench.action.quickOpenPreviousRecentlyUsedEditorInGroup"
          "workbench.action.nextEditor"
        ];
        "ctrl+shift+tab" = [
          {
            command = "-workbench.action.quickOpenNavigatePreviousInEditorPicker";
            when = "inEditorsPicker && inQuickOpen";
          }
          "-workbench.action.quickOpenLeastRecentlyUsedEditorInGroup"
          "workbench.action.previousEditor"
        ];
      };
    };
  };

  config.programs.vscode = {
    keybindings =
      let
        expandEntry = opts: if builtins.isAttrs opts then opts else { command = opts; };
        transEntry = key: opts: (expandEntry opts) // { inherit key; };
        transKey = key: opts: map (transEntry key) opts;
        transAttr = attr: lib.flatten (lib.mapAttrsToList transKey attr);
      in
      transAttr config.programs.vscode.keybindingsNext;
  };

}
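
For reference, the translation above flattens each group into plain `keybindings` entries; a sketch of what the `"ctrl+tab"` group from the example expands to (worked out by hand, not part of the commit):

```nix
[
  {
    key = "ctrl+tab";
    command = "-workbench.action.quickOpenNavigateNextInEditorPicker";
    when = "inEditorsPicker && inQuickOpen";
  }
  { key = "ctrl+tab"; command = "-workbench.action.quickOpenPreviousRecentlyUsedEditorInGroup"; }
  { key = "ctrl+tab"; command = "workbench.action.nextEditor"; }
]
```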
@ -0,0 +1,24 @@
# this stuff replaces all settings which would be configured by the corresponding frontend NixOS module

{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.x-banananetwork.frontend;
in
{

  config = lib.mkIf (cfg.enable && !cfg.nixosModuleCompat) {

    assertions = [
      {
        # stub: fails until the standalone base implementation exists
        assertion = false;
        message = "missing implementation of base stuff";
      }
    ];

  };

}
@ -0,0 +1,35 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{

  imports = [
    ./base.nix
    ./extension.nix
  ];

  options = {

    x-banananetwork.frontend = {

      enable = lib.mkEnableOption ''
        settings for frontend configuration in Home-Manager
      '';

      nixosModuleCompat = lib.mkEnableOption ''
        compatibility with the corresponding frontend NixOS configuration.

        This is implemented by opting out of configuring stuff
        which is already configured by the corresponding NixOS module.
      '';

    };

  };

}
@ -0,0 +1,7 @@
# this stuff must all be compatible with settings already configured by the corresponding frontend NixOS module

{
  config,
  lib,
  pkgs,
  ...
}:
{ }
@ -1,7 +0,0 @@
{ ... }@flakeArg:
let
in
{

}
@ -1,42 +0,0 @@
{ inputs, self, ... }@flakeArg:
let
  inherit (inputs) nixpkgs nixpkgs_unstable;
  inherit (nixpkgs) lib; # prevent infinite recursion
  inherit (builtins) isString;
  inherit (lib.attrsets) attrByPath hasAttrByPath updateManyAttrsByPath;
  inherit (lib.options) showOption;
  inherit (lib.strings) splitString;
  inherit (lib.trivial) flip pipe warnIf;
  inherit (self) backportByPath;
in
{

  backportByPath =
    let
      pathInterpret = p: if isString p then splitString "." p else p;
    in
    new: orig: prefix:
    flip pipe [
      (map (
        path:
        let
          pathList = pathInterpret path;
          pathFull = pathInterpret prefix ++ pathList;
          error = abort "attr not found on path ${showOption pathFull}";
          newVal = attrByPath pathFull error new;
          origVal = attrByPath pathFull newVal orig;
        in
        {
          path = pathList;
          update =
            _:
            warnIf (hasAttrByPath pathFull orig) "${showOption pathFull} no longer needs to be backported"
              origVal;
        }
      ))
      (flip updateManyAttrsByPath { })
    ];

  backportNixpkg = backportByPath nixpkgs_unstable nixpkgs;

}
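
A hedged usage sketch of `backportNixpkg` (the attribute path is hypothetical; the call should yield a nested attrset containing only the backported values, with a warning once stable nixpkgs provides them itself):

```nix
# hypothetical: take lib.trivial.warnIfNot from nixpkgs_unstable
# until the pinned stable nixpkgs gains it
lib.recursiveUpdate nixpkgs.lib (backportNixpkg "lib" [ "trivial.warnIfNot" ])
# backportNixpkg "lib" [ "trivial.warnIfNot" ]
#   => { trivial.warnIfNot = <value from nixpkgs_unstable.lib>; }
```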
@ -1,44 +0,0 @@
{ inputs, lib, ... }@flakeArg:
let
  inherit (inputs) nixpkgs;
  inherit (builtins) isAttrs mapAttrs;
  inherit (lib) autoExtend importFlakeMod;
in

# be a drop-in replacement
nixpkgs.lib

# groups
// mapAttrs (autoExtend nixpkgs.lib) {
  attrsets = ./attrsets.nix;
  backport = ./backport.nix;
  lists = ./lists.nix;
  math = ./math.nix;
  modules = ./modules.nix;
  network = ./network.nix;
  strings = ./strings.nix;
  types = ./types.nix;
  x-banananetwork-unused = ./unused.nix;
}

# functions
// {

  autoExtend =
    upstream: name: obj:
    (upstream.${name} or { }) // (if isAttrs obj then obj else importFlakeMod obj);

  supportedSystems = builtins.attrNames nixpkgs.legacyPackages;

  systemSpecificVars = system: {
    pkgs = import nixpkgs { inherit system; };
    pkgs_unstable = import inputs.nixpkgs_unstable { inherit system; };
    inherit system;
  };

  forAllSystems =
    gen: lib.genAttrs lib.supportedSystems (system: gen (lib.systemSpecificVars system));

  importFlakeModWithSystem = path: lib.forAllSystems (lib.importFlakeMod path);

}
@ -1,42 +0,0 @@
{ lib, ... }@flakeArg:
let
  inherit (builtins)
    deepSeq
    foldl'
    groupBy
    mapAttrs
    ;
  inherit (lib.lists) singleton;
  inherit (lib.trivial) flip pipe;
in
{

  groupByMult =
    groupers:
    # TODO (maybe) make more efficient by building up the grouping function from the right
    # from left
    # 0= (x: x) vals
    # 1= groupBy values[0] <0>
    # 2= mapAttrs (_: groupBy values[1]) <1>
    # 3= mapAttrs (_: mapAttrs (_: groupBy values[2])) <2>
    # from right
    # 1= (groupBy values[0])
    # 2= ...
    let
      nul = {
        mapper = x: x;
        result = [ ];
      };
      op =
        { mapper, result }:
        grouper: {
          mapper = fun: mapAttrs (_: mapper fun);
          result = result ++ singleton (mapper grouper);
        };
      list = map groupBy groupers;
      pipeList = (foldl' op nul list).result;
    in
    # catch errors while building groupers before values are passed through
    deepSeq pipeList (flip pipe pipeList);

}
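
A hedged evaluation sketch of `groupByMult` (input data is made up; each grouper must return a string, as with `builtins.groupBy`):

```nix
groupByMult [ (x: x.os) (x: x.arch) ] [
  { os = "linux"; arch = "x86_64"; name = "a"; }
  { os = "linux"; arch = "aarch64"; name = "b"; }
]
# => {
#   linux = {
#     x86_64 = [ { os = "linux"; arch = "x86_64"; name = "a"; } ];
#     aarch64 = [ { os = "linux"; arch = "aarch64"; name = "b"; } ];
#   };
# }
```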
@ -1,46 +0,0 @@
# inefficient but sufficient math functions missing in Nix & nixpkgs
{ lib, ... }@flakeArg:
let
  inherit (builtins) foldl' genList isInt;
  inherit (lib.lists) imap0 reverseList;
  inherit (lib.strings) stringToCharacters;
  inherit (lib.trivial) flip pipe;
in
rec {

  bitAsString = bool: if bool then "1" else "0";

  binToInt = flip pipe [
    stringToCharacters
    reverseList
    (imap0 (i: x: if x == "1" then pow 2 i else 0))
    (foldl' (a: v: a + v) 0)
  ];

  intToBin =
    len: num:
    assert isInt len && isInt num;
    let
      maxVal = pow 2 len;
      twoPowers = genList (i: pow 2 (len - 1 - i)) len;
      init = {
        number = num;
        output = "";
      };
      folder =
        { number, output }:
        power:
        let
          bit = number >= power;
        in
        {
          number = if bit then number - power else number;
          output = "${output}${bitAsString bit}";
        };
    in
    assert num < maxVal;
    (foldl' folder init twoPowers).output;

  pow = x: exp: foldl' (a: b: a * b) 1 (genList (_: x) exp); # TODO

}
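
A few sample evaluations of the helpers above (worked out by hand; easy to confirm in `nix repl`):

```nix
pow 2 8         # => 256
binToInt "1010" # => 10
intToBin 8 10   # => "00001010"
```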
@ -1,9 +0,0 @@
{ lib, ... }@flakeArg:
let
  inherit (lib.modules) mkOverride;
in
{

  mkTestOverride = mkOverride 55;

}
@ -1,238 +0,0 @@
{ lib, self, ... }@flakeArg:
let
  inherit (builtins)
    concatMap
    concatStringsSep
    mapAttrs
    elemAt
    fromTOML
    genList
    isAttrs
    length
    match
    replaceStrings
    ;
  inherit (lib.asserts) assertMsg;
  inherit (lib.lists)
    count
    imap1
    last
    singleton
    sublist
    ;
  inherit (lib.math) binToInt intToBin;
  inherit (lib.strings)
    commonPrefixLength
    hasInfix
    splitString
    substring
    toIntBase10
    toLower
    ;
  inherit (lib.trivial) flip pipe toHexString;
  fixedWidthStrSuffix =
    width: filler: str:
    let
      strw = lib.stringLength str;
      reqWidth = width - (lib.stringLength filler);
    in
    assert lib.assertMsg (strw <= width)
      "fixedWidthString: requested string length (${toString width}) must not be shorter than actual length (${toString strw})";
    if strw == width then str else fixedWidthStrSuffix reqWidth filler str + filler;
  fromHexString = str: (fromTOML "v=0x${str}").v; # TODO not (yet) available in nixpkgs.lib
  toHex = str: toLower (toHexString str);
  toIpClass =
    ipArg:
    let
      statics = {
        ipv4 = {
          _group_bits = 8;
          _group_count = 4;
          _group_sep = ".";
          _group_toInt = toIntBase10;
          compressed = ip.decCompressed;
          shorted = ip.decCompressed;
        };
        ipv6 = {
          _group_bits = 16;
          _group_count = 8;
          _group_sep = ":";
          _group_toInt = fromHexString;
          compressed = ip.hexShorted; # TODO temporary
          shorted = ip.hexShorted;
        };
      };
      ip =
        statics.${ipArg.version} # avoid recursion error
        // {
          type = "ipAddress";
          # internal operators
          __toString = s: s.cidrCompressed;
          # decimal
          decGroups = map ip._group_toInt ip._groups;
          decCompressed = concatStringsSep ip._group_sep (map toString ip.decGroups);
          # binary
          binGroups = map (intToBin ip._group_bits) ip.decGroups; # shortcut compared to hexGroups
          binRaw = concatStringsSep "" ip.binGroups;
          # hex
          hexGroups = map toHex ip.decGroups;
          hexShorted = concatStringsSep ":" ip.hexGroups;
          # TODO hexCompressed
          # TODO hexExploded
          # network
          binRawNet = substring 0 ip.cidrInt ip.binRaw;
          _cidr_max = ip._group_count * ip._group_bits;
          cidrInt = if ip._cidrGroup == null then ip._cidr_max else toIntBase10 ip._cidrGroup;
          cidrCompressed = "${ip.compressed}/${ip.cidrStr}";
          cidrShorted = "${ip.shorted}/${ip.cidrStr}";
          cidrStr = "${toString ip.cidrInt}";
          # helpers
          isCompatible =
            o:
            assert self.isParsedIP o;
            ip.type == o.type;
          __verifyCompat = fun: o: if ip.isCompatible o then fun o else false;
          split = map (self.parseBinNet ip.version) [
            "${ip.binRawNet}0"
            "${ip.binRawNet}1"
          ];
        }
        // mapAttrs (_: ip.__verifyCompat) {
          contains = o: ip.cidrInt <= commonPrefixLength ip.binRawNet o.binRawNet;
          equals = o: ip.decGroups == o.decGroups && ip.cidrInt == o.cidrInt;
          sameNetwork = o: ip.binRawNet == o.binRawNet;
        }
        // ipArg;
    in
    assert assertMsg (length ip._groups == ip._group_count)
      "invalid IP group count, expected ${toString ip._group_count}, got: ${toString (length ip._groups)}, input: ${ipArg._input} (bug, please report)";
    assert ip.cidrInt <= ip._cidr_max;
    ip;
in
rec {

  formatMAC =
    let
      badChars = [
        "."
        ":"
        "_"
        "-"
      ];
      goodChars = map (x: "") badChars;
    in
    mac:
    pipe mac [
      (replaceStrings badChars goodChars)
      toLower
    ];

  isParsedIP = x: isAttrs x && x.type or null == "ipAddress";
  isParsedIPv4 = x: isParsedIP x && x.version == "ipv4";
  isParsedIPv6 = x: isParsedIP x && x.version == "ipv6";

  parseIP = ip: if hasInfix ":" ip then parseIPv6 ip else parseIPv4 ip;

  parseIPv4 =
    ipStr:
    let
      parsed = match ''^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)(/([0-9]+))?$'' ipStr;
      ip = toIpClass {
        version = "ipv4";
        _input = ipStr;
        _cidrGroup = last parsed;
        _groups = sublist 0 4 parsed;
      };
    in
    assert parsed != null; # TODO improve
    ip;

  parseIPv6 =
    # TODO add support for IPv4-mapped addresses
    ipStr:
    let
      parsed = match ''^(([0-9a-f]{0,4}:){1,7}[0-9a-f]{0,4})(/([0-9]+))?$'' (toLower ipStr);
      rawGroups = pipe parsed [
        (flip elemAt 0)
        (splitString ":")
        # first & last zeros might be omitted as well, but are not a compression artifact
        (imap1 (i: x: if (i == 1 || i == length rawGroups) && x == "" then "0" else x))
      ];
      groups = flip concatMap rawGroups (
        x: if x == "" then genList (_: "0") (9 - length rawGroups) else singleton x
      );
      ip = toIpClass {
        version = "ipv6";
        _input = ipStr;
        _cidrGroup = last parsed;
        _groups = groups;
      };
    in
    assert parsed != null;
    assert count (g: g == "") rawGroups <= 1;
    ip;

  parseIPv6IfId =
    ipStr:
    let
      parsed = match ''^(([0-9a-f]{0,4}:){1,3}[0-9a-f]{0,4})$'' (toLower ipStr);
      rawGroups = pipe parsed [
        (flip elemAt 0)
        (splitString ":")
        # first & last zeros might be omitted as well, but are not a compression artifact
        (imap1 (i: x: if (i == 1 || i == length rawGroups) && x == "" then "0" else x))
      ];
      groups = flip concatMap rawGroups (
        x: if x == "" then genList (_: "0") (5 - length rawGroups) else singleton x
      );
      ip = toIpClass {
        type = "ipInterfaceIdentifier";
        version = "ipv6";
        _group_count = 4;
        _input = ipStr;
        _cidrGroup = null;
        _groups = groups;
      };
    in
    assert parsed != null;
    assert count (g: g == "") rawGroups <= 1;
    ip;

  parseBinNet =
    ipV: binStr:
    let
      ip = toIpClass {
        version = ipV;
        _input = binStr;
        # special overwrites - TODO integrate into toIpClass
        cidrInt = builtins.stringLength binStr;
        binRaw = fixedWidthStrSuffix ip._cidr_max "0" binStr;
        binGroups = genList (i: substring (ip._group_bits * i) ip._group_bits ip.binRaw) ip._group_count;
        decGroups = map binToInt ip.binGroups;
        _groups = ip.binGroups; # satisfies the group-count assertion in toIpClass
        # shortcuts
        binRawNet = binStr;
      };
    in
    ip;

  mergeIPv6IfId =
    prefix: suffix:
    let
      pref = parseIPv6 prefix;
      suff = parseIPv6IfId suffix;
    in
    assert pref.cidrInt <= 64;
    "${concatStringsSep ":" (sublist 0 4 pref.hexGroups ++ suff.hexGroups)}/64";

  netMinus =
    excl: net:
    if net.sameNetwork excl then
      [ ]
    else if !net.contains excl then
      singleton net
    else
      net.split;

  netListMinus = excl: concatMap (netMinus excl);

}
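
A few hedged evaluation sketches for the parsers above (traced through the logic by hand; `lib.network` is where the lib `default.nix` in this commit mounts this group):

```nix
let
  inherit (lib.network) parseIP formatMAC;
  net = parseIP "10.0.0.0/8";
  host = parseIP "10.1.2.3";
in
[
  (toString host)     # => "10.1.2.3/32" (CIDR defaults to the full address width)
  (net.contains host) # => true  (8 <= common binary prefix length)
  (net.sameNetwork host) # => false
  (formatMAC "AA:BB:CC_DD-EE.FF") # => "aabbccddeeff"
]
```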
@ -1,32 +0,0 @@
{ lib, ... }@flakeArg:
let
  inherit (builtins)
    isAttrs
    isBool
    isList
    isNull
    isString
    typeOf
    ;
  inherit (lib.strings) optionalString;
in
{

  conditionalString =
    cond:
    optionalString (
      if isNull cond then
        false
      else if isBool cond then
        cond
      else if isString cond then
        cond != ""
      else if isList cond then
        cond != [ ]
      else if isAttrs cond then
        cond.enable or (cond != { })
      else
        throw "unexpected type of condition ${typeOf cond}"
    );

}
@ -1,100 +0,0 @@
{
  inputs,
  lib,
  self,
  ...
}@flakeArg:
# TODO upstream
let
  inherit (builtins) concatStringsSep;
  repeat = expr: count: builtins.genList (_: expr) count;
  concatRepeat =
    sep: str: count:
    concatStringsSep sep (repeat str count);
  concatGroup = patterns: "(${concatStringsSep "|" patterns})";
  repeatOptional =
    sep: pattern: count:
    "${concatRepeat "" "(${pattern}${sep})?" count}${pattern}";
  matchType =
    { description, pattern }: lib.types.strMatching "^${pattern}$" // { inherit description; };
  # === regex parts
  hexChar = "[0-9A-Fa-f]";
  ipv4Block = "(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])";
  euiHexBlock = concatRepeat "" hexChar 2;
  euiWith = concatRepeat "[.:_-]?" euiHexBlock;
  eui48 = euiWith 6;
  eui64 = euiWith 8;
  ipv4Addr = concatRepeat "\\." ipv4Block 4;
  ipv6Block = repeatOptional "" hexChar 4;
  ipv6Addr =
    let
      genVariant =
        max: rightNum:
        let
          leftNum = max - rightNum - 1;
          leftPart = concatRepeat ":" ipv6Block leftNum;
          middlePart = lib.optionalString (rightNum == 0) "(${ipv6Block})?"; # full address only required once
          rightPart = repeatOptional ":" ipv6Block rightNum;
        in
        "${leftPart}:${middlePart}:${rightPart}";
      genAll = max: builtins.genList (genVariant max) max;
      normals = genAll 8;
      ipv4Mapped = map (x: "${x}:${ipv4Addr}") (genAll 6);
    in
    concatGroup (normals ++ ipv4Mapped);
  v4CIDR = "/(3[0-2]|2[0-9]|1?[0-9])";
  v6CIDR = "/(12[0-8]|1[0-2][0-9]|[1-9]?[0-9])";
  interfaceId = "(%[[:alnum:]]+)?";
  # === references
  ipv6Ref = "RFC 4291 Section 2.2";
in
# extensions to the Nix option types library
{

  eui48 = matchType {
    description = "EUI-48 (i.e. MAC address)";
    pattern = eui48;
  };

  eui64 = matchType {
    description = "EUI-64";
    pattern = eui64;
  };

  ipAddress = lib.types.either self.ipv4Address self.ipv6Address;

  ipAddressPlain = lib.types.either self.ipv4AddressPlain self.ipv6AddressPlain;

  ipNetwork = lib.types.either self.ipv4Network self.ipv6Network;

  ipv4Address = matchType {
    description = "IPv4 address (no CIDR, opt. interface identifier)";
    pattern = ipv4Addr + interfaceId;
  };

  ipv4AddressPlain = matchType {
    description = "IPv4 address (no CIDR, no interface identifier)";
    pattern = ipv4Addr;
  };

  ipv4Network = matchType {
    description = "IPv4 address/network with CIDR";
    pattern = ipv4Addr + v4CIDR;
  };

  ipv6Address = matchType {
    description = "IPv6 address (${ipv6Ref}, no CIDR, opt. interface identifier)";
    pattern = ipv6Addr + interfaceId;
  };

  ipv6AddressPlain = matchType {
    description = "IPv6 address (${ipv6Ref}, no CIDR, no interface identifier)";
    pattern = ipv6Addr;
  };

  ipv6Network = matchType {
    description = "IPv6 address/network with CIDR (${ipv6Ref})";
    pattern = ipv6Addr + v6CIDR;
  };

}
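
A minimal sketch of using one of these types in a module (the option name is hypothetical; per the lib `default.nix` above, this group is merged into `lib.types` of the flake's extended lib):

```nix
{ lib, ... }:
{
  options.my.upstreamNetwork = lib.mkOption {
    description = "Network to route upstream (address with CIDR).";
    type = lib.types.ipNetwork; # accepts e.g. "192.168.0.0/24" or "fd00::/64"
  };
}
```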
@ -1,9 +0,0 @@
{
  imports = [
    # files
    ./efi.nix
    ./fileSystems.nix
    ./mdns.nix
    ./nixos.nix
  ];
}
@ -1,39 +0,0 @@
{
  config,
  lib,
  options,
  ...
}:
let
  inherit (lib.strings) escapeNixString;
  cfg = config.boot.loader;
  fs = config.fileSystems;
  efiIndicator = builtins.any (x: x) [
    (cfg.grub.enable && cfg.grub.efiSupport)
    (cfg.systemd-boot.enable)
  ];
  efiMountPath = escapeNixString cfg.efi.efiSysMountPoint;
  efiMount = fs.${cfg.efi.efiSysMountPoint} or null;
in
# TODO check cfg.grub.mirroredBoots as well
# TODO enable disko checks (optional, i.e. when disko options are available)
{
  config = lib.mkIf efiIndicator {

    assertions = [
      {
        assertion = efiMount != null;
        message = ''
          There is no filesystem declaration for the EFI System Partition ${efiMountPath}
        '';
      }
      {
        assertion = efiMount != null -> efiMount.fsType == "vfat";
        message = ''
          EFI System Partition ${efiMountPath} does not have fsType "vfat"
        '';
      }
    ];

  };
}
@ -1,21 +0,0 @@
{ config, ... }:
let
  inherit (builtins) any attrValues elem;
  allMounts = attrValues config.fileSystems;
  testDiskOption = option: disk: elem option disk.options;
  testDiskDiscard = testDiskOption "discard";
in
{
  config = {

    assertions = [
      {
        assertion = config.services.fstrim.enable -> !any testDiskDiscard allMounts;
        message = ''
          enabling the "discard" mount option is discouraged because services.fstrim is enabled
        '';
      }
    ];

  };
}
@ -1,22 +0,0 @@
{ config, lib, ... }:
let
  cfgAvahi = config.services.avahi;
  avahiMDNS = cfgAvahi.enable && (cfgAvahi.nssmdns4 || cfgAvahi.nssmdns6);
  cfgResolved = config.services.resolved;
  # TODO check settings when cfgResolved.settings exists
  resolvedMDNS = cfgResolved.enable;
in
{
  config = {

    assertions = [
      {
        assertion = !(avahiMDNS && cfgResolved.enable);
        message = ''
          systemd-resolved is enabled while Avahi mDNS is enabled, disable one of them!
        '';
      }
    ];

  };
}
@ -1,42 +0,0 @@
{ config, lib, ... }:
let
  channelsEn = config.nix.channel.enable;
  nixFeature = lib.trivial.flip builtins.elem config.nix.settings.experimental-features;
  packageNames = map lib.strings.getName config.environment.systemPackages;
  isInstalled = lib.trivial.flip builtins.elem packageNames;
  gitInst = isInstalled "git";
  gitEn = config.programs.git.enable;
in
{
  config = {

    assertions = [
      {
        assertion = !channelsEn -> nixFeature "flakes";
        message = ''
          You disabled Nix channels, so you should enable flakes; otherwise you cannot build a new config.
        '';
      }
      {
        assertion = (!channelsEn && nixFeature "flakes") -> (gitInst || gitEn);
        message = ''
          Missing git, which is required to interact with most flakes.
        '';
      }
      {
        assertion = nixFeature "flakes" -> nixFeature "nix-command";
        message = ''
          Nix experimental-feature "flakes" requires feature "nix-command"
        '';
      }
    ];

    warnings = [
      # TODO add link to this file
      (lib.mkIf (gitEn && !gitInst) ''
        (not relevant for you, please report to the module author) git package was not detected properly, falling back to the programs.git module
      '')
    ];

  };
}
@ -1,54 +0,0 @@
{ config, lib, ... }:
let
  cpu = config.hardware.cpu;
  anyArg = builtins.any (x: x) [
    # list of conditions which require the cpu type to be known
    cpu.updateMicrocode
  ];
  cpuOpts =
    type:
    lib.mkIf (anyArg && cpu.type == type) {
      # options for all cpu types
      updateMicrocode = lib.mkDefault cpu.updateMicrocode;
    };
in
{

  options = {

    hardware.cpu = {

      type = lib.mkOption {
        description = ''
          Configures the CPU type this configuration is expected to run on.

          This setting is required when using generalizing options
          like {option}`hardware.cpu.updateMicrocode`.
        '';
        type =
          with lib.types;
          nullOr (enum [
            "amd"
            "intel"
          ]);
        # required
      };

      updateMicrocode = lib.mkEnableOption ''
        microcode updates for the CPU type selected in {option}`hardware.cpu.type`
      '';

    };

  };

  config = {

    hardware.cpu = {
      amd = cpuOpts "amd";
      intel = cpuOpts "intel";
    };

  };

}
@ -1,11 +0,0 @@
{
  imports = [
    # files
    ./cpu.nix
    ./kernel.nix
    ./openssh.nix
    ./podman.nix
    ./printing.nix
    ./tailscale.nix
  ];
}
@ -1,41 +0,0 @@
{
  config,
  lib,
  options,
  pkgs,
  ...
}:
let
  blocked = config.boot.blockedKernelModules;
in
{

  options = {
    boot.blockedKernelModules = lib.mkOption {
      description = ''
        Kernel modules which are blocked from being loaded
        by using a rather hacky workaround called "fake install".
        Read the [Debian Wiki](https://wiki.debian.org/KernelModuleBlacklisting) for more info.

        Be aware that this should block all attempts
        to load that module at runtime,
        *including other modules* depending on it.

        Modules listed here are automatically blacklisted as well
        by adding them to {option}`boot.blacklistedKernelModules`,
        which should hinder them from being loaded automatically
        when supported devices are detected.
      '';
      type = options.boot.blacklistedKernelModules.type;
      default = [ ];
    };
  };

  config = {
    boot.blacklistedKernelModules = blocked;
    boot.extraModprobeConfig = lib.flip lib.concatMapStrings blocked (module: ''
      install ${module} ${lib.getExe' pkgs.coreutils "true"}
    '');
  };

}
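
A hedged usage sketch of the option above, together with the modprobe line it should render (store path abbreviated):

```nix
{
  boot.blockedKernelModules = [ "pcspkr" ];
  # resulting boot.extraModprobeConfig, roughly:
  #   install pcspkr /nix/store/...-coreutils-*/bin/true
}
```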
@ -1,17 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.virtualisation.podman;
in
{
  options.virtualisation.podman = {
    compose.enable = lib.mkEnableOption "podman-compose";
  };
  config.environment.systemPackages = lib.mkIf (cfg.enable && cfg.compose.enable) [
    pkgs.podman-compose
  ];
}
@ -1,25 +0,0 @@
{ config, lib, ... }:
let
  cfg = config.services.printing;
in
{

  options.services.printing = {
    enableAutoDiscovery = lib.mkEnableOption ''
      CUPS automatic discovery of printers.

      This will enable & configure Avahi accordingly,
      including opening ports in the firewall'';
  };

  config = lib.mkIf cfg.enable {
    # TODO make this also possible with systemd-resolved
    services.avahi = lib.mkIf cfg.enableAutoDiscovery {
      enable = true;
      nssmdns4 = true;
      nssmdns6 = true;
      openFirewall = true;
    };
  };

}
@ -1,67 +0,0 @@
{ config, lib, ... }:
let
  cfg = config.services.tailscale;
  boolToStr = v: if v then "true" else "false";
  toTsCli = lib.cli.toGNUCommandLine {
    mkBool = k: v: lib.singleton "--${k}=${boolToStr v}";
    mkList = k: v: lib.singleton "--${k}=${builtins.concatStringsSep "," v}";
    mkOption =
      k: v:
      if v == null then [ ] else lib.singleton "--${k}=${lib.generators.mkValueStringDefault { } v}";
  };
in
{

  options.services.tailscale = {

    setFlags = lib.mkOption {
      description = ''
        Options which are given to `tailscale set` on every boot.
        Will be translated to {option}`services.tailscale.extraSetFlags`.
      '';
      type = lib.types.anything;
      default = { };
      example = {
        advertise-exit-node = true;
        advertise-tags = [
          "mytag"
          "other"
        ];
        netfilter-mode = "none";
      };
    };

    upFlags = lib.mkOption {
      description = ''
        Options which are given to `tailscale up`.
        Will be translated to {option}`services.tailscale.extraUpFlags`.
      '';
      type = lib.types.anything;
      default = { };
      example = {
        ssh = true;
        advertise-tags = [
          "mytag"
          "other"
        ];
      };
    };

  };

  config = lib.mkIf cfg.enable {

    services.tailscale = {
      extraSetFlags = toTsCli cfg.setFlags;
      # apply set flags already on autoconnect
      extraUpFlags = toTsCli cfg.upFlags ++ cfg.extraSetFlags;
    };

    # ensure tailscale set settings really apply
    systemd.services.tailscaled-set = lib.mkIf (cfg.authKeyFile != null) {
      after = lib.singleton "tailscaled-autoconnect.service";
      wants = lib.singleton "tailscaled-autoconnect.service";
    };

  };

}
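
For reference, the `setFlags` example above should render into the following `tailscale set` arguments (keys are emitted in attribute-name order, i.e. alphabetically):

```nix
[
  "--advertise-exit-node=true"
  "--advertise-tags=mytag,other"
  "--netfilter-mode=none"
]
```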
@ -0,0 +1,151 @@
# applicable to all hosts running on bare hardware

{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.x-banananetwork.hwCommon;
  cpu = config.hardware.cpu;
in
{

  options = {

    hardware.cpu = {

      type = lib.mkOption {
        description = ''
          Configures the CPU type this configuration is expected to run on.

          This setting is required when using generalizing options
          like {option}`hardware.cpu.updateMicrocode`.
        '';
        type =
          with lib.types;
          nullOr (enum [
            "amd"
            "intel"
          ]);
        # required
      };

      updateMicrocode = lib.mkEnableOption ''
        microcode updates for the CPU type selected in {option}`hardware.cpu.type`.

        Because this module is not yet part of upstream,
        it requires {option}`x-banananetwork.hwCommon.enable` to be enabled.
      '';

    };

    x-banananetwork.hwCommon = {

      enable = lib.mkEnableOption ''
        settings common to all bare hardware-based hosts
      '';

    };

  };

  config = lib.mkIf cfg.enable {

    assertions = [
      {
        assertion = cfg.enable -> !config.x-banananetwork.vmCommon.enable;
        message = "hwCommon & vmCommon profiles cannot both be enabled at the same time";
      }
    ];

    boot = {

      # TODO adapt better
      loader = {
        efi.canTouchEfiVariables = lib.mkDefault true;
        systemd-boot = {
          enable = true;
          editor = lib.mkDefault true; # TODO lockdown (disable this OR enable TPM PCR checks)
          memtest86.enable = lib.mkDefault true;
        };
      };

    };

    hardware = {

      cpu = lib.mkMerge [

        # TODO maybe upstream?
        (
          let
            type = config.hardware.cpu.type;
            opts = isType: {
              updateMicrocode = lib.mkDefault (isType && config.hardware.cpu.updateMicrocode);
            };
          in
          {
            amd = opts (type == "amd");
            intel = opts (type == "intel");
          }
        )

        {
          updateMicrocode = lib.mkDefault true;
        }

      ];

      enableRedistributableFirmware = lib.mkDefault true;

    };

    powerManagement = {
      cpuFreqGovernor = "ondemand";
      enable = true;
    };

    services = {

      fwupd = {
        enable = true;
      };

      power-profiles-daemon = {
        # 2024-08-14: tlp seems way better in my experience, hence disable this
        enable = lib.mkIf config.services.tlp.enable false;
      };

      smartd = {
        enable = true;
      };

      tlp = {
        # energy-saving daemon, similar to powertop --autotune, but adaptive to BAT / AC
        enable = true;
      };

    };

    x-banananetwork = {

      allCommon.enable = true;
      useable.enable = lib.mkDefault true; # add docs & tools for emergencies

    };

  };

}
@ -1,34 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.x-banananetwork.improvedDefaults;
  fx = config.programs.firefox;
in
{

  config = lib.mkIf (cfg.enable && fx.enable) {

    # TODO only on touchscreen / wayland
    environment.sessionVariables = {
      MOZ_USE_XINPUT2 = "1";
    };

    programs.firefox = {

      preferences = {
        "widget.use-xdg-desktop-portal.file-picker" = lib.mkIf config.xdg.portal.enable true;
      };

      wrapperConfig = {
        pipewireSupport = lib.mkIf config.services.pipewire.enable true;
      };

    };

  };

}
@ -1,19 +0,0 @@
{ config, lib, ... }:
let
  cfg = config.x-banananetwork.improvedDefaults;
  nmEn = config.networking.networkmanager.enable;
  waitOnlineEn = config.systemd.network.wait-online.enable;
in
{

  config = lib.mkIf cfg.enable {
    systemd.network.wait-online.enable = lib.mkIf nmEn (lib.mkDefault false);

    warnings = lib.singleton (
      lib.mkIf (nmEn && waitOnlineEn) ''
        systemd-networkd-wait-online is in most cases useless on systems primarily using NetworkManager & may increase boot times if it just fails
      ''
    );
  };

}
@ -1,10 +0,0 @@
{ config, lib, ... }:
let
  cfg = config.x-banananetwork.improvedDefaults;
  tlpEn = config.services.tlp.enable;
in
{
  # power-profiles-daemon gets enabled by most display managers,
  # so this suppresses it if another power daemon is enabled
  config = lib.mkIf cfg.enable { services.power-profiles-daemon.enable = lib.mkIf tlpEn false; };
}
@ -1,29 +0,0 @@
# This file especially includes special overlays
# where a full include via the nixpkgs.overlays option is not feasible
# because of a big list of dependencies
# and where in most cases just setting a certain package option should be enough (e.g. systemd).
{
  inputs,
  lib,
  outputs,
  ...
}@flakeArg:
let
  withOverlay =
    overlay: configFun:
    { pkgs, ... }:
    configFun (
      import inputs.nixpkgs {
        system = pkgs.system;
        overlays = lib.singleton overlay;
      }
    );
in
{

  # TODO until https://github.com/systemd/systemd/issues/29651 is fixed
  systemd-radv-fadeout = withOverlay outputs.overlays.systemd-radv-fadeout (pkgs: {
    config.systemd.package = pkgs.systemd;
  });

}
@ -1,6 +0,0 @@
{
  imports = [
    # files
    ./nft-update-addresses.nix
  ];
}
@ -1,187 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  servName = "nft-update-addresses";
  cfg = config.services.${servName};
  settingsFormat = pkgs.formats.json { };
  mkDisableOption = desc: lib.mkEnableOption desc // { default = true; };
  # output options values
  configFile = pkgs.writeTextFile {
    name = "${servName}.json";
    text = builtins.toJSON cfg.settings; # TODO the file could otherwise not easily be checked for errors
    checkPhase = ''
      ${lib.getExe cfg.package} --check-config --config-file "$out"
    '';
  };
  staticDefs = builtins.readFile (
    pkgs.runCommandLocal "${servName}.nftables" { } ''
      ${lib.getExe cfg.package} --output-set-definitions --config-file ${configFile} > $out
    ''
  );
in
{

  options.services.${servName} = {

    enable = lib.mkEnableOption "${servName} service";

    package = lib.mkPackageOption pkgs (lib.singleton servName) { };

    settings = lib.mkOption {
      # TODO link to docs
      description = "Configuration for ${servName}";
      type = settingsFormat.type;
      default = {
        nftTable = "nixos-fw";
      };
      example.interfaces = {
        wan0 = { };
        lan0.ports.tcp = {
          exposed = [
            {
              dest = "aa:bb:cc:dd:ee:ff";
              port = 80;
            }
            {
              dest = "aa:bb:cc:00:11:22";
              port = 80;
            }
          ];
          forwarded = [
            {
              dest = "aabb-ccdd-eeff";
              lanPort = 80;
              wanPort = 80;
            }
            {
              dest = "aa.bbcc.0011.22";
              lanPort = 80;
              wanPort = 8080;
            }
          ];
        };
      };
    };

    includeStaticDefinitions = mkDisableOption ''inclusion of static definitions from {option}`services.${servName}.nftablesStaticDefinitions` into the nftables config'';

    configurationFile = lib.mkOption {
      description = "Path to the configuration file used by ${servName}.";
      type = lib.types.path; # needs to be available at build time
      readOnly = true;
      default = configFile;
      defaultText = lib.literalExpression "# content as generated from config.services.${servName}.settings";
    };

    nftablesStaticDefinitions = lib.mkOption {
      description = ''
        Static definitions provided by ${servName} when called with the given configuration.

        When {option}`services.${servName}.includeStaticDefinitions` is enabled (which is the default),
        these will already be included in your nftables setup.
        Otherwise, you can use the value of this output option as you prefer.
      '';
      readOnly = true;
      default = staticDefs;
      defaultText = lib.literalExpression "# as provided by ${servName}";
    };

  };

  config = lib.mkIf cfg.enable {

    assertions = [
      {
        assertion = cfg.enable -> config.networking.nftables.enable;
        message = "${servName} requires nftables to be configured";
      }
      # TODO assert for port duplications
    ];

    networking.nftables.tables.${cfg.settings.nftTable}.content = lib.mkIf cfg.includeStaticDefinitions staticDefs;

    systemd.services.${servName} = {
      description = "IPv6 prefix rules updater for nftables";
      after = [
        "nftables.service"
        "network.target"
      ];
      partOf = lib.singleton "nftables.service";
      requisite = lib.singleton "nftables.service";
      wantedBy = lib.singleton "multi-user.target";
      upheldBy = lib.singleton "systemd-networkd.service";
      unitConfig.ReloadPropagatedFrom = lib.singleton "nftables.service";
      restartIfChanged = true;
      restartTriggers = config.systemd.services.nftables.restartTriggers;
      serviceConfig = {
        # Service
        Type = "notify-reload";
        ExecStart = lib.singleton "${lib.getExe cfg.package} ${
          lib.cli.toGNUCommandLineShell { } {
            config-file = configFile;
            ip-command = "${pkgs.iproute2}/bin/ip";
            nft-command = lib.getExe pkgs.nftables;
          }
        }";
        RestartSec = "250ms";
        RestartSteps = 3;
        RestartMaxDelaySec = "3s";
        TimeoutSec = "10s";
        Restart = "always";
        NotifyAccess = "all"; # the bash script opens subprocesses in pipes
        # Paths
        ProtectProc = "noaccess";
        ProcSubset = "pid";
        CapabilityBoundingSet = [
          "CAP_BPF" # nft is compiled to bpf
          "CAP_IPC_LOCK" # ?
          "CAP_KILL" # ?
          "CAP_NET_ADMIN"
        ];
        # Security
        NoNewPrivileges = true;
        # Process
        KeyringMode = "private";
        OOMScoreAdjust = 10;
        # Scheduling
        Nice = -2;
        CPUSchedulingPolicy = "fifo";
        # Sandboxing
        ProtectSystem = "strict";
        ProtectHome = true;
        PrivateTmp = true;
        PrivateDevices = true;
        PrivateNetwork = false; # breaks nftables
        PrivateIPC = true;
        PrivateUsers = false; # breaks nftables
        ProtectClock = true;
        ProtectKernelTunables = true;
        ProtectKernelModules = true; # are already loaded
        ProtectKernelLogs = true;
        ProtectControlGroups = true;
        #RestrictAddressFamilies = [
        #  # ?
        #  "AF_INET"
        #  "AF_INET6"
        #  #"AF_NETLINK"
        #];
        RestrictNamespaces = true;
        RestrictSUIDSGID = true;
        #SystemCallFilter = "@basic-io @ipc @network-io @signal @timer" # would definitely break things
        #SystemCallLog = "~"; # for debugging; should log all system calls made
        # Resource Control
        CPUQuota = "50%";
        # TODO test to gather real values
        MemoryLow = "8M";
        MemoryHigh = "32M";
        MemoryMax = "128M";
      };
    };

  };

}
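
A minimal enablement sketch for the service module above (MAC value is a placeholder; `nftTable` is repeated because partially setting `settings` replaces the option default):

```nix
{
  networking.nftables.enable = true; # required by the assertion above
  services.nft-update-addresses = {
    enable = true;
    settings = {
      nftTable = "nixos-fw";
      interfaces = {
        wan0 = { };
        lan0.ports.tcp.exposed = [
          { dest = "aa:bb:cc:dd:ee:ff"; port = 443; }
        ];
      };
    };
  };
}
```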
@ -1,48 +0,0 @@
# NixOS Router Framework

This is another NixOS router framework, one that works better for my use case.


## Features

- designed for environments with dynamic IP address configs
  - uses DHCPv4 on WAN to get a private or public IPv4
  - uses DHCPv6 on WAN to get a public IPv6 prefix via DHCP prefix delegation (DHCP-PD)
- allows easy exposing & forwarding of ports
  - exposed port rules auto-adapt to the changing IPv6 prefix
  - port forwardings (i.e. DNAT) work on IPv4 & IPv6
  - configuring them only requires a MAC & static IPv4
- configures AdGuard Home as a filtering DNS server for clients
- stays mostly compatible with common NixOS networking & firewall configs, e.g.:
  - `.openFirewall` & `.allowedTCPPorts`/`.allowedUDPPorts` options continue to work (opens the port on all interfaces)

I am also developing a NixOS test which tries to verify that these features work as expected; it will be published later in this flake.


### Restrictions

Given all these features, this module comes with a few restrictions (incomplete list):

- supports only one WAN & one LAN interface
- does not allow easy integration of a VPN network
- fully relies on systemd-networkd for the DHCPv4/v6 client, the DHCPv4 server & prefix-delegated router advertisements

Overcoming these limitations is not impossible, but it may require changing this module in substantial ways.


## Example Use

(**TODO** link to yet uncommitted stuff)


## Inspiration

I was inspired to implement this by other, similar projects, which were sadly lacking some features highly important to me.
However, as a form of credit & to provide further resources to you:

- [nixos-router](https://github.com/chayleaf/nixos-router) by [@chayleaf](https://github.com/chayleaf)
  - utilizes network namespaces (mine does not!)
  - because of that, (at the time of writing) it ditched systemd-networkd for now, which I wanted to use
  - was not designed for an environment with dynamic IPs
- [NixOS based router in 2023](https://github.com/ghostbuster91/blogposts/blob/a2374f0039f8cdf4faddeaaa0347661ffc2ec7cf/router2023-part2/main.md) by [@ghostbuster91](https://github.com/ghostbuster91)
  - was a useful resource in creating my module
@ -1,115 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
options,
|
||||
...
|
||||
}:
|
||||
let
|
||||
self = options.x-banananetwork.vmDisko;
|
||||
cfg = config.x-banananetwork.vmDisko;
|
||||
in
|
||||
{
|
||||
|
||||
# TODO upstream that to disko
|
||||
|
||||
options.x-banananetwork.vmDisko = {
|
||||
|
||||
enable = lib.mkEnableOption ''
|
||||
VM disk configuration with disko.
|
||||
Will be automatically enabled when option{x-banananetwork.vmDisko.generation} is manually set.
|
||||
'';
|
||||
|
||||
mainDiskName = lib.mkOption {
|
||||
description = ''
|
||||
Name of the main disk.
|
||||
|
||||
Similar to {option}`system.stateVersion`,
|
||||
**do not change this** unless you know what you are doing.
|
||||
'';
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
generation = lib.mkOption {
|
||||
description = ''
|
||||
Disk generation to use.
|
||||
|
||||
Similar to {option}`system.stateVersion`,
|
||||
**do not change this** unless you know what you are doing.
|
||||
|
||||
See option {option}`x-banananetwork.vmDisko.generations`
|
||||
for a list of all generations available.
|
||||
'';
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
example = self.recommendedGeneration.default;
|
||||
};
|
||||
|
||||
recommendedGeneration = lib.mkOption {
|
||||
description = ''
|
||||
Disk generation recommended to use for new systems.
|
||||
'';
|
||||
default = "ext4-1";
|
||||
readOnly = true;
|
||||
};
|
||||
|
||||
generationsDir = lib.mkOption {
|
||||
description = ''
|
||||
Directories where to search for generations.
|
||||
|
||||
A generation must at least use one disk with the attr name `main`
|
||||
(through its label name can be different)
|
||||
because VMs are expected to have at least one disk as their primary available.
|
||||
'';
|
||||
type = lib.types.path;
|
||||
default = ./.;
|
||||
example = ./.;
|
||||
};
|
||||
|
||||
generationPath = lib.mkOption {
|
||||
description = "Path to selected generation template.";
|
||||
readOnly = true;
|
||||
type = lib.types.path;
|
||||
default =
|
||||
let
|
||||
path = cfg.generationsDir + "/${cfg.generation}";
|
||||
in
|
||||
if builtins.pathExists path then path else path + ".nix";
|
||||
defaultText = lib.literalExpression ''
|
||||
with config.x-banananetwork.vmDisko;
|
||||
generationsDir + ("/''${generation}" or "/''${generation}.nix")
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = lib.mkMerge [
|
||||
(lib.mkIf (cfg.generation != null) { x-banananetwork.vmDisko.enable = true; })
|
||||
(lib.mkIf cfg.enable {
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = cfg.enable -> cfg.generation != null;
|
||||
message = "x-banananetwork.vmDisko.generation must be set. Currently \"${cfg.recommendedGeneration}\" is recommended.";
|
||||
}
|
||||
{
|
||||
assertion = cfg.generation != null -> builtins.pathExists cfg.generationPath;
|
||||
message = "generation \"${cfg.generation}\" was not found in ${cfg.generationPath}";
|
||||
}
|
||||
];
|
||||
|
||||
disko.devices = lib.mkMerge [
|
||||
(import cfg.generationPath)
|
||||
{
|
||||
# avoid mixing VM disks
|
||||
# hint: mkOverride required because
|
||||
# - cfg.generations from above is already type-checked, hence priorities are discharged
|
||||
# - assigment from above has default priority of 100
|
||||
#disk.main.name = lib.mkOverride 99 cfg.mainDiskName;
|
||||
disk.main.name = cfg.mainDiskName;
|
||||
}
|
||||
];
|
||||
|
||||
})
|
||||
];
|
||||
|
||||
}
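
A hedged usage sketch for the option set above (the disk name is an example):

```nix
{
  x-banananetwork.vmDisko = {
    # setting generation implies enable = true;
    # "ext4-1" is the current recommendedGeneration
    generation = "ext4-1";
    mainDiskName = "vda";
  };
}
```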
|
@ -1,29 +0,0 @@
|
||||
{
|
||||
disk = {
|
||||
main = {
|
||||
type = "disk";
|
||||
content = {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
ESP = {
|
||||
type = "EF00";
|
||||
size = "500M";
|
||||
content = {
|
||||
type = "filesystem";
|
||||
format = "vfat";
|
||||
mountpoint = "/boot";
|
||||
};
|
||||
};
|
||||
root = {
|
||||
size = "100%";
|
||||
content = {
|
||||
type = "filesystem";
|
||||
format = "ext4";
|
||||
mountpoint = "/";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
@ -1,105 +1,7 @@
{
  inputs,
  lib,
  flake,
  outputs,
  ...
}@flakeArg:
let
  nixpkgs = inputs.nixpkgs;
  nixosSystem =
    { modules, system }:
    let
      modsExtended = [
        {
          system.configurationRevision = toString (
            flake.shortRev or flake.dirtyShortRev or flake.lastModified or "unknown"
          );
        }
        outputs.nixosModules.myOptions
        outputs.nixosModules.withDepends
        { home-manager.sharedModules = [ outputs.homeManagerModules.default ]; }
      ] ++ modules;
      systemArgs = {
        modules = modsExtended;
        # be aware: specialArgs will break in my nixos integration tests
        inherit system;
      };
    in
    nixpkgs.lib.nixosSystem systemArgs
    // {
      # expose module cleanly
      _banananetwork_systemArgs = systemArgs;
    };
  inherit (lib) importFlakeMod;
  importSystem = path: nixosSystem (importFlakeMod path);
in
{

  "x13yz" = nixosSystem {
    modules = [
      {
        # TODO check if required & hide into modules
        boot = {
          initrd = {
            availableKernelModules = [
              "nvme" # nvme (probably required for booting)
              "rtsx_pci_sdmmc" # probably for SD card (required for booting?)
              "xhci_pci" # for USB 3.0 (required for booting?)
            ];
            kernelModules = [
              "dm-snapshot" # pseudo-required for LVM
            ];
          };
          kernelModules = [
            "kvm-intel" # do not know if that is required here?
          ];
        };
      }
      outputs.nixosProfiles.blade
      inputs.nixos-hardware.nixosModules.lenovo-thinkpad-x13-yoga
      {
        # hardware
        hardware.cpu.type = "intel";
        hardware.graphics.intel.enable = true;
        programs.captive-browser.interface = "wlp0s20f3";
        x-banananetwork.frontend.convertable = true;
      }
      {
        # as currently installed
        boot.initrd.luks.devices."luks-herske.lvm.6nw.de" = {
          device = "/dev/disk/by-uuid/16b8f83d-0450-4c4d-9964-788575a31eec";
          preLVM = true;
          allowDiscards = true;
        };
        fileSystems."/" = {
          device = "/dev/disk/by-uuid/c93557db-e7c5-46ef-9cd8-87eb7c5753dc";
          fsType = "ext4";
          options = [ "relatime" ];
        };
        fileSystems."/boot" = {
          device = "/dev/disk/by-uuid/5F9A-9A2D";
          fsType = "vfat";
          options = [
            "uid=0"
            "gid=0"
            "fmask=0077"
            "dmask=0077"
          ];
        };
        swapDevices = [ { device = "/dev/disk/by-uuid/8482463b-ceb3-40b3-abef-b49df2de88e5"; } ];
        system.stateVersion = "24.05";
        x-banananetwork.sshHostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG71dtqG/c0AiFBN9OxoLD35TDQm3m8LXj/BQw60PE0h root@x13yz.pc.6nw.de 2024-07-01";
      }
      {
        # host configuration
        networking.domain = "pc.6nw.de";
        networking.hostName = "x13yz";
        services.fprintd.enable = true;
        x-banananetwork.frontend.enable = true;
      }
    ];
    system = "x86_64-linux";
  };
}@args: {

}
@ -1,68 +0,0 @@
# applicable to all systems running on bare hardware

{
  config,
  lib,
  pkgs,
  ...
}:
{

  imports = [
    # from here
    ./common.nix
  ];

  config = {

    # EFI by default
    boot.loader = {
      efi.canTouchEfiVariables = lib.mkDefault true;
      grub.memtest86.enable = lib.mkDefault true;
      systemd-boot = {
        enable = lib.mkDefault true;
        editor = lib.mkDefault true;
        memtest86.enable = lib.mkDefault true;
      };
    };

    environment.systemPackages = with pkgs; [
      pciutils
      usbutils
    ];

    hardware = {
      cpu.updateMicrocode = lib.mkIf config.hardware.enableRedistributableFirmware true;
      enableRedistributableFirmware = lib.mkDefault true;
    };

    powerManagement = {
      cpuFreqGovernor = "ondemand";
      enable = lib.mkDefault true;
    };

    services = {

      fwupd = {
        enable = true;
      };

      smartd = {
        enable = true;
      };

      tlp = {
        # 2024-08-14: tlp seems way better in my experience
        # energy-saving daemon, similar to powertop --autotune, but adaptive to BAT / AC
        enable = true;
      };

    };

    x-banananetwork = {
      # add docs & tools for emergencies
      useable.enable = lib.mkDefault true;
    };

  };
}
@ -1,12 +0,0 @@
{ lib, ... }@flakeArg:
let
  importProfile = path: import path;
  importProfileMod = lib.importFlakeMod;
in
{
  blade = importProfile ./blade.nix;
  common = importProfile ./common.nix;
  installer = importProfileMod ./installer.nix;
  pveGuest = importProfile ./pveGuest.nix;
  pveGuestHwSupport = importProfile ./pveGuestHwSupport.nix;
}
@ -1,29 +0,0 @@
# applies to self-built installers, esp. auto installers

{ inputs, ... }@flakeArg:
{
  config,
  lib,
  modulesPath,
  ...
}:
{
  imports = [
    # from nixpkgs
    "${modulesPath}/installer/cd-dvd/installation-cd-minimal.nix" # includes allHardware configs
    # from flake inputs
    inputs.unattended-installer.nixosModules.default
    # from here
    ./common.nix
    ./pveGuestHwSupport.nix
  ];
  config = {
    isoImage = {
      isoBaseName = "nixos-${config.isoImage.edition}";
      squashfsCompression = "zstd"; # more efficient
    };
    networking.domain = lib.mkDefault "temp.6nw.de"; # acceptable here because temporary
    system.stateVersion = lib.versions.majorMinor config.system.nixos.version;
    # installer does not necessarily need working SSH access & an extra user for that
  };
}
@ -1,27 +0,0 @@
# makes for nice-behaving pve-guests
# extends pveGuestHwSupport by adding:
# - EFI booting
# ONLY for installed systems
{ lib, ... }:
{

  imports = [
    # from here
    ./common.nix
    ./pveGuestHwSupport.nix
  ];

  config = {

    # configure for EFI only
    boot.loader = {
      efi.canTouchEfiVariables = true;
      grub.enable = lib.mkDefault false;
      grub.efiSupport = true; # in case grub is preferred for some reason
      systemd-boot.enable = lib.mkDefault true;

    };

  };

}
@ -1,71 +0,0 @@
# makes for nice-behaving pve-guests:
# - qemu-guest-agent & drivers
# - support for serial output (but graphic output should still work the same)
# works for installers as well (does NOT include common.nix)
{
  lib,
  modulesPath,
  pkgs,
  ...
}:
let
  # Based on https://unix.stackexchange.com/questions/16578/resizable-serial-console-window
  resize = pkgs.writeShellScriptBin "resize" ''
    export PATH="${lib.getBin pkgs.coreutils}/bin"
    if [ ! -t 0 ]; then
      # not an interactive terminal...
      exit 0
    fi
    TTY="$(tty)"
    if [[ "$TTY" != /dev/ttyS* ]] && [[ "$TTY" != /dev/ttyAMA* ]] && [[ "$TTY" != /dev/ttySIF* ]]; then
      # probably not a known serial console, we could make this check more
      # precise by using `setserial` but this would require some additional
      # dependency
      exit 0
    fi
    old=$(stty -g)
    stty raw -echo min 0 time 5

    # save cursor, move to the far bottom-right corner, ask the terminal for
    # the resulting cursor position, then restore the cursor
    printf '\0337\033[r\033[999;999H\033[6n\0338' > /dev/tty
    IFS='[;R' read -r _ rows cols _ < /dev/tty

    stty "$old"
    stty cols "$cols" rows "$rows"
  '';
in
{

  imports = [
    # from nixpkgs
    "${modulesPath}/profiles/qemu-guest.nix"
  ];

  config = {

    boot = {

      # TODO probably until https://github.com/NixOS/nixpkgs/issues/340086
      initrd.availableKernelModules = lib.singleton "virtio_iommu";

      kernelParams = [
        # show kernel log on serial
        "console=ttyS0,115200"
        # but use virtual tty as /dev/console (last entry)
        "console=tty0"
      ];

    };

    environment.systemPackages = [ resize ];

    services = {
      qemuGuest.enable = true;
    };

    systemd.services."serial-getty@".environment.TERM = "xterm-256color";

    time.hardwareClockInLocalTime = false; # just to make sure

  };

}
@ -1,20 +0,0 @@
{ lib, ... }@flakeArg:
{ pkgs_unstable, ... }@systemArg:
final: prev:
let
  list = [
    # TODO until 24.11
    "nixfmt-rfc-style"
    "wcurl"
  ];
  backport =
    pkgAttrName:
    let
      alreadyStable = builtins.hasAttr pkgAttrName prev;
      stableSource = lib.warn "consider removing ${pkgAttrName} from backports list as it is now available on stable" prev;
      source = if alreadyStable then stableSource else pkgs_unstable;
      pkg = builtins.getAttr pkgAttrName source;
    in
    pkg;
in
lib.genAttrs list backport
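For illustration, `lib.genAttrs names f` builds `{ name = f name; }` for every listed name, so the overlay above evaluates to roughly this (sketch, not verbatim output):

{
  nixfmt-rfc-style = backport "nixfmt-rfc-style";
  wcurl = backport "wcurl";
}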
@ -1,18 +0,0 @@
{ lib, ... }@flakeArg:
let
  inherit (lib) systemSpecificVars;
  rawImport = path: import path flakeArg;
  wrapOverlay =
    overlay: final: prev:
    overlay (systemSpecificVars prev.system) final prev;
  importOverlay = path: wrapOverlay (rawImport path);
in
{

  backports = importOverlay ./backports.nix;

  fromFlake = importOverlay ./fromFlake.nix;

  systemd-radv-fadeout = importOverlay ./systemd-radv-fadeout;

}
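A sketch of the shapes involved (names from this file): each raw overlay here is curried as

  flakeArg: systemArg: final: prev: { ... }

and wrapOverlay supplies the extra systemArg from `prev.system`, yielding the standard overlay signature nixpkgs expects:

  final: prev: { ... }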
@ -1,9 +0,0 @@
{ outputs, ... }@flakeArg:
{ ... }@systemArg:
final: prev: {
  inherit (outputs.packages.${prev.system})
    # list all universally compatible packages from ./../packages
    librespot-auth
    nft-update-addresses
    ;
}
@ -1,8 +0,0 @@
# TODO until https://github.com/systemd/systemd/issues/29651 is fixed
{ outputs, ... }@flakeArg:
{ ... }@systemArg:
final: prev: {
  systemd = prev.systemd.overrideAttrs (old: {
    patches = old.patches ++ [ ./patch.patch ];
  });
}
@ -1,55 +0,0 @@
commit b09851c2be354e592802fe9209b4cd6150bd818d
Author: Felix Stupp <felix.stupp@banananet.work>
Date:   Tue Sep 3 20:33:03 2024 +0000

    network/radv: fade out announced prefixes rather than just deleting them

    Fixes https://github.com/systemd/systemd/issues/29651

diff --git a/src/libsystemd-network/sd-radv.c b/src/libsystemd-network/sd-radv.c
index c384d4e627..537203c13b 100644
--- a/src/libsystemd-network/sd-radv.c
+++ b/src/libsystemd-network/sd-radv.c
@@ -679,6 +679,8 @@ void sd_radv_remove_prefix(
         if (!prefix)
                 return;
 
+        const char *addr_p = IN6_ADDR_PREFIX_TO_STRING(prefix, prefixlen);
+
         LIST_FOREACH(prefix, cur, ra->prefixes) {
                 if (prefixlen != cur->opt.prefixlen)
                         continue;
@@ -686,9 +688,32 @@
                 if (!in6_addr_equal(prefix, &cur->opt.in6_addr))
                         continue;
 
+                /* "Fade out" IPv6 prefix, i.e. informing clients about its sudden invalidity.
+                 * Realized by announcing the prefix with preferred=0 & valid=2h.
+                 * This makes clients rotate to a newer prefix if one is already defined.
+                 */
+
+                // TODO is copying required here? (& does it do anything at all?)
+                sd_radv_prefix *p = cur;
+
+                // TODO replace hacky way to get current time
+                uint64_t current_time = cur->valid_until - cur->lifetime_valid_usec;
+                // TODO replace constant with setting (valid lifetime) ?
+                uint64_t two_hours_usec = (uint64_t) 2 * 60 * 60 * 1000000;
+                sd_radv_prefix_set_preferred_lifetime(p, 0, current_time);
+                sd_radv_prefix_set_valid_lifetime(p, two_hours_usec, current_time + two_hours_usec);
+
+                // TODO is full replacement procedure required or can we just edit the stored prefix without this?
+                // procedure cloned from sd_radv_add_prefix, if found. I do not call this here because of the duplicated search in the list & because of the different logging message
+                sd_radv_prefix_ref(p);
                 LIST_REMOVE(prefix, ra->prefixes, cur);
-                ra->n_prefixes--;
                 sd_radv_prefix_unref(cur);
+                LIST_APPEND(prefix, ra->prefixes, p);
+
+                log_radv(ra, "Fade out IPv6 prefix %s (preferred: %s, valid: %s)",
+                         addr_p,
+                         FORMAT_TIMESPAN(p->lifetime_preferred_usec, USEC_PER_SEC),
+                         FORMAT_TIMESPAN(p->lifetime_valid_usec, USEC_PER_SEC));
                 return;
         }
 }
@ -1,65 +0,0 @@
{
  inputs,
  lib,
  outputs,
  ...
}@flakeArg:
{ pkgs, system, ... }@sysArg:
let
  craneLib = inputs.crane.mkLib pkgs;
in
{

  librespot-auth = pkgs.callPackage ./librespot-auth { inherit craneLib; };

  nft-update-addresses = pkgs.callPackage ./nft-update-addresses { };

  secrix-wrapper =
    let
      secrixExe = outputs.apps.${system}.secrix.program;
    in
    pkgs.writeShellApplication {
      name = "secr";
      text = ''
        secrix() {
          set -x
          exec ${secrixExe} "$@"
        }

        help() {
          echo "Usages:"
          echo "  $0 [create|rekey|edit|encrypt] <system> [<args> …] <file>"
          echo "  $0 decrypt [<args> …] <file>"
        }

        main() {
          if [[ $# -lt 1 ]]; then
            help
            exit 0
          fi
          cmd="$1"
          shift 1
          case "$cmd" in
            help|-h|--help)
              help
              ;;
            create)
              secrix "$cmd" --all-users --system "$@"
              ;;
            rekey|edit)
              secrix "$cmd" --identity "$SECRIX_ID" --all-users --system "$@"
              ;;
            encrypt)
              secrix "$cmd" --all-users --system "$@"
              ;;
            decrypt)
              secrix "$cmd" --identity "$SECRIX_ID" "$@"
              ;;
            *)
              # reject unknown commands instead of silently succeeding
              help
              exit 1
              ;;
          esac
        }

        main "$@"
      '';
    };

}
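A usage sketch of the wrapper (host name and file path hypothetical; SECRIX_ID is assumed to point at the SSH identity used for decryption):

# edit an existing secret for host "myhost"
secr edit myhost secrets/wg-key.age
# decrypt a secret locally
secr decrypt secrets/wg-key.age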
@ -1,35 +0,0 @@
{
  stdenv,
  lib,
  fetchFromGitHub,
  # from flake
  craneLib,
}:

# TODO temporarily useful due to https://github.com/hrkfdn/ncspot/issues/1500
craneLib.buildPackage rec {
  pname = "librespot-auth";
  version = "0.1.1";

  src = fetchFromGitHub {
    owner = "dspearson";
    repo = "librespot-auth";
    rev = "v${version}";
    hash = "sha256-IbbArRSKpnljhZSgL0b3EjVzKWN7bk6t0Bv7TkYr8FI=";
  };

  buildInputs = [ ];

  enableParallelBuilding = true;

  # Ensure Rust compiles for the right target
  env.CARGO_BUILD_TARGET = stdenv.hostPlatform.rust.rustcTarget;

  meta = with lib; {
    description = "A simple program for populating a `credentials.json` for librespot via Spotify's zeroconf authentication";
    homepage = "https://github.com/dspearson/librespot-auth";
    changelog = "https://github.com/dspearson/librespot-auth/releases/tag/v${version}";
    license = licenses.isc;
    mainProgram = "librespot-auth";
  };
}
@ -1,933 +0,0 @@
#!/usr/bin/env python3

from __future__ import annotations

from abc import (
    ABC,
    abstractmethod,
)
import argparse
from collections import defaultdict
from collections.abc import (
    Mapping,
    Sequence,
)
from datetime import (
    datetime,
    timedelta,
)
from enum import (
    Enum,
    Flag,
    auto,
)
from functools import cached_property
import io
from ipaddress import (
    IPv4Interface,
    IPv6Interface,
    IPv6Network,
)
from itertools import chain
import json
import logging
from logging.handlers import SysLogHandler
import os
from pathlib import Path
import re
import shlex
from signal import SIGHUP, signal
from string import Template
import subprocess
import threading
from threading import (
    RLock,
    Timer,
)
import traceback
from typing import (
    Any,
    Iterable,
    Literal,
    NewType,
    NoReturn,
    Protocol,
    TypeAlias,
    TypeGuard,
    TypeVar,
    Union,
    cast,
)

from attrs import (
    define,
    field,
)
from systemd import daemon  # type: ignore[import-untyped]
from systemd.journal import JournalHandler  # type: ignore[import-untyped]


logger = logging.getLogger(__name__)


def raise_and_exit(args: Any) -> None:
    Timer(0.01, os._exit, args=(1,)).start()
    logger.error(repr(args.exc_value))
    logger.error(
        "\n".join(traceback.format_tb(args.exc_traceback))
        if args.exc_traceback != None
        else "traceback from thread got lost!"
    )
    raise args.exc_value or Exception(f"{args.exc_type} (exception details got lost)")


# ensure exceptions in any thread bring the program down
# important for proper error detection via tests & in random cases in real world
threading.excepthook = raise_and_exit


JsonVal: TypeAlias = Union["JsonObj", "JsonList", str, int, bool]
JsonList: TypeAlias = Sequence[JsonVal]
JsonObj: TypeAlias = Mapping[str, JsonVal]

T = TypeVar("T", contravariant=True)

MACAddress = NewType("MACAddress", str)
"""format: aabbccddeeff (lower-case, without separators)"""
IPInterface: TypeAlias = IPv4Interface | IPv6Interface
NftProtocol = NewType("NftProtocol", str)  # e.g. tcp, udp, …
Port = NewType("Port", int)
IfName = NewType("IfName", str)
NftTable = NewType("NftTable", str)


def to_mac(mac_str: str) -> MACAddress:
    eui48 = re.sub(r"[.:_-]", "", mac_str.lower())
    if not is_mac(eui48):
        raise ValueError(f"invalid MAC address / EUI48: {mac_str}")
    return MACAddress(eui48)


def is_mac(mac_str: str) -> TypeGuard[MACAddress]:
    return re.match(r"^[0-9a-f]{12}$", mac_str) != None


def to_port(port_str: str | int) -> Port:
    try:
        port = int(port_str)
    except ValueError as exc:
        raise ValueError(f"invalid port number: {port_str}") from exc
    if not is_port(port):
        raise ValueError(f"invalid port number: {port_str}")
    return Port(port)


def is_port(port: int) -> TypeGuard[Port]:
    return 0 < port < 65536


def slaac_eui48(prefix: IPv6Network, eui48: MACAddress) -> IPv6Interface:
    if prefix.prefixlen > 64:
        raise ValueError(
            f"a SLAAC IPv6 address requires a prefix with CIDR of at least /64, got {prefix}"
        )
    eui64 = eui48[0:6] + "fffe" + eui48[6:]
    modified = hex(int(eui64[0:2], 16) ^ 2)[2:].zfill(2) + eui64[2:]
    euil = int(modified, 16)
    return IPv6Interface(f"{prefix[euil].compressed}/{prefix.prefixlen}")
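
# Worked example (values illustrative):
#   slaac_eui48(IPv6Network("2001:db8::/64"), to_mac("00:25:96:12:34:56"))
#   EUI-64 insert:  "002596" + "fffe" + "123456" -> 002596fffe123456
#   flip U/L bit:   0x00 ^ 2 = 0x02              -> 022596fffe123456
#   result:         IPv6Interface("2001:db8::225:96ff:fe12:3456/64")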


class UpdateHandler(Protocol[T]):
    def update(self, data: T) -> None:
        ...

    def update_stack(self, data: Sequence[T]) -> None:
        ...


class UpdateStackHandler(UpdateHandler[T], ABC):
    def update(self, data: T) -> None:
        return self._update_stack((data,))

    def update_stack(self, data: Sequence[T]) -> None:
        if len(data) <= 0:
            logger.warning(
                f"[bug, please report upstream] received empty data in update_stack. Traceback:\n{''.join(traceback.format_stack())}"
            )
            return
        return self._update_stack(data)

    @abstractmethod
    def _update_stack(self, data: Sequence[T]) -> None:
        ...


class IgnoreHandler(UpdateStackHandler[object]):
    def _update_stack(self, data: Sequence[object]) -> None:
        return


@define(
    kw_only=True,
    slots=False,
)
class UpdateBurstHandler(UpdateStackHandler[T]):
    burst_interval: float
    handler: Sequence[UpdateHandler[T]]
    __lock: RLock = field(factory=RLock)
    __updates: list[T] = field(factory=list)
    __timer: Timer | None = None

    def _update_stack(self, data: Sequence[T]) -> None:
        with self.__lock:
            self.__updates.extend(data)
            self.__refresh_timer()

    def __refresh_timer(self) -> None:
        with self.__lock:
            if self.__timer is not None:
                # try to cancel;
                # not a problem if the timer already elapsed but processing has
                # not really started yet, because updates are accessed under the lock
                self.__timer.cancel()
            self.__timer = Timer(
                interval=self.burst_interval,
                function=self.__process_updates,
            )
            self.__timer.start()

    def __process_updates(self) -> None:
        with self.__lock:
            self.__timer = None
            if not self.__updates:
                return
            updates = self.__updates
            self.__updates = []
            for handler in self.handler:
                handler.update_stack(updates)
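
# Usage sketch (sink object hypothetical): updates arriving within
# burst_interval seconds are coalesced into a single update_stack() call.
#   burst = UpdateBurstHandler[int](burst_interval=0.1, handler=[sink])
#   burst.update(1)
#   burst.update(2)
#   # ~0.1s later, sink.update_stack([1, 2]) fires exactly once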


class IpFlag(Flag):
    dynamic = auto()
    mngtmpaddr = auto()
    noprefixroute = auto()
    temporary = auto()
    # NOTE member name must match the flag word printed by iproute2 ("tentative")
    # for the lookup in parse_str below to succeed
    tentative = auto()

    @staticmethod
    def parse_str(flags_str: Sequence[str], ignore_unknown: bool = True) -> IpFlag:
        flags = IpFlag(0)
        for flag in flags_str:
            flag = flag.lower()
            member = IpFlag.__members__.get(flag)
            if member is not None:
                flags |= member
            elif not ignore_unknown:
                raise Exception(f"Unrecognized IpFlag: {flag}")
        return flags


IPv6_ULA_NET = IPv6Network("fc00::/7")  # because ip.is_private is wrong

# parses output of "ip -o address" / "ip -o monitor address"
IP_MON_PATTERN = re.compile(
    r"""(?x)^
    (?P<deleted>[Dd]eleted\s+)?
    (?P<ifindex>\d+):\s+
    (?P<ifname>\S+)\s+
    (?P<type>inet6?)\s+
    (?P<ip>\S+)\s+
    #(?:metric\s+\S+\s+)? # sometimes possible
    #(?:brd\s+\S+\s+)? # broadcast IP on inet
    (?:\S+\s+\S+\s+)* # abstracted irrelevant attributes
    (?:scope\s+(?P<scope>\S+)\s+)
    (?P<flags>(?:(\S+)\s)*) # (single spaces required for parser below to work correctly)
    (?:\S+)? # random interface name repetition on inet
    [\\]\s+
    valid_lft\s+(
        (?P<valid_lft_sec>\d+)sec
        |
        (?P<valid_lft_forever>forever)
    )
    \s+
    preferred_lft\s+(
        (?P<preferred_lft_sec>\d+)sec
        |
        (?P<preferred_lft_forever>forever)
    )
    \s*
    $"""
)
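
# Example of a line this pattern is meant to match (illustrative, shaped like
# "ip -o address" output):
#   2: eth0    inet6 2001:db8::1/64 scope global dynamic mngtmpaddr \       valid_lft 86391sec preferred_lft 14391sec
# -> ifindex=2, ifname="eth0", ip="2001:db8::1/64", scope="global",
#    flags="dynamic mngtmpaddr ", valid_lft_sec="86391", preferred_lft_sec="14391"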


class SpecialIpUpdate(Enum):
    FLUSH_RULES = auto()


@define(
    frozen=True,
    kw_only=True,
)
class IpAddressUpdate:
    deleted: bool
    ifindex: int
    ifname: IfName
    ip: IPInterface
    scope: str
    flags: IpFlag
    valid_until: datetime
    preferred_until: datetime

    @classmethod
    def parse_line(cls, line: str) -> IpAddressUpdate:
        m = IP_MON_PATTERN.search(line)
        if not m:
            raise Exception(f"Could not parse ip monitor output: {line!r}")
        grp = m.groupdict()
        ip_type: type[IPInterface] = (
            IPv6Interface if grp["type"] == "inet6" else IPv4Interface
        )
        try:
            ip = ip_type(grp["ip"])
        except ValueError as e:
            raise Exception(
                f"Could not parse ip monitor output, invalid IP: {grp['ip']!r}"
            ) from e
        flags = IpFlag.parse_str(grp["flags"].strip().split(" "))
        return IpAddressUpdate(
            deleted=grp["deleted"] != None,
            ifindex=int(grp["ifindex"]),
            ifname=IfName(grp["ifname"]),
            ip=ip,
            scope=grp["scope"],
            flags=flags,
            valid_until=cls.__parse_lifetime(grp, "valid_lft"),
            preferred_until=cls.__parse_lifetime(grp, "preferred_lft"),
        )

    @staticmethod
    def __parse_lifetime(grp: Mapping[str, str | None], name: str) -> datetime:
        if grp[f"{name}_forever"] != None:
            return datetime.now() + timedelta(days=30)
        sec = grp[f"{name}_sec"]
        if sec is None:
            raise ValueError(
                "IP address update parse error: expected regex group for seconds != None (bug in code)"
            )
        return datetime.now() + timedelta(seconds=int(sec))


def kickoff_ip(
    ip_cmd: list[str],
    handler: UpdateHandler[IpAddressUpdate],
) -> None:
    res = subprocess.run(
        ip_cmd + ["-o", "address", "show"],
        check=True,
        stdout=subprocess.PIPE,
        text=True,
    )
    for line in res.stdout.splitlines(keepends=False):
        line = line.rstrip()
        if line == "":
            continue
        update = IpAddressUpdate.parse_line(line)
        logger.debug(f"pass IP update: {update!r}")
        handler.update(update)


def monitor_ip(
    ip_cmd: list[str],
    handler: UpdateHandler[IpAddressUpdate],
) -> NoReturn:
    proc = subprocess.Popen(
        ip_cmd + ["-o", "monitor", "address"],
        stdout=subprocess.PIPE,
        text=True,
    )
    # initial kickoff (AFTER starting monitoring, to not miss any update)
    logger.info("kickoff IP monitoring with current data")
    kickoff_ip(ip_cmd, handler)
    logger.info("start regular monitoring")
    while True:
        rc = proc.poll()
        if rc != None:
            # flush stdout for easier debugging
            logger.error("Last stdout of monitor process:")
            logger.error(proc.stdout.read())  # type: ignore[union-attr]
            raise Exception(f"Monitor process crashed with returncode {rc}")
        line = proc.stdout.readline().rstrip()  # type: ignore[union-attr]
        if not line:
            continue
        logger.info("IP change detected")
        update = IpAddressUpdate.parse_line(line)
        logger.debug(f"pass IP update: {update!r}")
        handler.update(update)


class InterfaceUpdateHandler(UpdateStackHandler[IpAddressUpdate | SpecialIpUpdate]):
    # TODO regularly check (e.g. every 1 hour) if stored lists are still correct
    slaac_prefix: IPv6Interface | None

    def __init__(
        self,
        config: InterfaceConfig,
        nft_handler: UpdateHandler[NftUpdate],
    ) -> None:
        self.nft_handler = nft_handler
        self.lock = RLock()
        self.config = config
        self.addrs = dict[IPInterface, IpAddressUpdate]()
        self.slaac_prefix = None

    def _update_stack(self, data: Sequence[IpAddressUpdate | SpecialIpUpdate]) -> None:
        nft_updates = tuple(
            chain.from_iterable(self.__parse_update(single) for single in data)
        )
        if len(nft_updates) <= 0:
            return
        self.nft_handler.update_stack(nft_updates)

    def __parse_update(
        self, data: IpAddressUpdate | SpecialIpUpdate
    ) -> Iterable[NftUpdate]:
        if isinstance(data, SpecialIpUpdate):
            if data is not SpecialIpUpdate.FLUSH_RULES:
                raise ValueError(f"unknown special update {data!r}")
            # TODO maybe flush all sets completely, for good measure
            for addr in self.addrs.keys():
                yield from self.__update_network_sets(addr, deleted=True)
            self.addrs = dict()
            yield from self.__empty_slaac_sets()
            self.slaac_prefix = None
            return
        if data.ifname != self.config.ifname:
            return
        if data.ip.is_link_local:
            logger.debug(
                f"{self.config.ifname}: ignore change for IP {data.ip} because link-local"
            )
            return
        if IpFlag.temporary in data.flags:
            logger.debug(
                f"{self.config.ifname}: ignore change for IP {data.ip} because temporary"
            )
            return  # ignore IPv6 privacy extension addresses
        if IpFlag.tentative in data.flags:
            logger.debug(
                f"{self.config.ifname}: ignore change for IP {data.ip} because tentative"
            )
            return  # ignore (yet) tentative addresses
        logger.debug(f"{self.config.ifname}: process change of IP {data.ip}")
        with self.lock:
            stored = data.ip in self.addrs
            changed = stored != (not data.deleted)
            if data.deleted:
                if not changed:
                    return  # no updates required
                logger.info(f"{self.config.ifname}: deleted IP {data.ip}")
                del self.addrs[data.ip]
            else:
                if not stored:
                    logger.info(f"{self.config.ifname}: discovered IP {data.ip}")
                self.addrs[data.ip] = data  # keep entry up to date
            if changed:
                yield from self.__update_network_sets(data.ip, data.deleted)
            # even if "not changed", still check SLAAC rules because of lifetimes
            slaac_prefix = self.__select_slaac_prefix()
            if self.slaac_prefix == slaac_prefix:
                return  # no SLAAC updates required
            self.slaac_prefix = slaac_prefix
            logger.info(f"{self.config.ifname}: change main SLAAC prefix to {slaac_prefix}")
            yield from (
                self.__empty_slaac_sets()
                if slaac_prefix is None
                else self.__update_slaac_sets(slaac_prefix)
            )

    def __update_network_sets(
        self,
        ip: IPInterface,
        deleted: bool = False,
    ) -> Iterable[NftUpdate]:
        set_prefix = f"{self.config.ifname}v{ip.version}"
        op = NftValueOperation.if_deleted(deleted)
        yield NftUpdate(
            obj_type="set",
            obj_name=f"all_ipv{ip.version}net",
            operation=op,
            values=(f"{self.config.ifname} . {ip.network.compressed}",),
        )
        yield NftUpdate(
            obj_type="set",
            obj_name=f"{set_prefix}net",
            operation=op,
            values=(ip.network.compressed,),
        )
        yield NftUpdate(
            obj_type="set",
            obj_name=f"all_ipv{ip.version}addr",
            operation=op,
            values=(f"{self.config.ifname} . {ip.ip.compressed}",),
        )
        yield NftUpdate(
            obj_type="set",
            obj_name=f"{set_prefix}addr",
            operation=op,
            values=(ip.ip.compressed,),
        )

    def __update_slaac_sets(self, ip: IPv6Interface) -> Iterable[NftUpdate]:
        set_prefix = f"{self.config.ifname}v6"
        op = NftValueOperation.REPLACE
        slaacs = {mac: slaac_eui48(ip.network, mac) for mac in self.config.macs}
        for mac in self.config.macs:
            yield NftUpdate(
                obj_type="set",
                obj_name=f"{set_prefix}_{mac}",
                operation=op,
                values=(slaacs[mac].ip.compressed,),
            )
        slaacs_sub = {
            f"ipv6_{self.config.ifname}_{mac}": addr.ip.compressed
            for mac, addr in slaacs.items()
        }
        for one_set in self.config.sets:
            yield NftUpdate(
                obj_type=one_set.set_type,
                obj_name=one_set.name,
                operation=op,
                values=tuple(one_set.sub_elements(slaacs_sub)),
            )

    def __empty_slaac_sets(self) -> Iterable[NftUpdate]:
        set_prefix = f"{self.config.ifname}v6"
        op = NftValueOperation.EMPTY
        for mac in self.config.macs:
            yield NftUpdate(
                obj_type="set",
                obj_name=f"{set_prefix}_{mac}",
                operation=op,
                values=tuple(),
            )
        for one_set in self.config.sets:
            yield NftUpdate(
                obj_type=one_set.set_type,
                obj_name=one_set.name,
                operation=op,
                values=tuple(),
            )

    def __select_slaac_prefix(self) -> IPv6Interface | None:
        now = datetime.now()
        valid = tuple(data for data in self.addrs.values() if data.ip.version == 6)
        if len(valid) <= 0:
            return None
        selected = max(
            valid,
            key=lambda data: (
                # prefer valid
                1 if now < data.valid_until else 0,
                # prefer global unicast addresses
                1 if data.ip not in IPv6_ULA_NET else 0,
                # if preferred, take longest preferred
                max(now, data.preferred_until),
                # otherwise longest valid
                data.valid_until,
            ),
        )
        return cast(IPv6Interface, selected.ip)

    def gen_set_definitions(self) -> str:
        output = []
        for ip_v in [4, 6]:
            addr_type = f"ipv{ip_v}_addr"
            set_prefix = f"{self.config.ifname}v{ip_v}"
            output.append(gen_set_def("set", f"{set_prefix}addr", addr_type))
            output.append(gen_set_def("set", f"{set_prefix}net", addr_type, "interval"))
            if ip_v != 6:
                continue
            for mac in self.config.macs:
                output.append(gen_set_def("set", f"{set_prefix}_{mac}", addr_type))
        output.extend(s.definition for s in self.config.sets)
        return "\n".join(output)


def gen_set_def(
    set_type: str,
    name: str,
    data_type: str,
    flags: str | None = None,
    elements: Sequence[str] = tuple(),
) -> str:
    return "\n".join(
        line
        for line in (
            f"{set_type} {name} " + "{",
            f"  type {data_type}",
            f"  flags {flags}" if flags is not None else None,
            "  elements = { " + ", ".join(elements) + " }"
            if len(elements) > 0
            else None,
            "}",
        )
        if line is not None
    )
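
# Example (illustrative): gen_set_def("set", "eth0v6addr", "ipv6_addr", "interval")
# returns the following nftables snippet:
#   set eth0v6addr {
#     type ipv6_addr
#     flags interval
#   }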


class NftValueOperation(Enum):
    ADD = auto()
    DELETE = auto()
    REPLACE = auto()
    EMPTY = auto()

    @staticmethod
    def if_deleted(b: bool) -> NftValueOperation:
        return NftValueOperation.DELETE if b else NftValueOperation.ADD

    @staticmethod
    def if_emptied(b: bool) -> NftValueOperation:
        return NftValueOperation.EMPTY if b else NftValueOperation.REPLACE

    @property
    def set_operation(self) -> str:
        assert self.passes_values
        return "destroy" if self == NftValueOperation.DELETE else "add"

    @property
    def passes_values(self) -> bool:
        return self in {
            NftValueOperation.ADD,
            NftValueOperation.REPLACE,
            NftValueOperation.DELETE,
        }

    @property
    def flushes_values(self) -> bool:
        return self in {
            NftValueOperation.REPLACE,
            NftValueOperation.EMPTY,
        }


@define(
    frozen=True,
    kw_only=True,
)
class NftUpdate:
    obj_type: str
    obj_name: str
    operation: NftValueOperation
    values: Sequence[str]

    def to_script(self, table: NftTable) -> str:
        lines = []
        # inet family is the only one which supports shared IPv4 & IPv6 entries
        obj_id = f"inet {table} {self.obj_name}"
        if self.operation.flushes_values:
            lines.append(f"flush {self.obj_type} {obj_id}")
        if self.operation.passes_values and len(self.values) > 0:
            op_str = self.operation.set_operation
            values_str = ", ".join(self.values)
            lines.append(f"{op_str} element {obj_id} {{ {values_str} }}")
        return "\n".join(lines)
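
# Example (illustrative): a REPLACE update first flushes, then re-adds elements.
#   NftUpdate(obj_type="set", obj_name="eth0v6addr",
#             operation=NftValueOperation.REPLACE,
#             values=("2001:db8::1",)).to_script(NftTable("filter"))
# produces:
#   flush set inet filter eth0v6addr
#   add element inet filter eth0v6addr { 2001:db8::1 }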


class NftUpdateHandler(UpdateStackHandler[NftUpdate]):
    def __init__(
        self,
        update_cmd: Sequence[str],
        table: NftTable,
        handler: UpdateHandler[None],
    ) -> None:
        self.update_cmd = update_cmd
        self.table = table
        self.handler = handler

    def _update_stack(self, data: Sequence[NftUpdate]) -> None:
        logger.debug("compile stacked updates for nftables")
        script = "\n".join(
            map(
                lambda u: u.to_script(table=self.table),
                data,
            )
        )
        logger.debug(f"pass updates to nftables:\n{script}")
        subprocess.run(
            list(self.update_cmd) + ["-f", "-"],
            input=script,
            check=True,
            text=True,
        )
        self.handler.update(None)


class SystemdHandler(UpdateHandler[object]):
    def update(self, data: object) -> None:
        # TODO improve status updates
        daemon.notify("READY=1\nSTATUS=operating …\n")

    def update_stack(self, data: Sequence[object]) -> None:
        self.update(None)


@define(
    frozen=True,
    kw_only=True,
)
class SetConfig:
    ifname: str
    set_type: str
    name: str
    data_type: str
    flags: str | None
    elements: Sequence[Template] = field()

    @elements.validator
    def __elem_validate(self, attribute: str, value: Sequence[Template]) -> None:
        regex = self.__supported_vars
        for temp in self.elements:
            for var in temp.get_identifiers():
                m = regex.search(var)
                if m is None:
                    raise ValueError(
                        f"set {self.name!r} for if {self.ifname!r} uses invalid template variable {var!r}"
                    )

    @property
    def __supported_vars(self) -> re.Pattern[str]:
        return re.compile(rf"^ipv6_{re.escape(self.ifname)}_(?P<mac>[0-9a-f]{{12}})$")

    @property
    def embedded_macs(self) -> Iterable[MACAddress]:
        regex = self.__supported_vars
        for temp in self.elements:
            for var in temp.get_identifiers():
                m = regex.search(var)
                assert m != None
                yield to_mac(m.group("mac"))  # type: ignore[union-attr]

    @property
    def definition(self) -> str:
        return gen_set_def(
            set_type=self.set_type,
            name=self.name,
            data_type=self.data_type,
            flags=self.flags,
            # non matching rules at the beginning (in static part)
            # to verify that all supplied patterns are correct
            # undefined address should be safest to use here, because:
            # - as src, it is valid, but if one can spoof this one, it can spoof other addresses (and routers should have simple anti-spoof mechanisms in place)
            # - as dest, it is invalid
            # - as NAT target, it is invalid
            elements=self.sub_elements(defaultdict(lambda: "::")),
        )

    def sub_elements(self, substitutions: Mapping[str, str]) -> Sequence[str]:
        return tuple(elem.substitute(substitutions) for elem in self.elements)

    @classmethod
    def from_json(cls, *, ifname: str, name: str, obj: JsonObj) -> SetConfig:
        assert set(obj.keys()) <= set(("set_type", "name", "type", "flags", "elements"))
        set_type = obj["set_type"]
        assert isinstance(set_type, str)
        data_type = obj["type"]
        assert isinstance(data_type, str)
        flags = obj.get("flags")
        assert flags is None or isinstance(flags, str)
        elements = obj["elements"]
        assert isinstance(elements, Sequence) and all(
            isinstance(elem, str) for elem in elements
        )
        templates = tuple(map(lambda s: Template(cast(str, s)), elements))
        return SetConfig(
            set_type=set_type,
            ifname=ifname,
            name=name,
            data_type=data_type,
            flags=flags,
            elements=templates,
        )
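
# Example JSON fragment accepted by SetConfig.from_json (names illustrative);
# template variables must match the pattern ipv6_<ifname>_<mac>:
#   "fwd_allow": {
#     "set_type": "set",
#     "type": "ipv6_addr",
#     "elements": ["${ipv6_eth0_aabbccddeeff}"]
#   }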


@define(
    frozen=True,
    kw_only=True,
)
class InterfaceConfig:
    ifname: IfName
    macs_direct: Sequence[MACAddress]
    sets: Sequence[SetConfig]

    @cached_property
    def macs(self) -> Sequence[MACAddress]:
        return tuple(
            set(
                chain(
                    self.macs_direct,
                    (mac for one_set in self.sets for mac in one_set.embedded_macs),
                )
            )
        )

    @staticmethod
    def from_json(ifname: str, obj: JsonObj) -> InterfaceConfig:
        assert set(obj.keys()) <= set(("macs", "sets"))
        macs = obj.get("macs")
        assert macs is None or isinstance(macs, Sequence)
        sets = obj.get("sets")
        assert sets is None or isinstance(sets, Mapping)
        return InterfaceConfig(
            ifname=IfName(ifname),
            macs_direct=tuple()
            if macs is None
            else tuple(to_mac(cast(str, mac)) for mac in macs),
            sets=tuple()
            if sets is None
            else tuple(
                SetConfig.from_json(
                    ifname=ifname, name=name, obj=cast(JsonObj, one_set)
                )
                for name, one_set in sets.items()
            ),
        )


@define(
    frozen=True,
    kw_only=True,
)
class AppConfig:
    nft_table: NftTable
    interfaces: Sequence[InterfaceConfig]

    @staticmethod
    def from_json(obj: JsonObj) -> AppConfig:
        assert set(obj.keys()) <= set(("interfaces", "nftTable"))
        nft_table = obj["nftTable"]
        assert isinstance(nft_table, str)
        interfaces = obj["interfaces"]
        assert isinstance(interfaces, Mapping)
        return AppConfig(
            nft_table=NftTable(nft_table),
            interfaces=tuple(
                InterfaceConfig.from_json(ifname, cast(JsonObj, if_cfg))
                for ifname, if_cfg in interfaces.items()
            ),
        )


def read_config_file(path: Path) -> AppConfig:
    with path.open("r") as fh:
        json_data = json.load(fh)
    logger.debug(repr(json_data))
    return AppConfig.from_json(json_data)
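
# Example of a complete config file (values illustrative):
#   {
#     "nftTable": "filter",
#     "interfaces": {
#       "eth0": {
#         "macs": ["aa:bb:cc:dd:ee:ff"],
#         "sets": {
#           "fwd_allow": {
#             "set_type": "set",
#             "type": "ipv6_addr",
#             "elements": ["${ipv6_eth0_aabbccddeeff}"]
#           }
#         }
#       }
#     }
#   }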


LOG_LEVEL_MAP = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG,
}


def _gen_if_updater(
    configs: Sequence[InterfaceConfig], nft_updater: UpdateHandler[NftUpdate]
) -> Sequence[InterfaceUpdateHandler]:
    return tuple(
        InterfaceUpdateHandler(
            config=if_cfg,
            nft_handler=nft_updater,
        )
        for if_cfg in configs
    )


def static_part_generation(config: AppConfig) -> None:
    for ipV in [4, 6]:
        print(gen_set_def("set", f"all_ipv{ipV}addr", f"ifname . ipv{ipV}_addr"))
        print(
            gen_set_def(
                "set", f"all_ipv{ipV}net", f"ifname . ipv{ipV}_addr", "interval"
            )
        )
    dummy = IgnoreHandler()
    if_updater = _gen_if_updater(config.interfaces, dummy)
    for if_up in if_updater:
        print(if_up.gen_set_definitions())


def on_service_reload(
    ip_cmd: list[str], handler: UpdateHandler[IpAddressUpdate | SpecialIpUpdate]
) -> None:
    # for now, reloading is kind of a hack to be able to react to nftables.service reloads,
    # because then we need to re-apply all of our rules again
    logger.info(
        "reload signal received; reapply all rules (config file will not be read on reload)"
    )
    daemon.notify("RELOADING=1\nSTATUS=reloading all rules …\n")
    handler.update(SpecialIpUpdate.FLUSH_RULES)
    kickoff_ip(ip_cmd, handler)


def service_execution(args: argparse.Namespace, config: AppConfig) -> NoReturn:
    nft_updater = NftUpdateHandler(
        table=config.nft_table,
        update_cmd=shlex.split(args.nft_command),
        handler=SystemdHandler(),
    )
    nft_burst_handler = UpdateBurstHandler[NftUpdate](
        burst_interval=0.1,
        handler=(nft_updater,),
    )
    if_updater = _gen_if_updater(config.interfaces, nft_burst_handler)
    burst_handler = UpdateBurstHandler[IpAddressUpdate | SpecialIpUpdate](
        burst_interval=0.1,
        handler=if_updater,
    )
    ip_cmd = shlex.split(args.ip_command)
    # in case of systemd service reload
    signal(SIGHUP, lambda *_a, **_b: on_service_reload(ip_cmd, burst_handler))
    monitor_ip(ip_cmd, burst_handler)


def setup_logging(args: Any) -> None:
    systemd_service = os.environ.get("INVOCATION_ID") and Path("/dev/log").exists()
    if systemd_service:
        logger.setLevel(logging.DEBUG)
        logger.addHandler(JournalHandler(SYSLOG_IDENTIFIER="nft-update-addresses"))
    else:
        logging.basicConfig()  # get output to stdout/stderr
        logger.setLevel(LOG_LEVEL_MAP[args.log_level])


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config-file", required=True)
    parser.add_argument("--check-config", action="store_true")
    parser.add_argument("--output-set-definitions", action="store_true")
    parser.add_argument("--ip-command", default="/usr/bin/env ip")
    parser.add_argument("--nft-command", default="/usr/bin/env nft")
    parser.add_argument(
        "-l",
        "--log-level",
        default="error",
        choices=LOG_LEVEL_MAP.keys(),
        help="Log level for outputs to stdout/stderr (ignored when launched in a systemd service)",
    )
    args = parser.parse_args()
    setup_logging(args)
    config = read_config_file(Path(args.config_file))
    if args.check_config:
        return
    if args.output_set_definitions:
        return static_part_generation(config)
    service_execution(args, config)


if __name__ == "__main__":
    main()
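# Usage sketch (script name assumed from the package name above):
#   nft-update-addresses -c /etc/nft-update-addresses.json --check-config
#   nft-update-addresses -c config.json --output-set-definitions > static-sets.nft
#   nft-update-addresses -c config.json -l info   # run the monitoring service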