blob: 0aaed8d2032cb4a648f7bc3cc39ccc30e20e8780 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
|
{
  pkgs,
  options,
  lib,
  ...
}:
let
  inherit (lib) getExe getExe' genAttrs;
  # we use tailscale as the main network provider
  # for multiple essential services.
  # it's important that tailscale starts up prior
  # to them.
  vitalServices = [
    "sshd"
    "docker"
    "nginx"
  ];
  # all metrics exporters are exposed through the tailnet,
  # they also need to wait for the tailscale0 interface to be up.
  # note that we modify all existing exporter modules declared in
  # nixpkgs, not only the ones enabled on this host; ordering-only
  # `after` entries are harmless for units that do not exist.
  monitoringServices = map (e: "prometheus-${e}-exporter") (
    builtins.attrNames options.services.prometheus.exporters.value
  );
  servicesNeedingTailscale = vitalServices ++ monitoringServices;
  serviceConfigAfterTailscale = {
    after = [ "tailscaled.service" ];
  };
in
{
  # start tailscale :)
  services.tailscale = {
    enable = true;
    useRoutingFeatures = "both";
    extraUpFlags = [ "--ssh" ];
  };
  # tailscale0 is the only interface that is allowed
  # to fully bypass the firewall.
  networking.firewall.trustedInterfaces = [ "tailscale0" ];
  systemd.services = {
    # the tailscaled systemd service is
    # too eager in marking itself ready,
    # so we need to ensure it's actually
    # up before proceeding with other units.
    # see: https://github.com/tailscale/tailscale/issues/11504
    "tailscaled".postStart = with pkgs; ''
      echo "Waiting for tailscale0 to come online..."
      # poll for up to 30 seconds for an IPv4 address on tailscale0.
      # (the original loop read `for try in in {1..30}` — the duplicated
      # `in` made bash iterate over the literal word "in" as well.)
      for try in {1..30}; do
        if ${getExe' iproute2 "ip"} addr show dev tailscale0 | ${getExe gnugrep} -q 'inet '; then
          echo "tailscale0 is up!"
          break
        fi
        sleep 1
      done
      # best-effort: warn (but do not fail the unit) if the interface
      # never came up, so the condition is at least visible in the journal.
      if ! ${getExe' iproute2 "ip"} addr show dev tailscale0 | ${getExe gnugrep} -q 'inet '; then
        echo "warning: tailscale0 did not come up within 30s; continuing anyway" >&2
      fi
    '';
  } // genAttrs servicesNeedingTailscale (_: serviceConfigAfterTailscale);
}
|