author     Katharina Fey <kookie@spacekookie.de>    2020-02-03 09:26:35 +0100
committer  Katharina Fey <kookie@spacekookie.de>    2020-02-03 09:26:35 +0100
commit     899a451e08f7d6d2c8214d119c2a0316849a0ed4 (patch)
tree       5e72a7288b7d2b33fead36fbfe91a02a48ff7fef /nixpkgs/nixos
parent     5962418b6543dfb3ca34965c0fa16dd77543801b (diff)
parent     a21c2fa3ea2b88e698db6fc151d9c7259ae14d96 (diff)
Merge commit 'a21c2fa3ea2b88e698db6fc151d9c7259ae14d96'
Diffstat (limited to 'nixpkgs/nixos')
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/declarative-packages.xml | 6
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/luks-file-systems.xml | 34
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/x-windows.xml | 9
-rw-r--r--  nixpkgs/nixos/doc/manual/development/option-types.xml | 36
-rwxr-xr-x  nixpkgs/nixos/doc/manual/development/releases.xml | 6
-rw-r--r--  nixpkgs/nixos/doc/manual/man-nixos-install.xml | 2
-rw-r--r--  nixpkgs/nixos/doc/manual/man-pages.xml | 2
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2003.xml | 156
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test-driver.py | 27
-rw-r--r--  nixpkgs/nixos/lib/testing-python.nix | 2
-rw-r--r--  nixpkgs/nixos/lib/testing/jquery-ui.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/hardware/opengl.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix | 35
-rw-r--r--  nixpkgs/nixos/modules/hardware/usb-wwan.nix | 13
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5-new-kernel.nix (renamed from nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-kde-new-kernel.nix) | 2
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix (renamed from nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-kde.nix) | 0
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-rebuild.sh | 16
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/tools.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/misc/version.nix | 16
-rw-r--r--  nixpkgs/nixos/modules/module-list.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/programs/geary.nix | 20
-rw-r--r--  nixpkgs/nixos/modules/programs/gnupg.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/programs/liboping.nix | 22
-rw-r--r--  nixpkgs/nixos/modules/programs/sway.nix | 19
-rw-r--r--  nixpkgs/nixos/modules/programs/traceroute.nix | 26
-rw-r--r--  nixpkgs/nixos/modules/programs/way-cooler.nix | 78
-rw-r--r--  nixpkgs/nixos/modules/rename.nix | 14
-rw-r--r--  nixpkgs/nixos/modules/services/amqp/rabbitmq.nix | 13
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/buildkite-agent.nix | 98
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/databases/openldap.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/databases/victoriametrics.nix | 70
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome3/at-spi2-core.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/actkbd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/usbmuxd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailman.nix | 277
-rw-r--r--  nixpkgs/nixos/modules/services/mail/postfix.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/services/mail/roundcube.nix | 79
-rw-r--r--  nixpkgs/nixos/modules/services/mail/spamassassin.nix | 27
-rw-r--r--  nixpkgs/nixos/modules/services/misc/freeswitch.nix | 103
-rw-r--r--  nixpkgs/nixos/modules/services/misc/gitea.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/misc/home-assistant.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/misc/paperless.nix | 14
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix | 21
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix | 90
-rw-r--r--  nixpkgs/nixos/modules/services/networking/bitlbee.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/services/networking/corerad.nix | 46
-rw-r--r--  nixpkgs/nixos/modules/services/networking/dhcpcd.nix | 27
-rw-r--r--  nixpkgs/nixos/modules/services/networking/keybase.nix | 11
-rw-r--r--  nixpkgs/nixos/modules/services/networking/knot.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/kresd.nix | 34
-rw-r--r--  nixpkgs/nixos/modules/services/networking/matterbridge.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/nat.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/ndppd.nix | 20
-rw-r--r--  nixpkgs/nixos/modules/services/networking/syncthing.nix | 18
-rw-r--r--  nixpkgs/nixos/modules/services/networking/unifi.nix | 15
-rw-r--r--  nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix | 7
-rw-r--r--  nixpkgs/nixos/modules/services/networking/xandikos.nix | 148
-rw-r--r--  nixpkgs/nixos/modules/services/networking/zerotierone.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/services/search/solr.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/services/security/bitwarden_rs/default.nix | 44
-rw-r--r--  nixpkgs/nixos/modules/services/security/certmgr.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/security/fail2ban.nix | 306
-rw-r--r--  nixpkgs/nixos/modules/services/security/sshguard.nix | 13
-rw-r--r--  nixpkgs/nixos/modules/services/security/vault.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/torrent/transmission.nix | 16
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix | 272
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/nextcloud.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix | 37
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix | 54
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix | 33
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix | 53
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/unit/default.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix | 26
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix | 14
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix | 11
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/x11/hardware/multitouch.nix | 94
-rw-r--r--  nixpkgs/nixos/modules/services/x11/unclutter.nix | 7
-rw-r--r--  nixpkgs/nixos/modules/services/x11/xserver.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/system/activation/activation-script.nix | 33
-rw-r--r--  nixpkgs/nixos/modules/system/boot/luksroot.nix | 79
-rw-r--r--  nixpkgs/nixos/modules/system/boot/networkd.nix | 40
-rw-r--r--  nixpkgs/nixos/modules/system/boot/systemd-lib.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/system/boot/systemd.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/powertop.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/amazon-init.nix | 11
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker-containers.nix | 55
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxd.nix | 44
-rw-r--r--  nixpkgs/nixos/release-combined.nix | 2
-rw-r--r--  nixpkgs/nixos/release.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/all-tests.nix | 7
-rw-r--r--  nixpkgs/nixos/tests/bittorrent.nix | 155
-rw-r--r--  nixpkgs/nixos/tests/blivet.nix | 87
-rw-r--r--  nixpkgs/nixos/tests/buildkite-agent.nix | 36
-rw-r--r--  nixpkgs/nixos/tests/certmgr.nix | 12
-rw-r--r--  nixpkgs/nixos/tests/chromium.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/common/auto.nix (renamed from nixpkgs/nixos/modules/services/x11/display-managers/auto.nix) | 4
-rw-r--r--  nixpkgs/nixos/tests/common/ec2.nix | 4
-rw-r--r--  nixpkgs/nixos/tests/common/x11.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/corerad.nix | 70
-rw-r--r--  nixpkgs/nixos/tests/docker-containers.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/docker-tools.nix | 3
-rw-r--r--  nixpkgs/nixos/tests/dokuwiki.nix | 29
-rw-r--r--  nixpkgs/nixos/tests/ec2.nix | 45
-rw-r--r--  nixpkgs/nixos/tests/elk.nix | 111
-rw-r--r--  nixpkgs/nixos/tests/freeswitch.nix | 29
-rw-r--r--  nixpkgs/nixos/tests/gnome3.nix | 64
-rw-r--r--  nixpkgs/nixos/tests/graphite.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/i3wm.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/initdb.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/kafka.nix | 44
-rw-r--r--  nixpkgs/nixos/tests/limesurvey.nix | 29
-rw-r--r--  nixpkgs/nixos/tests/networking-proxy.nix | 108
-rw-r--r--  nixpkgs/nixos/tests/openstack-image.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/postgresql.nix | 45
-rw-r--r--  nixpkgs/nixos/tests/proxy.nix | 143
-rw-r--r--  nixpkgs/nixos/tests/riak.nix | 25
-rw-r--r--  nixpkgs/nixos/tests/signal-desktop.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/solr.nix | 101
-rw-r--r--  nixpkgs/nixos/tests/systemd.nix | 95
-rw-r--r--  nixpkgs/nixos/tests/victoriametrics.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/virtualbox.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/xandikos.nix | 70
-rw-r--r--  nixpkgs/nixos/tests/xautolock.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/xfce.nix | 14
-rw-r--r--  nixpkgs/nixos/tests/xmonad.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/xrdp.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/xss-lock.nix | 4
-rw-r--r--  nixpkgs/nixos/tests/yabar.nix | 2
135 files changed, 3248 insertions, 1269 deletions
diff --git a/nixpkgs/nixos/doc/manual/configuration/declarative-packages.xml b/nixpkgs/nixos/doc/manual/configuration/declarative-packages.xml
index 5fb3bcb9f8f..cd84d1951d2 100644
--- a/nixpkgs/nixos/doc/manual/configuration/declarative-packages.xml
+++ b/nixpkgs/nixos/doc/manual/configuration/declarative-packages.xml
@@ -19,6 +19,12 @@
<command>nixos-rebuild switch</command>.
</para>
+ <note>
+ <para>
+ Some packages require additional global configuration, such as D-Bus or systemd service registration, so adding them to <xref linkend="opt-environment.systemPackages"/> might not be sufficient. You are advised to check the <link xlink:href="#ch-options">list of options</link> to see whether a NixOS module exists for the package.
+ </para>
+ </note>
+
<para>
You can get a list of the available packages as follows:
<screen>
diff --git a/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.xml b/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.xml
index 8a2b107e0ee..d3007843d68 100644
--- a/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.xml
+++ b/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.xml
@@ -37,4 +37,38 @@ Enter passphrase for /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d: ***
on an encrypted partition, it is necessary to add the following grub option:
<programlisting><xref linkend="opt-boot.loader.grub.enableCryptodisk"/> = true;</programlisting>
</para>
+ <section xml:id="sec-luks-file-systems-fido2">
+ <title>FIDO2</title>
+
+ <para>
+ NixOS also supports unlocking your LUKS-encrypted file system using a FIDO2-compatible token. In the following example, we will create a new FIDO2 credential
+ and add it as a new key to our existing device <filename>/dev/sda2</filename>:
+
+ <screen>
+# export FIDO2_LABEL="/dev/sda2 @ $HOSTNAME"
+# fido2luks credential "$FIDO2_LABEL"
+f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7
+
+# fido2luks -i add-key /dev/sda2 f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7
+Password:
+Password (again):
+Old password:
+Old password (again):
+Added to key to device /dev/sda2, slot: 2
+</screen>
+
+ To ensure that this file system is decrypted using the FIDO2-compatible key, add the following to <filename>configuration.nix</filename>:
+<programlisting>
+<link linkend="opt-boot.initrd.luks.fido2Support">boot.initrd.luks.fido2Support</link> = true;
+<link linkend="opt-boot.initrd.luks.devices._name__.fido2.credential">boot.initrd.luks.devices."/dev/sda2".fido2.credential</link> = "f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7";
+</programlisting>
+
+ You can also use the FIDO2 passwordless setup, but for security reasons you might want to enable it only if your device is PIN-protected, such as a <link xlink:href="https://trezor.io/">Trezor</link>.
+
+<programlisting>
+<link linkend="opt-boot.initrd.luks.devices._name__.fido2.passwordLess">boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess</link> = true;
+</programlisting>
+ </para>
+ </section>
+
</section>
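Putting the pieces above together, here is a minimal configuration.nix sketch for FIDO2 unlocking (hedged: the device path and credential string are the example values from the text above, not real secrets; substitute your own):

  { config, lib, pkgs, ... }:
  {
    # Enable FIDO2 support in the initrd and attach the enrolled credential
    # to the LUKS device (values taken from the example above).
    boot.initrd.luks.fido2Support = true;
    boot.initrd.luks.devices."/dev/sda2".fido2.credential =
      "f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7";
    # Optional passwordless unlock, best reserved for PIN-protected tokens.
    # boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess = true;
  }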
diff --git a/nixpkgs/nixos/doc/manual/configuration/x-windows.xml b/nixpkgs/nixos/doc/manual/configuration/x-windows.xml
index 55ad9fe6e65..06dd7c8bfb9 100644
--- a/nixpkgs/nixos/doc/manual/configuration/x-windows.xml
+++ b/nixpkgs/nixos/doc/manual/configuration/x-windows.xml
@@ -85,11 +85,14 @@
<programlisting>
<xref linkend="opt-services.xserver.displayManager.defaultSession"/> = "none+i3";
</programlisting>
- And, finally, to enable auto-login for a user <literal>johndoe</literal>:
+ Every display manager in NixOS supports auto-login. Here is an example
+ using LightDM for a user <literal>alice</literal>:
<programlisting>
-<xref linkend="opt-services.xserver.displayManager.auto.enable"/> = true;
-<xref linkend="opt-services.xserver.displayManager.auto.user"/> = "johndoe";
+<xref linkend="opt-services.xserver.displayManager.lightdm.enable"/> = true;
+<xref linkend="opt-services.xserver.displayManager.lightdm.autoLogin.enable"/> = true;
+<xref linkend="opt-services.xserver.displayManager.lightdm.autoLogin.user"/> = "alice";
</programlisting>
+ The options are named identically for all other display managers.
</para>
</simplesect>
<simplesect xml:id="sec-x11-graphics-cards-nvidia">
diff --git a/nixpkgs/nixos/doc/manual/development/option-types.xml b/nixpkgs/nixos/doc/manual/development/option-types.xml
index 1ec7e3efad7..957349ad181 100644
--- a/nixpkgs/nixos/doc/manual/development/option-types.xml
+++ b/nixpkgs/nixos/doc/manual/development/option-types.xml
@@ -257,9 +257,9 @@
<listitem>
<para>
A set of sub options <replaceable>o</replaceable>.
- <replaceable>o</replaceable> can be an attribute set or a function
- returning an attribute set. Submodules are used in composed types to
- create modular options. This is equivalent to
+ <replaceable>o</replaceable> can be an attribute set, a function
+ returning an attribute set, or a path to a file containing such a value. Submodules are used in
+ composed types to create modular options. This is equivalent to
<literal>types.submoduleWith { modules = toList o; shorthandOnlyDefinesConfig = true; }</literal>.
Submodules are detailed in
<xref
@@ -352,6 +352,36 @@
An attribute set of where all the values are of
<replaceable>t</replaceable> type. Multiple definitions result in the
joined attribute set.
+ <note><para>
+ This type is <emphasis>strict</emphasis> in its values, which in turn
+ means attributes cannot depend on other attributes. See <varname>
+ types.lazyAttrsOf</varname> for a lazy version.
+ </para></note>
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>
+ <varname>types.lazyAttrsOf</varname> <replaceable>t</replaceable>
+ </term>
+ <listitem>
+ <para>
+ An attribute set where all the values are of type
+ <replaceable>t</replaceable>. Multiple definitions result in the
+ joined attribute set. This is the lazy version of
+ <varname>types.attrsOf</varname>, allowing attributes to depend on each other.
+ <warning><para>
+ This version does not fully support conditional definitions! With an
+ option <varname>foo</varname> of this type and a definition
+ <literal>foo.attr = lib.mkIf false 10</literal>, evaluating
+ <literal>foo ? attr</literal> will return <literal>true</literal>
+ even though it should be false. Accessing the value will then throw
+ an error. For types <replaceable>t</replaceable> that have an
+ <literal>emptyValue</literal> defined, that value will be returned
+ instead of throwing an error. So if the type of <literal>foo.attr</literal>
+ was <literal>lazyAttrsOf (nullOr int)</literal>, <literal>null</literal>
+ would be returned instead for the same <literal>mkIf false</literal> definition.
+ </para></warning>
</para>
</listitem>
</varlistentry>
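As an illustration of the laziness described above (a hedged sketch; the option name example.ports is hypothetical and not part of the patch), a module using types.lazyAttrsOf can let one attribute be computed from another, which the strict types.attrsOf would reject with an infinite recursion:

  { lib, config, ... }:
  {
    options.example.ports = lib.mkOption {
      # Values are kept as thunks, so attributes may refer to each other
      # through `config` without forcing the whole set at once.
      type = lib.types.lazyAttrsOf lib.types.int;
      default = { };
    };

    config.example.ports = {
      http = 8080;
      # Derived from another attribute of the same set; only possible lazily.
      metrics = config.example.ports.http + 1000;
    };
  }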
diff --git a/nixpkgs/nixos/doc/manual/development/releases.xml b/nixpkgs/nixos/doc/manual/development/releases.xml
index 9371af9984d..a22a0a3707b 100755
--- a/nixpkgs/nixos/doc/manual/development/releases.xml
+++ b/nixpkgs/nixos/doc/manual/development/releases.xml
@@ -187,7 +187,7 @@
</listitem>
<listitem>
<para>
- Update "Chapter 4. Upgrading NixOS" section of the manual to match
+ Update "Chapter 4. Upgrading NixOS" section of the manual to match
new stable release version.
</para>
</listitem>
@@ -237,6 +237,10 @@
experience.
</para>
<para>
+ Release managers for the current NixOS release are tracked by GitHub team
+ <link xlink:href="https://github.com/orgs/NixOS/teams/nixos-release-managers/members"><literal>@NixOS/nixos-release-managers</literal></link>.
+ </para>
+ <para>
A release manager's role and responsibilities are:
</para>
<itemizedlist>
diff --git a/nixpkgs/nixos/doc/manual/man-nixos-install.xml b/nixpkgs/nixos/doc/manual/man-nixos-install.xml
index 0752c397182..9255ce763ef 100644
--- a/nixpkgs/nixos/doc/manual/man-nixos-install.xml
+++ b/nixpkgs/nixos/doc/manual/man-nixos-install.xml
@@ -210,7 +210,7 @@
The closure must be an appropriately configured NixOS system, with boot
loader and partition configuration that fits the target host. Such a
closure is typically obtained with a command such as <command>nix-build
- -I nixos-config=./configuration.nix '&lt;nixos&gt;' -A system
+ -I nixos-config=./configuration.nix '&lt;nixpkgs/nixos&gt;' -A system
--no-out-link</command>
</para>
</listitem>
diff --git a/nixpkgs/nixos/doc/manual/man-pages.xml b/nixpkgs/nixos/doc/manual/man-pages.xml
index f5a1dd2d69f..49acfe7330b 100644
--- a/nixpkgs/nixos/doc/manual/man-pages.xml
+++ b/nixpkgs/nixos/doc/manual/man-pages.xml
@@ -6,7 +6,7 @@
<author><personname><firstname>Eelco</firstname><surname>Dolstra</surname></personname>
<contrib>Author</contrib>
</author>
- <copyright><year>2007-2019</year><holder>Eelco Dolstra</holder>
+ <copyright><year>2007-2020</year><holder>Eelco Dolstra</holder>
</copyright>
</info>
<xi:include href="man-configuration.xml" />
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2003.xml b/nixpkgs/nixos/doc/manual/release-notes/rl-2003.xml
index ca319dfea41..13981c0853d 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2003.xml
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2003.xml
@@ -126,7 +126,7 @@ services.xserver.displayManager.defaultSession = "xfce+icewm";
<listitem>
<para>
The <literal>dynamicHosts</literal> option has been removed from the
- <link linkend="opt-networking.networkmanager.enable">networkd</link>
+ <link linkend="opt-networking.networkmanager.enable">NetworkManager</link>
module. Allowing (multiple) regular users to override host entries
affecting the whole system opens up a huge attack vector.
There seem to be very rare cases where this might be useful.
@@ -170,6 +170,12 @@ services.xserver.displayManager.defaultSession = "xfce+icewm";
</listitem>
<listitem>
<para>
+ The Way Cooler Wayland compositor has been removed, as the project has been officially canceled.
+ The <literal>way-cooler</literal> attribute and the <literal>programs.way-cooler</literal> options no longer exist.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
The BEAM package set has been deleted. You will only find there the different interpreters.
You should now use the different build tools coming with the languages with sandbox mode disabled.
</para>
@@ -391,6 +397,148 @@ users.users.me =
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/63103">PR #63103</link>.
</para>
</listitem>
+ <listitem>
+ <para>
+ For NixOS modules, the types <literal>types.submodule</literal> and <literal>types.submoduleWith</literal> now support
+ paths as allowed values, similar to how <literal>imports</literal> supports paths.
+ Because of this, if you have a module that defines an option of type
+ <literal>either (submodule ...) path</literal>, it will break since a path
+ is now treated as the first type instead of the second. To fix this, change
+ the type to <literal>either path (submodule ...)</literal>.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ The <link linkend="opt-services.buildkite-agent.enable">Buildkite Agent</link>
+ module and corresponding packages have been updated to 3.x.
+ While doing so, the following options have been changed:
+ </para>
+ <itemizedlist>
+ <listitem>
+ <para>
+ <literal>services.buildkite-agent.meta-data</literal> has been renamed to
+ <link linkend="opt-services.buildkite-agent.tags">services.buildkite-agent.tags</link>,
+ to match upstream's naming for 3.x.
+ Its type has also changed - it now accepts an attrset of strings.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ The <literal>services.buildkite-agent.openssh.publicKeyPath</literal> option
+ has been removed, as it's not necessary to deploy public keys to clone private
+ repositories.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ <literal>services.buildkite-agent.openssh.privateKeyPath</literal>
+ has been renamed to
+ <link linkend="opt-services.buildkite-agent.privateSshKeyPath">buildkite-agent.privateSshKeyPath</link>,
+ as the <literal>openssh</literal> attribute set now contained only that single option.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ <link linkend="opt-services.buildkite-agent.shell">services.buildkite-agent.shell</link>
+ has been introduced, allowing a custom shell to be specified.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem>
+ <para>
+ The <literal>citrix_workspace_19_3_0</literal> package has been removed as
+ it will be EOLed within the lifespan of 20.03. For further information,
+ please refer to the <link xlink:href="https://www.citrix.com/de-de/support/product-lifecycle/milestones/receiver.html">support and maintenance information</link> from upstream.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ The <literal>gcc5</literal> and <literal>gfortran5</literal> packages have been removed.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ The <option>services.xserver.displayManager.auto</option> module has been removed.
+ It was only intended for use in internal NixOS tests, and gave the false impression
+ of it being a special display manager when it's actually LightDM.
+ Please use the <xref linkend="opt-services.xserver.displayManager.lightdm.autoLogin"/> options instead,
+ or any other display manager in NixOS, as they all support auto-login. If you used this module specifically
+ because it permitted root auto-login, you can override the lightdm-autologin PAM module like this:
+<programlisting>
+<link xlink:href="#opt-security.pam.services._name__.text">security.pam.services.lightdm-autologin.text</link> = lib.mkForce ''
+ auth requisite pam_nologin.so
+ auth required pam_succeed_if.so quiet
+ auth required pam_permit.so
+
+ account include lightdm
+
+ password include lightdm
+
+ session include lightdm
+'';
+</programlisting>
+ The difference is the
+<programlisting>
+auth required pam_succeed_if.so quiet
+</programlisting>
+ line, whereas the default is:
+<programlisting>
+auth required pam_succeed_if.so uid >= 1000 quiet
+</programlisting>
+ which does not permit users with UIDs below 1000 (such as root).
+ All other display managers in NixOS are configured like this.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ There have been lots of improvements to the Mailman module. As
+ a result,
+ </para>
+ <itemizedlist>
+ <listitem>
+ <para>
+ The <option>services.mailman.hyperkittyBaseUrl</option>
+ option has been renamed to <xref
+ linkend="opt-services.mailman.hyperkitty.baseUrl"/>.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ The <option>services.mailman.hyperkittyApiKey</option>
+ option has been removed. This is because having an option
+ for the Hyperkitty API key meant that the API key would be
+ stored in the world-readable Nix store, which was a
+ security vulnerability. A new Hyperkitty API key will be
+ generated the first time the new Hyperkitty service is run,
+ and it will then be persisted outside of the Nix store. To
+ continue using Hyperkitty, you must set <xref
+ linkend="opt-services.mailman.hyperkitty.enable"/> to
+ <literal>true</literal>.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ Additionally, some Postfix configuration must now be set
+ manually instead of automatically by the Mailman module:
+<programlisting>
+<xref linkend="opt-services.postfix.relayDomains"/> = [ "hash:/var/lib/mailman/data/postfix_domains" ];
+<xref linkend="opt-services.postfix.config"/>.transport_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
+<xref linkend="opt-services.postfix.config"/>.local_recipient_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
+</programlisting>
+ This is because some users may want to include other values
+ in these lists as well, and this was not possible if they
+ were set automatically by the Mailman module. It would not
+ have been possible to just concatenate values from multiple
+ modules each setting the values they needed, because the
+ order of elements in the list is significant.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem>
+ <para>The LLVM versions 3.5, 3.9 and 4 (including the corresponding Clang versions) have been dropped.</para>
+ </listitem>
</itemizedlist>
</section>
@@ -431,6 +579,12 @@ users.users.me =
now uses the short rather than full version string.
</para>
</listitem>
+ <listitem>
+ <para>
+ It is now possible to unlock LUKS-encrypted file systems using a FIDO2 token
+ via <option>boot.initrd.luks.fido2Support</option>.
+ </para>
+ </listitem>
</itemizedlist>
</section>
</section>
diff --git a/nixpkgs/nixos/lib/test-driver/test-driver.py b/nixpkgs/nixos/lib/test-driver/test-driver.py
index 7e575189209..75f80df53f2 100644
--- a/nixpkgs/nixos/lib/test-driver/test-driver.py
+++ b/nixpkgs/nixos/lib/test-driver/test-driver.py
@@ -84,7 +84,7 @@ CHAR_TO_KEY = {
# Forward references
nr_tests: int
-nr_succeeded: int
+failed_tests: list
log: "Logger"
machines: "List[Machine]"
@@ -221,7 +221,7 @@ class Machine:
return path
self.state_dir = create_dir("vm-state-{}".format(self.name))
- self.shared_dir = create_dir("{}/xchg".format(self.state_dir))
+ self.shared_dir = create_dir("shared-xchg")
self.booted = False
self.connected = False
@@ -395,7 +395,7 @@ class Machine:
status_code_pattern = re.compile(r"(.*)\|\!EOF\s+(\d+)")
while True:
- chunk = self.shell.recv(4096).decode()
+ chunk = self.shell.recv(4096).decode(errors="ignore")
match = status_code_pattern.match(chunk)
if match:
output += match[1]
@@ -576,7 +576,7 @@ class Machine:
vm_src = pathlib.Path(source)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = pathlib.Path(shared_td)
- vm_shared_temp = pathlib.Path("/tmp/xchg") / shared_temp.name
+ vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / vm_src.name
intermediate = shared_temp / vm_src.name
# Copy the file to the shared directory inside VM
@@ -704,7 +704,8 @@ class Machine:
def process_serial_output() -> None:
for _line in self.process.stdout:
- line = _line.decode("unicode_escape").replace("\r", "").rstrip()
+ # Ignore undecodable bytes that may occur in boot menus
+ line = _line.decode(errors="ignore").replace("\r", "").rstrip()
eprint("{} # {}".format(self.name, line))
self.logger.enqueue({"msg": line, "machine": self.name})
@@ -841,23 +842,31 @@ def run_tests() -> None:
machine.execute("sync")
if nr_tests != 0:
+ nr_succeeded = nr_tests - len(failed_tests)
eprint("{} out of {} tests succeeded".format(nr_succeeded, nr_tests))
- if nr_tests > nr_succeeded:
+ if len(failed_tests) > 0:
+ eprint(
+ "The following tests have failed:\n - {}".format(
+ "\n - ".join(failed_tests)
+ )
+ )
sys.exit(1)
@contextmanager
def subtest(name: str) -> Iterator[None]:
global nr_tests
- global nr_succeeded
+ global failed_tests
with log.nested(name):
nr_tests += 1
try:
yield
- nr_succeeded += 1
return True
except Exception as e:
+ failed_tests.append(
+ 'Test "{}" failed with error: "{}"'.format(name, str(e))
+ )
log.log("error: {}".format(str(e)))
return False
@@ -879,7 +888,7 @@ if __name__ == "__main__":
exec("\n".join(machine_eval))
nr_tests = 0
- nr_succeeded = 0
+ failed_tests = []
@atexit.register
def clean_up() -> None:
diff --git a/nixpkgs/nixos/lib/testing-python.nix b/nixpkgs/nixos/lib/testing-python.nix
index 3d09be3b6cd..a7f6d792651 100644
--- a/nixpkgs/nixos/lib/testing-python.nix
+++ b/nixpkgs/nixos/lib/testing-python.nix
@@ -155,7 +155,7 @@ in rec {
--add-flags "''${vms[*]}" \
${lib.optionalString enableOCR
"--prefix PATH : '${ocrProg}/bin:${imagemagick_tiff}/bin'"} \
- --run "export testScript=\"\$(cat $out/test-script)\"" \
+ --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\"" \
--set VLANS '${toString vlans}'
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
wrapProgram $out/bin/nixos-run-vms \
diff --git a/nixpkgs/nixos/lib/testing/jquery-ui.nix b/nixpkgs/nixos/lib/testing/jquery-ui.nix
index e65107a3c2f..abd59da2d28 100644
--- a/nixpkgs/nixos/lib/testing/jquery-ui.nix
+++ b/nixpkgs/nixos/lib/testing/jquery-ui.nix
@@ -4,7 +4,7 @@ stdenv.mkDerivation rec {
name = "jquery-ui-1.11.4";
src = fetchurl {
- url = "http://jqueryui.com/resources/download/${name}.zip";
+ url = "https://jqueryui.com/resources/download/${name}.zip";
sha256 = "0ciyaj1acg08g8hpzqx6whayq206fvf4whksz2pjgxlv207lqgjh";
};
@@ -17,7 +17,7 @@ stdenv.mkDerivation rec {
'';
meta = {
- homepage = http://jqueryui.com/;
+ homepage = https://jqueryui.com/;
description = "A library of JavaScript widgets and effects";
platforms = stdenv.lib.platforms.all;
};
diff --git a/nixpkgs/nixos/modules/hardware/opengl.nix b/nixpkgs/nixos/modules/hardware/opengl.nix
index 89dc5008df5..28cddea8b79 100644
--- a/nixpkgs/nixos/modules/hardware/opengl.nix
+++ b/nixpkgs/nixos/modules/hardware/opengl.nix
@@ -43,11 +43,11 @@ in
description = ''
Whether to enable OpenGL drivers. This is needed to enable
OpenGL support in X11 systems, as well as for Wayland compositors
- like sway, way-cooler and Weston. It is enabled by default
+ like sway and Weston. It is enabled by default
by the corresponding modules, so you do not usually have to
set it yourself, only if there is no module for your wayland
- compositor of choice. See services.xserver.enable,
- programs.sway.enable, and programs.way-cooler.enable.
+ compositor of choice. See services.xserver.enable and
+ programs.sway.enable.
'';
type = types.bool;
default = false;
diff --git a/nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix b/nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix
new file mode 100644
index 00000000000..898eed24493
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix
@@ -0,0 +1,35 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.hardware.tuxedo-keyboard;
+ tuxedo-keyboard = config.boot.kernelPackages.tuxedo-keyboard;
+in
+ {
+ options.hardware.tuxedo-keyboard = {
+ enable = mkEnableOption ''
+ Enables the tuxedo-keyboard driver.
+
+ To configure the driver, pass the options to the <option>boot.kernelParams</option> configuration.
+ There are several parameters you can change. It's best to check the source code description to see which options are supported.
+ You can find all the supported parameters at: <link xlink:href="https://github.com/tuxedocomputers/tuxedo-keyboard#kernelparam" />
+
+ In order to use the <literal>custom</literal> lighting with the maximum brightness and a color of <literal>0xff0a0a</literal>, one would pass <option>boot.kernelParams</option> like this:
+
+ <programlisting>
+ boot.kernelParams = [
+ "tuxedo_keyboard.mode=0"
+ "tuxedo_keyboard.brightness=255"
+ "tuxedo_keyboard.color_left=0xff0a0a"
+ ];
+ </programlisting>
+ '';
+ };
+
+ config = mkIf cfg.enable
+ {
+ boot.kernelModules = ["tuxedo_keyboard"];
+ boot.extraModulePackages = [ tuxedo-keyboard ];
+ };
+ }
diff --git a/nixpkgs/nixos/modules/hardware/usb-wwan.nix b/nixpkgs/nixos/modules/hardware/usb-wwan.nix
index 2d20421586a..679a6c6497c 100644
--- a/nixpkgs/nixos/modules/hardware/usb-wwan.nix
+++ b/nixpkgs/nixos/modules/hardware/usb-wwan.nix
@@ -21,6 +21,19 @@ with lib;
###### implementation
config = mkIf config.hardware.usbWwan.enable {
+ # Attaches device specific handlers.
services.udev.packages = with pkgs; [ usb-modeswitch-data ];
+
+ # Triggered by udev, usb-modeswitch creates systemd services via a
+ # template unit in the usb-modeswitch package.
+ systemd.packages = with pkgs; [ usb-modeswitch ];
+
+ # The systemd service requires the usb-modeswitch-data. The
+ # usb-modeswitch package intends to discover this via the
+ # filesystem at /usr/share/usb_modeswitch, and merge it with user
+ # configuration in /etc/usb_modeswitch.d. Configuring the correct
+ # path in the package is difficult, as it would cause a cyclic
+ # dependency.
+ environment.etc."usb_modeswitch.d".source = "${pkgs.usb-modeswitch-data}/share/usb_modeswitch";
};
}
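A minimal usage sketch for the module above; enabling it pulls in the usb-modeswitch udev rules, the systemd template unit and the /etc/usb_modeswitch.d data described in the comments:

  { config, pkgs, ... }:
  {
    # Enables mode switching for USB WWAN adapters via usb-modeswitch.
    hardware.usbWwan.enable = true;
  }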
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-kde-new-kernel.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5-new-kernel.nix
index 3336d512cfd..d98325a99ac 100644
--- a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-kde-new-kernel.nix
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5-new-kernel.nix
@@ -1,7 +1,7 @@
{ pkgs, ... }:
{
- imports = [ ./installation-cd-graphical-kde.nix ];
+ imports = [ ./installation-cd-graphical-plasma5.nix ];
boot.kernelPackages = pkgs.linuxPackages_latest;
}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-kde.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix
index e00d3f7535b..e00d3f7535b 100644
--- a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-kde.nix
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-rebuild.sh b/nixpkgs/nixos/modules/installer/tools/nixos-rebuild.sh
index c53dc1000c4..61b4af11027 100644
--- a/nixpkgs/nixos/modules/installer/tools/nixos-rebuild.sh
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-rebuild.sh
@@ -22,7 +22,7 @@ repair=
profile=/nix/var/nix/profiles/system
buildHost=
targetHost=
-maybeSudo=
+maybeSudo=()
while [ "$#" -gt 0 ]; do
i="$1"; shift 1
@@ -92,7 +92,7 @@ while [ "$#" -gt 0 ]; do
;;
--use-remote-sudo)
# note the trailing space
- maybeSudo="sudo "
+ maybeSudo=(sudo --)
shift 1
;;
*)
@@ -102,6 +102,10 @@ while [ "$#" -gt 0 ]; do
esac
done
+if [ -n "$SUDO_USER" ]; then
+ maybeSudo=(sudo --)
+fi
+
if [ -z "$buildHost" -a -n "$targetHost" ]; then
buildHost="$targetHost"
fi
@@ -116,17 +120,17 @@ buildHostCmd() {
if [ -z "$buildHost" ]; then
"$@"
elif [ -n "$remoteNix" ]; then
- ssh $SSHOPTS "$buildHost" env PATH="$remoteNix:$PATH" "$maybeSudo$@"
+ ssh $SSHOPTS "$buildHost" env PATH="$remoteNix:$PATH" "${maybeSudo[@]}" "$@"
else
- ssh $SSHOPTS "$buildHost" "$maybeSudo$@"
+ ssh $SSHOPTS "$buildHost" "${maybeSudo[@]}" "$@"
fi
}
targetHostCmd() {
if [ -z "$targetHost" ]; then
- "$@"
+ "${maybeSudo[@]}" "$@"
else
- ssh $SSHOPTS "$targetHost" "$maybeSudo$@"
+ ssh $SSHOPTS "$targetHost" "${maybeSudo[@]}" "$@"
fi
}
diff --git a/nixpkgs/nixos/modules/installer/tools/tools.nix b/nixpkgs/nixos/modules/installer/tools/tools.nix
index e4db39b5c81..5df9c23e6b6 100644
--- a/nixpkgs/nixos/modules/installer/tools/tools.nix
+++ b/nixpkgs/nixos/modules/installer/tools/tools.nix
@@ -159,10 +159,12 @@ in
# extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
# };
- # This value determines the NixOS release with which your system is to be
- # compatible, in order to avoid breaking some software such as database
- # servers. You should change this only after NixOS release notes say you
- # should.
+ # This value determines the NixOS release from which the default
+ # settings for stateful data, like file locations and database versions
+ # on your system, were taken. It's perfectly fine and recommended to leave
+ # this value at the release version of the first install of this system.
+ # Before changing this value, read the documentation for this option
+ # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "${config.system.nixos.release}"; # Did you read the comment?
}
diff --git a/nixpkgs/nixos/modules/misc/version.nix b/nixpkgs/nixos/modules/misc/version.nix
index 0540b493003..8a85035ceb7 100644
--- a/nixpkgs/nixos/modules/misc/version.nix
+++ b/nixpkgs/nixos/modules/misc/version.nix
@@ -6,6 +6,7 @@ let
cfg = config.system.nixos;
gitRepo = "${toString pkgs.path}/.git";
+ gitRepoValid = lib.pathIsGitRepo gitRepo;
gitCommitId = lib.substring 0 7 (commitIdFromGitRepo gitRepo);
in
@@ -61,11 +62,18 @@ in
configuration defaults in a way incompatible with stateful
data. For instance, if the default version of PostgreSQL
changes, the new version will probably be unable to read your
- existing databases. To prevent such breakage, you can set the
+ existing databases. To prevent such breakage, you should set the
value of this option to the NixOS release with which you want
- to be compatible. The effect is that NixOS will option
+ to be compatible. The effect is that NixOS will use
defaults corresponding to the specified release (such as using
an older version of PostgreSQL).
+ It's perfectly fine and recommended to leave this value at the
+ release version of the first install of this system.
+ Changing this option will not upgrade your system. In fact, it
+ is meant to stay constant exactly when you upgrade your system.
+ You should only bump this option if you are sure that you can migrate,
+ or have migrated, all state on your system that is affected
+ by this option.
'';
};
@@ -84,8 +92,8 @@ in
# These defaults are set here rather than up there so that
# changing them would not rebuild the manual
version = mkDefault (cfg.release + cfg.versionSuffix);
- revision = mkIf (pathIsDirectory gitRepo) (mkDefault gitCommitId);
- versionSuffix = mkIf (pathIsDirectory gitRepo) (mkDefault (".git." + gitCommitId));
+ revision = mkIf gitRepoValid (mkDefault gitCommitId);
+ versionSuffix = mkIf gitRepoValid (mkDefault (".git." + gitCommitId));
};
# Generate /etc/os-release. See
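As a concrete illustration of the advice above (the release number is only an example), a machine first installed from the 19.09 channel keeps its original value even after later channel upgrades:

  { config, pkgs, ... }:
  {
    # Set once at installation time and then left alone; upgrading the
    # channel does not change this value.
    system.stateVersion = "19.09";
  }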
diff --git a/nixpkgs/nixos/modules/module-list.nix b/nixpkgs/nixos/modules/module-list.nix
index d6886ba2c0b..29dbc39ac7e 100644
--- a/nixpkgs/nixos/modules/module-list.nix
+++ b/nixpkgs/nixos/modules/module-list.nix
@@ -62,6 +62,7 @@
./hardware/printers.nix
./hardware/raid/hpsa.nix
./hardware/steam-hardware.nix
+ ./hardware/tuxedo-keyboard.nix
./hardware/usb-wwan.nix
./hardware/onlykey.nix
./hardware/video/amdgpu.nix
@@ -116,6 +117,7 @@
./programs/fish.nix
./programs/freetds.nix
./programs/fuse.nix
+ ./programs/geary.nix
./programs/gnome-disks.nix
./programs/gnome-documents.nix
./programs/gnome-terminal.nix
@@ -127,6 +129,7 @@
./programs/java.nix
./programs/kbdlight.nix
./programs/less.nix
+ ./programs/liboping.nix
./programs/light.nix
./programs/mosh.nix
./programs/mininet.nix
@@ -152,13 +155,13 @@
./programs/system-config-printer.nix
./programs/thefuck.nix
./programs/tmux.nix
+ ./programs/traceroute.nix
./programs/tsm-client.nix
./programs/udevil.nix
./programs/usbtop.nix
./programs/venus.nix
./programs/vim.nix
./programs/wavemon.nix
- ./programs/way-cooler.nix
./programs/waybar.nix
./programs/wireshark.nix
./programs/x2goserver.nix
@@ -278,6 +281,7 @@
./services/databases/riak.nix
./services/databases/riak-cs.nix
./services/databases/stanchion.nix
+ ./services/databases/victoriametrics.nix
./services/databases/virtuoso.nix
./services/desktops/accountsservice.nix
./services/desktops/bamf.nix
@@ -424,6 +428,7 @@
./services/misc/exhibitor.nix
./services/misc/felix.nix
./services/misc/folding-at-home.nix
+ ./services/misc/freeswitch.nix
./services/misc/fstrim.nix
./services/misc/gammu-smsd.nix
./services/misc/geoip-updater.nix
@@ -577,6 +582,7 @@
./services/networking/connman.nix
./services/networking/consul.nix
./services/networking/coredns.nix
+ ./services/networking/corerad.nix
./services/networking/coturn.nix
./services/networking/dante.nix
./services/networking/ddclient.nix
@@ -736,6 +742,7 @@
./services/networking/wicd.nix
./services/networking/wireguard.nix
./services/networking/wpa_supplicant.nix
+ ./services/networking/xandikos.nix
./services/networking/xinetd.nix
./services/networking/xl2tpd.nix
./services/networking/xrdp.nix
@@ -803,6 +810,7 @@
./services/web-apps/codimd.nix
./services/web-apps/cryptpad.nix
./services/web-apps/documize.nix
+ ./services/web-apps/dokuwiki.nix
./services/web-apps/frab.nix
./services/web-apps/gotify-server.nix
./services/web-apps/icingaweb2/icingaweb2.nix
@@ -860,7 +868,6 @@
./services/x11/unclutter.nix
./services/x11/unclutter-xfixes.nix
./services/x11/desktop-managers/default.nix
- ./services/x11/display-managers/auto.nix
./services/x11/display-managers/default.nix
./services/x11/display-managers/gdm.nix
./services/x11/display-managers/lightdm.nix
@@ -870,7 +877,6 @@
./services/x11/display-managers/xpra.nix
./services/x11/fractalart.nix
./services/x11/hardware/libinput.nix
- ./services/x11/hardware/multitouch.nix
./services/x11/hardware/synaptics.nix
./services/x11/hardware/wacom.nix
./services/x11/hardware/digimend.nix
diff --git a/nixpkgs/nixos/modules/programs/geary.nix b/nixpkgs/nixos/modules/programs/geary.nix
new file mode 100644
index 00000000000..01803bc411e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/geary.nix
@@ -0,0 +1,20 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+ cfg = config.programs.geary;
+
+in {
+ options = {
+ programs.geary.enable = mkEnableOption "Geary, a Mail client for GNOME 3";
+ };
+
+ config = mkIf cfg.enable {
+ environment.systemPackages = [ pkgs.gnome3.geary ];
+ programs.dconf.enable = true;
+ services.gnome3.gnome-keyring.enable = true;
+ services.gnome3.gnome-online-accounts.enable = true;
+ };
+}
+
diff --git a/nixpkgs/nixos/modules/programs/gnupg.nix b/nixpkgs/nixos/modules/programs/gnupg.nix
index 2d262d90657..7a3cb588ee7 100644
--- a/nixpkgs/nixos/modules/programs/gnupg.nix
+++ b/nixpkgs/nixos/modules/programs/gnupg.nix
@@ -96,7 +96,7 @@ in
# This overrides the systemd user unit shipped with the gnupg package
systemd.user.services.gpg-agent = mkIf (cfg.agent.pinentryFlavor != null) {
serviceConfig.ExecStart = [ "" ''
- ${pkgs.gnupg}/bin/gpg-agent --supervised \
+ ${cfg.package}/bin/gpg-agent --supervised \
--pinentry-program ${pkgs.pinentry.${cfg.agent.pinentryFlavor}}/bin/pinentry
'' ];
};
diff --git a/nixpkgs/nixos/modules/programs/liboping.nix b/nixpkgs/nixos/modules/programs/liboping.nix
new file mode 100644
index 00000000000..4e4c235ccde
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/liboping.nix
@@ -0,0 +1,22 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.programs.liboping;
+in {
+ options.programs.liboping = {
+ enable = mkEnableOption "liboping";
+ };
+ config = mkIf cfg.enable {
+ environment.systemPackages = with pkgs; [ liboping ];
+ security.wrappers = mkMerge (map (
+ exec: {
+ "${exec}" = {
+ source = "${pkgs.liboping}/bin/${exec}";
+ capabilities = "cap_net_raw+p";
+ };
+ }
+ ) [ "oping" "noping" ]);
+ };
+}
diff --git a/nixpkgs/nixos/modules/programs/sway.nix b/nixpkgs/nixos/modules/programs/sway.nix
index e2a4018e902..7e646f8737d 100644
--- a/nixpkgs/nixos/modules/programs/sway.nix
+++ b/nixpkgs/nixos/modules/programs/sway.nix
@@ -28,6 +28,7 @@ let
swayPackage = pkgs.sway.override {
extraSessionCommands = cfg.extraSessionCommands;
+ extraOptions = cfg.extraOptions;
withBaseWrapper = cfg.wrapperFeatures.base;
withGtkWrapper = cfg.wrapperFeatures.gtk;
};
@@ -67,11 +68,27 @@ in {
'';
};
+ extraOptions = mkOption {
+ type = types.listOf types.str;
+ default = [];
+ example = [
+ "--verbose"
+ "--debug"
+ "--unsupported-gpu"
+ "--my-next-gpu-wont-be-nvidia"
+ ];
+ description = ''
+ Command line arguments passed to launch Sway. Please DO NOT report
+ issues if you use an unsupported GPU (proprietary drivers).
+ '';
+ };
+
extraPackages = mkOption {
type = with types; listOf package;
default = with pkgs; [
swaylock swayidle
- xwayland rxvt_unicode dmenu
+ xwayland alacritty dmenu
+ rxvt_unicode # For backward compatibility (old default terminal)
];
defaultText = literalExample ''
with pkgs; [ swaylock swayidle xwayland rxvt_unicode dmenu ];
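A hedged sketch of how the new extraOptions option might be used in a configuration; the flags are taken from the example list above:

  { config, pkgs, ... }:
  {
    programs.sway = {
      enable = true;
      # Passed on the sway command line at launch.
      extraOptions = [ "--verbose" "--unsupported-gpu" ];
    };
  }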
diff --git a/nixpkgs/nixos/modules/programs/traceroute.nix b/nixpkgs/nixos/modules/programs/traceroute.nix
new file mode 100644
index 00000000000..4eb0be3f0e0
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/traceroute.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.programs.traceroute;
+in {
+ options = {
+ programs.traceroute = {
+ enable = mkOption {
+ type = types.bool;
+ default = false;
+ description = ''
+ Whether to configure a setcap wrapper for traceroute.
+ '';
+ };
+ };
+ };
+
+ config = mkIf cfg.enable {
+ security.wrappers.traceroute = {
+ source = "${pkgs.traceroute}/bin/traceroute";
+ capabilities = "cap_net_raw+p";
+ };
+ };
+}
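A minimal usage sketch for the wrapper module above; enabling it installs a cap_net_raw setcap wrapper so unprivileged users can run traceroute (the liboping module just before it follows the same pattern):

  { config, pkgs, ... }:
  {
    programs.traceroute.enable = true;
  }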
diff --git a/nixpkgs/nixos/modules/programs/way-cooler.nix b/nixpkgs/nixos/modules/programs/way-cooler.nix
deleted file mode 100644
index f27bd42bd76..00000000000
--- a/nixpkgs/nixos/modules/programs/way-cooler.nix
+++ /dev/null
@@ -1,78 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
- cfg = config.programs.way-cooler;
- way-cooler = pkgs.way-cooler;
-
- wcWrapped = pkgs.writeShellScriptBin "way-cooler" ''
- ${cfg.extraSessionCommands}
- exec ${pkgs.dbus}/bin/dbus-run-session ${way-cooler}/bin/way-cooler
- '';
- wcJoined = pkgs.symlinkJoin {
- name = "way-cooler-wrapped";
- paths = [ wcWrapped way-cooler ];
- };
- configFile = readFile "${way-cooler}/etc/way-cooler/init.lua";
- spawnBar = ''
- util.program.spawn_at_startup("lemonbar");
- '';
-in
-{
- options.programs.way-cooler = {
- enable = mkEnableOption "way-cooler";
-
- extraSessionCommands = mkOption {
- default = "";
- type = types.lines;
- example = ''
- export XKB_DEFAULT_LAYOUT=us,de
- export XKB_DEFAULT_VARIANT=,nodeadkeys
- export XKB_DEFAULT_OPTIONS=grp:caps_toggle,
- '';
- description = ''
- Shell commands executed just before way-cooler is started.
- '';
- };
-
- extraPackages = mkOption {
- type = with types; listOf package;
- default = with pkgs; [
- westonLite xwayland dmenu
- ];
- example = literalExample ''
- with pkgs; [
- westonLite xwayland dmenu
- ]
- '';
- description = ''
- Extra packages to be installed system wide.
- '';
- };
-
- enableBar = mkOption {
- type = types.bool;
- default = true;
- description = ''
- Whether to enable an unofficial bar.
- '';
- };
- };
-
- config = mkIf cfg.enable {
- environment.systemPackages = [ wcJoined ] ++ cfg.extraPackages;
-
- security.pam.services.wc-lock = {};
- environment.etc."way-cooler/init.lua".text = ''
- ${configFile}
- ${optionalString cfg.enableBar spawnBar}
- '';
-
- hardware.opengl.enable = mkDefault true;
- fonts.enableDefaultFonts = mkDefault true;
- programs.dconf.enable = mkDefault true;
- };
-
- meta.maintainers = with maintainers; [ gnidorah ];
-}
diff --git a/nixpkgs/nixos/modules/rename.nix b/nixpkgs/nixos/modules/rename.nix
index 7109ab5a109..dbe687d8e22 100644
--- a/nixpkgs/nixos/modules/rename.nix
+++ b/nixpkgs/nixos/modules/rename.nix
@@ -27,6 +27,20 @@ with lib;
(mkRemovedOptionModule [ "services.osquery" ] "The osquery module has been removed")
(mkRemovedOptionModule [ "services.fourStore" ] "The fourStore module has been removed")
(mkRemovedOptionModule [ "services.fourStoreEndpoint" ] "The fourStoreEndpoint module has been removed")
+ (mkRemovedOptionModule [ "programs" "way-cooler" ] ("way-cooler is abandoned by its author: " +
+ "https://way-cooler.org/blog/2020/01/09/way-cooler-post-mortem.html"))
+ (mkRemovedOptionModule [ "services" "xserver" "multitouch" ] ''
+ services.xserver.multitouch (which uses xf86_input_mtrack) has been removed
+ as the underlying package isn't being maintained. Working alternatives are
+ libinput and synaptics.
+ '')
+ (mkRemovedOptionModule [ "services" "xserver" "displayManager" "auto" ] ''
+ The services.xserver.displayManager.auto module has been removed
+ because it was only intended for use in internal NixOS tests, and gave the
+ false impression of it being a special display manager when it's actually
+ LightDM. Please use the services.xserver.displayManager.lightdm.autoLogin options
+ instead, or any other display manager in NixOS as they all support auto-login.
+ '')
# Do NOT add any option renames here, see top of the file
];
diff --git a/nixpkgs/nixos/modules/services/amqp/rabbitmq.nix b/nixpkgs/nixos/modules/services/amqp/rabbitmq.nix
index 697732426cc..f80d6b3f1ba 100644
--- a/nixpkgs/nixos/modules/services/amqp/rabbitmq.nix
+++ b/nixpkgs/nixos/modules/services/amqp/rabbitmq.nix
@@ -98,8 +98,8 @@ in {
will be merged into these options by RabbitMQ at runtime to
form the final configuration.
- See http://www.rabbitmq.com/configure.html#config-items
- For the distinct formats, see http://www.rabbitmq.com/configure.html#config-file-formats
+ See https://www.rabbitmq.com/configure.html#config-items
+ For the distinct formats, see https://www.rabbitmq.com/configure.html#config-file-formats
'';
};
@@ -116,8 +116,8 @@ in {
The contents of this option will be merged into the <literal>configItems</literal>
by RabbitMQ at runtime to form the final configuration.
- See the second table on http://www.rabbitmq.com/configure.html#config-items
- For the distinct formats, see http://www.rabbitmq.com/configure.html#config-file-formats
+ See the second table on https://www.rabbitmq.com/configure.html#config-items
+ For the distinct formats, see https://www.rabbitmq.com/configure.html#config-file-formats
'';
};
@@ -165,7 +165,10 @@ in {
after = [ "network.target" "epmd.socket" ];
wants = [ "network.target" "epmd.socket" ];
- path = [ cfg.package pkgs.procps ];
+ path = [
+ cfg.package
+ pkgs.coreutils # mkdir/chown/chmod for preStart
+ ];
environment = {
RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix
index 733479e24c9..4275563f1a3 100644
--- a/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -20,6 +20,7 @@ let
size = 2048;
};
CN = top.masterAddress;
+ hosts = cfg.cfsslAPIExtraSANs;
});
cfsslAPITokenBaseName = "apitoken.secret";
@@ -66,6 +67,15 @@ in
type = bool;
};
+ cfsslAPIExtraSANs = mkOption {
+ description = ''
+ Extra x509 Subject Alternative Names to be added to the cfssl API webserver TLS cert.
+ '';
+ default = [];
+ example = [ "subdomain.example.com" ];
+ type = listOf str;
+ };
+
genCfsslAPIToken = mkOption {
description = ''
Whether to automatically generate cfssl API-token secret,
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix
index 326d2cbd82c..e3da3092d45 100644
--- a/nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -222,7 +222,7 @@ in {
};
config = mkIf cfg.enable {
- users.groups = optional (cfg.group == "buildbot") {
+ users.groups = optionalAttrs (cfg.group == "buildbot") {
buildbot = { };
};
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix
index 7613692f0a3..52f24b8cee3 100644
--- a/nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix
+++ b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -136,7 +136,7 @@ in {
config = mkIf cfg.enable {
services.buildbot-worker.workerPassFile = mkDefault (pkgs.writeText "buildbot-worker-password" cfg.workerPass);
- users.groups = optional (cfg.group == "bbworker") {
+ users.groups = optionalAttrs (cfg.group == "bbworker") {
bbworker = { };
};
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/buildkite-agent.nix b/nixpkgs/nixos/modules/services/continuous-integration/buildkite-agent.nix
index 32f361454bc..58bce654941 100644
--- a/nixpkgs/nixos/modules/services/continuous-integration/buildkite-agent.nix
+++ b/nixpkgs/nixos/modules/services/continuous-integration/buildkite-agent.nix
@@ -50,8 +50,8 @@ in
};
runtimePackages = mkOption {
- default = [ pkgs.bash pkgs.nix ];
- defaultText = "[ pkgs.bash pkgs.nix ]";
+ default = [ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ];
+ defaultText = "[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]";
description = "Add programs to the buildkite-agent environment";
type = types.listOf types.package;
};
@@ -74,13 +74,12 @@ in
'';
};
- meta-data = mkOption {
- type = types.str;
- default = "";
- example = "queue=default,docker=true,ruby2=true";
+ tags = mkOption {
+ type = types.attrsOf types.str;
+ default = {};
+      example = { queue = "default"; docker = "true"; ruby2 = "true"; };
description = ''
- Meta data for the agent. This is a comma-separated list of
- <code>key=value</code> pairs.
+ Tags for the agent.
'';
};
@@ -93,26 +92,20 @@ in
'';
};
- openssh =
- { privateKeyPath = mkOption {
- type = types.path;
- description = ''
- Private agent key.
+ privateSshKeyPath = mkOption {
+ type = types.nullOr types.path;
+ default = null;
+ ## maximum care is taken so that secrets (ssh keys and the CI token)
+ ## don't end up in the Nix store.
+ apply = final: if final == null then null else toString final;
- A run-time path to the key file, which is supposed to be provisioned
- outside of Nix store.
- '';
- };
- publicKeyPath = mkOption {
- type = types.path;
- description = ''
- Public agent key.
-
- A run-time path to the key file, which is supposed to be provisioned
- outside of Nix store.
- '';
- };
- };
+ description = ''
+ OpenSSH private key
+
+ A run-time path to the key file, which is supposed to be provisioned
+      outside of the Nix store.
+ '';
+ };
hooks = mkHookOptions [
{ name = "checkout";
@@ -181,18 +174,26 @@ in
instead.
'';
};
+
+ shell = mkOption {
+ type = types.str;
+ default = "${pkgs.bash}/bin/bash -e -c";
+ description = ''
+ Command that buildkite-agent 3 will execute when it spawns a shell.
+ '';
+ };
};
};
config = mkIf config.services.buildkite-agent.enable {
- users.users.buildkite-agent =
- { name = "buildkite-agent";
- home = cfg.dataDir;
- createHome = true;
- description = "Buildkite agent user";
- extraGroups = [ "keys" ];
- isSystemUser = true;
- };
+ users.users.buildkite-agent = {
+ name = "buildkite-agent";
+ home = cfg.dataDir;
+ createHome = true;
+ description = "Buildkite agent user";
+ extraGroups = [ "keys" ];
+ isSystemUser = true;
+ };
environment.systemPackages = [ cfg.package ];
@@ -210,17 +211,18 @@ in
## don't end up in the Nix store.
preStart = let
sshDir = "${cfg.dataDir}/.ssh";
+ tagStr = lib.concatStringsSep "," (lib.mapAttrsToList (name: value: "${name}=${value}") cfg.tags);
in
- ''
+ optionalString (cfg.privateSshKeyPath != null) ''
mkdir -m 0700 -p "${sshDir}"
- cp -f "${toString cfg.openssh.privateKeyPath}" "${sshDir}/id_rsa"
- cp -f "${toString cfg.openssh.publicKeyPath}" "${sshDir}/id_rsa.pub"
- chmod 600 "${sshDir}"/id_rsa*
-
+ cp -f "${toString cfg.privateSshKeyPath}" "${sshDir}/id_rsa"
+ chmod 600 "${sshDir}"/id_rsa
+ '' + ''
cat > "${cfg.dataDir}/buildkite-agent.cfg" <<EOF
token="$(cat ${toString cfg.tokenPath})"
name="${cfg.name}"
- meta-data="${cfg.meta-data}"
+ shell="${cfg.shell}"
+ tags="${tagStr}"
build-path="${cfg.dataDir}/builds"
hooks-path="${cfg.hooksPath}"
${cfg.extraConfig}
@@ -228,11 +230,14 @@ in
'';
serviceConfig =
- { ExecStart = "${pkgs.buildkite-agent}/bin/buildkite-agent start --config /var/lib/buildkite-agent/buildkite-agent.cfg";
+ { ExecStart = "${cfg.package}/bin/buildkite-agent start --config /var/lib/buildkite-agent/buildkite-agent.cfg";
User = "buildkite-agent";
RestartSec = 5;
Restart = "on-failure";
TimeoutSec = 10;
+ # set a long timeout to give buildkite-agent a chance to finish current builds
+ TimeoutStopSec = "2 min";
+ KillMode = "mixed";
};
};
@@ -246,8 +251,11 @@ in
];
};
imports = [
- (mkRenamedOptionModule [ "services" "buildkite-agent" "token" ] [ "services" "buildkite-agent" "tokenPath" ])
- (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKey" ] [ "services" "buildkite-agent" "openssh" "privateKeyPath" ])
- (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "publicKey" ] [ "services" "buildkite-agent" "openssh" "publicKeyPath" ])
+ (mkRenamedOptionModule [ "services" "buildkite-agent" "token" ] [ "services" "buildkite-agent" "tokenPath" ])
+ (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKey" ] [ "services" "buildkite-agent" "privateSshKeyPath" ])
+ (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKeyPath" ] [ "services" "buildkite-agent" "privateSshKeyPath" ])
+ (mkRemovedOptionModule [ "services" "buildkite-agent" "openssh" "publicKey" ] "SSH public keys aren't necessary to clone private repos.")
+ (mkRemovedOptionModule [ "services" "buildkite-agent" "openssh" "publicKeyPath" ] "SSH public keys aren't necessary to clone private repos.")
+ (mkRenamedOptionModule [ "services" "buildkite-agent" "meta-data"] [ "services" "buildkite-agent" "tags" ])
];
}
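For reference, a minimal sketch of a configuration written against the renamed options above; the paths are placeholders, and the tags attribute set is what gets rendered into the agent config as the comma-separated tags= line:

    { config, pkgs, ... }:
    {
      services.buildkite-agent = {
        enable = true;
        tokenPath = "/var/keys/buildkite-token";        # provisioned outside the Nix store
        privateSshKeyPath = "/var/keys/buildkite-ssh";  # replaces openssh.privateKeyPath
        tags = { queue = "default"; os = "nixos"; };    # emitted as tags="queue=default,os=nixos"
      };
    }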
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix b/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix
index 30c5550f71c..8b56207590a 100644
--- a/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -167,7 +167,7 @@ in
buildMachinesFiles = mkOption {
type = types.listOf types.path;
- default = [ "/etc/nix/machines" ];
+ default = optional (config.nix.buildMachines != []) "/etc/nix/machines";
example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ];
description = "List of files containing build machines.";
};
@@ -333,7 +333,7 @@ in
IN_SYSTEMD = "1"; # to get log severity levels
};
serviceConfig =
- { ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v --option build-use-substitutes ${boolToString cfg.useSubstitutes}";
+ { ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v";
ExecStopPost = "${cfg.package}/bin/hydra-queue-runner --unlock";
User = "hydra-queue-runner";
Restart = "always";
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix
index 26368cb94e4..3c0e6f78e74 100644
--- a/nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix
+++ b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix
@@ -50,7 +50,7 @@ in {
};
config = mkIf (cfg.enable && !masterCfg.enable) {
- users.groups = optional (cfg.group == "jenkins") {
+ users.groups = optionalAttrs (cfg.group == "jenkins") {
jenkins.gid = config.ids.gids.jenkins;
};
diff --git a/nixpkgs/nixos/modules/services/databases/openldap.nix b/nixpkgs/nixos/modules/services/databases/openldap.nix
index 5bf57a1bf9c..809f61cfa81 100644
--- a/nixpkgs/nixos/modules/services/databases/openldap.nix
+++ b/nixpkgs/nixos/modules/services/databases/openldap.nix
@@ -259,6 +259,8 @@ in
${openldap.out}/bin/slapadd ${configOpts} -l ${dataFile}
''}
chown -R "${cfg.user}:${cfg.group}" "${cfg.dataDir}"
+
+ ${openldap}/bin/slaptest ${configOpts}
'';
serviceConfig.ExecStart =
"${openldap.out}/libexec/slapd -d '${cfg.logLevel}' " +
diff --git a/nixpkgs/nixos/modules/services/databases/victoriametrics.nix b/nixpkgs/nixos/modules/services/databases/victoriametrics.nix
new file mode 100644
index 00000000000..cb6bf8508fb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/victoriametrics.nix
@@ -0,0 +1,70 @@
+{ config, pkgs, lib, ... }:
+let cfg = config.services.victoriametrics; in
+{
+ options.services.victoriametrics = with lib; {
+ enable = mkEnableOption "victoriametrics";
+ package = mkOption {
+ type = types.package;
+ default = pkgs.victoriametrics;
+ defaultText = "pkgs.victoriametrics";
+ description = ''
+ The VictoriaMetrics distribution to use.
+ '';
+ };
+ listenAddress = mkOption {
+ default = ":8428";
+ type = types.str;
+ description = ''
+ The listen address for the http interface.
+ '';
+ };
+ retentionPeriod = mkOption {
+ type = types.int;
+ default = 1;
+ description = ''
+ Retention period in months.
+ '';
+ };
+ extraOptions = mkOption {
+ type = types.listOf types.str;
+ default = [];
+ description = ''
+ Extra options to pass to VictoriaMetrics. See the README: <link
+ xlink:href="https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md" />
+ or <command>victoriametrics -help</command> for more
+ information.
+ '';
+ };
+ };
+ config = lib.mkIf cfg.enable {
+ systemd.services.victoriametrics = {
+ description = "VictoriaMetrics time series database";
+ after = [ "network.target" ];
+ serviceConfig = {
+ Restart = "on-failure";
+ RestartSec = 1;
+ StartLimitBurst = 5;
+ StateDirectory = "victoriametrics";
+ DynamicUser = true;
+ ExecStart = ''
+ ${cfg.package}/bin/victoria-metrics \
+ -storageDataPath=/var/lib/victoriametrics \
+            -httpListenAddr ${cfg.listenAddress} \
+            -retentionPeriod ${toString cfg.retentionPeriod} \
+ ${lib.escapeShellArgs cfg.extraOptions}
+ '';
+ };
+ wantedBy = [ "multi-user.target" ];
+
+ postStart =
+ let
+ bindAddr = (lib.optionalString (lib.hasPrefix ":" cfg.listenAddress) "127.0.0.1") + cfg.listenAddress;
+ in
+ lib.mkBefore ''
+ until ${lib.getBin pkgs.curl}/bin/curl -s -o /dev/null http://${bindAddr}/ping; do
+ sleep 1;
+ done
+ '';
+ };
+ };
+}
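A minimal sketch of enabling the new VictoriaMetrics module; the extra flag is illustrative only, consult victoriametrics -help for the supported flags:

    { config, pkgs, ... }:
    {
      services.victoriametrics = {
        enable = true;
        listenAddress = "127.0.0.1:8428";
        retentionPeriod = 12;                          # months
        extraOptions = [ "-selfScrapeInterval=10s" ];  # illustrative; see victoriametrics -help
      };
    }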
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome3/at-spi2-core.nix b/nixpkgs/nixos/modules/services/desktops/gnome3/at-spi2-core.nix
index cca98c43dc7..8fa108c4f9d 100644
--- a/nixpkgs/nixos/modules/services/desktops/gnome3/at-spi2-core.nix
+++ b/nixpkgs/nixos/modules/services/desktops/gnome3/at-spi2-core.nix
@@ -18,6 +18,9 @@ with lib;
description = ''
Whether to enable at-spi2-core, a service for the Assistive Technologies
available on the GNOME platform.
+
+ Enable this if you get the error or warning
+ <literal>The name org.a11y.Bus was not provided by any .service files</literal>.
'';
};
diff --git a/nixpkgs/nixos/modules/services/hardware/actkbd.nix b/nixpkgs/nixos/modules/services/hardware/actkbd.nix
index 4168140b287..daa407ca1f0 100644
--- a/nixpkgs/nixos/modules/services/hardware/actkbd.nix
+++ b/nixpkgs/nixos/modules/services/hardware/actkbd.nix
@@ -83,7 +83,7 @@ in
See <command>actkbd</command> <filename>README</filename> for documentation.
- The example shows a piece of what <option>sound.enableMediaKeys</option> does when enabled.
+ The example shows a piece of what <option>sound.mediaKeys.enable</option> does when enabled.
'';
};
diff --git a/nixpkgs/nixos/modules/services/hardware/usbmuxd.nix b/nixpkgs/nixos/modules/services/hardware/usbmuxd.nix
index 50b931dcb48..11a4b0a858f 100644
--- a/nixpkgs/nixos/modules/services/hardware/usbmuxd.nix
+++ b/nixpkgs/nixos/modules/services/hardware/usbmuxd.nix
@@ -51,7 +51,7 @@ in
};
};
- users.groups = optional (cfg.group == defaultUserGroup) {
+ users.groups = optionalAttrs (cfg.group == defaultUserGroup) {
${cfg.group} = { };
};
diff --git a/nixpkgs/nixos/modules/services/mail/mailman.nix b/nixpkgs/nixos/modules/services/mail/mailman.nix
index e917209f3d1..43dc185cdd7 100644
--- a/nixpkgs/nixos/modules/services/mail/mailman.nix
+++ b/nixpkgs/nixos/modules/services/mail/mailman.nix
@@ -6,37 +6,18 @@ let
cfg = config.services.mailman;
- mailmanPyEnv = pkgs.python3.withPackages (ps: with ps; [mailman mailman-hyperkitty]);
-
- mailmanExe = with pkgs; stdenv.mkDerivation {
- name = "mailman-" + python3Packages.mailman.version;
- buildInputs = [makeWrapper];
- unpackPhase = ":";
- installPhase = ''
- mkdir -p $out/bin
- makeWrapper ${mailmanPyEnv}/bin/mailman $out/bin/mailman \
- --set MAILMAN_CONFIG_FILE /etc/mailman.cfg
- '';
- };
-
- mailmanWeb = pkgs.python3Packages.mailman-web.override {
- serverEMail = cfg.siteOwner;
- archiverKey = cfg.hyperkittyApiKey;
- allowedHosts = cfg.webHosts;
- };
-
- mailmanWebPyEnv = pkgs.python3.withPackages (x: with x; [mailman-web]);
-
- mailmanWebExe = with pkgs; stdenv.mkDerivation {
- inherit (mailmanWeb) name;
- buildInputs = [makeWrapper];
- unpackPhase = ":";
- installPhase = ''
- mkdir -p $out/bin
- makeWrapper ${mailmanWebPyEnv}/bin/django-admin $out/bin/mailman-web \
- --set DJANGO_SETTINGS_MODULE settings
- '';
- };
+ # This deliberately doesn't use recursiveUpdate so users can
+ # override the defaults.
+ settings = {
+ DEFAULT_FROM_EMAIL = cfg.siteOwner;
+ SERVER_EMAIL = cfg.siteOwner;
+ ALLOWED_HOSTS = [ "localhost" "127.0.0.1" ] ++ cfg.webHosts;
+ COMPRESS_OFFLINE = true;
+ STATIC_ROOT = "/var/lib/mailman-web/static";
+ MEDIA_ROOT = "/var/lib/mailman-web/media";
+ } // cfg.webSettings;
+
+ settingsJSON = pkgs.writeText "settings.json" (builtins.toJSON settings);
mailmanCfg = ''
[mailman]
@@ -53,30 +34,42 @@ let
etc_dir: /etc
ext_dir: $etc_dir/mailman.d
pid_file: /run/mailman/master.pid
- '' + optionalString (cfg.hyperkittyApiKey != null) ''
+ '' + optionalString cfg.hyperkitty.enable ''
+
[archiver.hyperkitty]
class: mailman_hyperkitty.Archiver
enable: yes
- configuration: ${pkgs.writeText "mailman-hyperkitty.cfg" mailmanHyperkittyCfg}
+ configuration: /var/lib/mailman/mailman-hyperkitty.cfg
'';
- mailmanHyperkittyCfg = ''
+ mailmanHyperkittyCfg = pkgs.writeText "mailman-hyperkitty.cfg" ''
[general]
# This is your HyperKitty installation, preferably on the localhost. This
# address will be used by Mailman to forward incoming emails to HyperKitty
# for archiving. It does not need to be publicly available, in fact it's
# better if it is not.
- base_url: ${cfg.hyperkittyBaseUrl}
+ base_url: ${cfg.hyperkitty.baseUrl}
# Shared API key, must be the identical to the value in HyperKitty's
# settings.
- api_key: ${cfg.hyperkittyApiKey}
+ api_key: @API_KEY@
'';
in {
###### interface
+ imports = [
+ (mkRenamedOptionModule [ "services" "mailman" "hyperkittyBaseUrl" ]
+ [ "services" "mailman" "hyperkitty" "baseUrl" ])
+
+ (mkRemovedOptionModule [ "services" "mailman" "hyperkittyApiKey" ] ''
+ The Hyperkitty API key is now generated on first run, and not
+ stored in the world-readable Nix store. To continue using
+ Hyperkitty, you must set services.mailman.hyperkitty.enable = true.
+ '')
+ ];
+
options = {
services.mailman = {
@@ -87,9 +80,17 @@ in {
description = "Enable Mailman on this host. Requires an active Postfix installation.";
};
+ package = mkOption {
+ type = types.package;
+ default = pkgs.mailman;
+ defaultText = "pkgs.mailman";
+ example = "pkgs.mailman.override { archivers = []; }";
+ description = "Mailman package to use";
+ };
+
siteOwner = mkOption {
type = types.str;
- default = "postmaster@example.org";
+ example = "postmaster@example.org";
description = ''
Certain messages that must be delivered to a human, but which can't
be delivered to a list owner (e.g. a bounce from a list owner), will
@@ -99,12 +100,13 @@ in {
webRoot = mkOption {
type = types.path;
- default = "${mailmanWeb}/${pkgs.python3.sitePackages}";
- defaultText = "pkgs.python3Packages.mailman-web";
+ default = "${pkgs.mailman-web}/${pkgs.python3.sitePackages}";
+ defaultText = "\${pkgs.mailman-web}/\${pkgs.python3.sitePackages}";
description = ''
The web root for the Hyperkitty + Postorius apps provided by Mailman.
This variable can be set, of course, but it mainly exists so that site
- admins can refer to it in their own hand-written httpd configuration files.
+ admins can refer to it in their own hand-written web server
+ configuration files.
'';
};
@@ -120,26 +122,35 @@ in {
'';
};
- hyperkittyBaseUrl = mkOption {
+ webUser = mkOption {
type = types.str;
- default = "http://localhost/hyperkitty/";
+ default = config.services.httpd.user;
description = ''
- Where can Mailman connect to Hyperkitty's internal API, preferably on
- localhost?
+ User to run mailman-web as
'';
};
- hyperkittyApiKey = mkOption {
- type = types.nullOr types.str;
- default = null;
+ webSettings = mkOption {
+ type = types.attrs;
+ default = {};
description = ''
- The shared secret used to authenticate Mailman's internal
- communication with Hyperkitty. Must be set to enable support for the
- Hyperkitty archiver. Note that this secret is going to be visible to
- all local users in the Nix store.
+ Overrides for the default mailman-web Django settings.
'';
};
+ hyperkitty = {
+ enable = mkEnableOption "the Hyperkitty archiver for Mailman";
+
+ baseUrl = mkOption {
+ type = types.str;
+ default = "http://localhost/hyperkitty/";
+ description = ''
+ Where can Mailman connect to Hyperkitty's internal API, preferably on
+ localhost?
+ '';
+ };
+ };
+
};
};
@@ -147,25 +158,58 @@ in {
config = mkIf cfg.enable {
- assertions = [
- { assertion = cfg.enable -> config.services.postfix.enable;
+ assertions = let
+ inherit (config.services) postfix;
+
+ requirePostfixHash = optionPath: dataFile:
+ with lib;
+ let
+ expected = "hash:/var/lib/mailman/data/${dataFile}";
+ value = attrByPath optionPath [] postfix;
+ in
+ { assertion = postfix.enable -> isList value && elem expected value;
+ message = ''
+ services.postfix.${concatStringsSep "." optionPath} must contain
+ "${expected}".
+ See <https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html>.
+ '';
+ };
+ in [
+ { assertion = postfix.enable;
message = "Mailman requires Postfix";
}
+ (requirePostfixHash [ "relayDomains" ] "postfix_domains")
+ (requirePostfixHash [ "config" "transport_maps" ] "postfix_lmtp")
+ (requirePostfixHash [ "config" "local_recipient_maps" ] "postfix_lmtp")
];
users.users.mailman = { description = "GNU Mailman"; isSystemUser = true; };
- environment = {
- systemPackages = [ mailmanExe mailmanWebExe pkgs.sassc ];
- etc."mailman.cfg".text = mailmanCfg;
- };
+ environment.etc."mailman.cfg".text = mailmanCfg;
+
+ environment.etc."mailman3/settings.py".text = ''
+ import os
+
+ # Required by mailman_web.settings, but will be overridden when
+ # settings_local.json is loaded.
+ os.environ["SECRET_KEY"] = ""
+
+ from mailman_web.settings import *
+
+ import json
+
+ with open('${settingsJSON}') as f:
+ globals().update(json.load(f))
+
+ with open('/var/lib/mailman-web/settings_local.json') as f:
+ globals().update(json.load(f))
+ '';
+
+ environment.systemPackages = [ cfg.package ] ++ (with pkgs; [ mailman-web ]);
services.postfix = {
- relayDomains = [ "hash:/var/lib/mailman/data/postfix_domains" ];
recipientDelimiter = "+"; # bake recipient addresses in mail envelopes via VERP
config = {
- transport_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
- local_recipient_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
owner_request_special = "no"; # Mailman handles -owner addresses on its own
};
};
@@ -173,34 +217,71 @@ in {
systemd.services.mailman = {
description = "GNU Mailman Master Process";
after = [ "network.target" ];
+ restartTriggers = [ config.environment.etc."mailman.cfg".source ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
- ExecStart = "${mailmanExe}/bin/mailman start";
- ExecStop = "${mailmanExe}/bin/mailman stop";
+ ExecStart = "${cfg.package}/bin/mailman start";
+ ExecStop = "${cfg.package}/bin/mailman stop";
User = "mailman";
Type = "forking";
- StateDirectory = "mailman";
- StateDirectoryMode = "0700";
RuntimeDirectory = "mailman";
PIDFile = "/run/mailman/master.pid";
};
};
+ systemd.services.mailman-settings = {
+ description = "Generate settings files (including secrets) for Mailman";
+ before = [ "mailman.service" "mailman-web.service" "hyperkitty.service" "httpd.service" "uwsgi.service" ];
+ requiredBy = [ "mailman.service" "mailman-web.service" "hyperkitty.service" "httpd.service" "uwsgi.service" ];
+ path = with pkgs; [ jq ];
+ script = ''
+ mailmanDir=/var/lib/mailman
+ mailmanWebDir=/var/lib/mailman-web
+
+ mailmanCfg=$mailmanDir/mailman-hyperkitty.cfg
+ mailmanWebCfg=$mailmanWebDir/settings_local.json
+
+ install -m 0700 -o mailman -g nogroup -d $mailmanDir
+ install -m 0700 -o ${cfg.webUser} -g nogroup -d $mailmanWebDir
+
+ if [ ! -e $mailmanWebCfg ]; then
+ hyperkittyApiKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
+ secretKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
+
+ mailmanWebCfgTmp=$(mktemp)
+ jq -n '.MAILMAN_ARCHIVER_KEY=$archiver_key | .SECRET_KEY=$secret_key' \
+ --arg archiver_key "$hyperkittyApiKey" \
+ --arg secret_key "$secretKey" \
+ >"$mailmanWebCfgTmp"
+ chown ${cfg.webUser} "$mailmanWebCfgTmp"
+ mv -n "$mailmanWebCfgTmp" $mailmanWebCfg
+ fi
+
+ hyperkittyApiKey="$(jq -r .MAILMAN_ARCHIVER_KEY $mailmanWebCfg)"
+ mailmanCfgTmp=$(mktemp)
+ sed "s/@API_KEY@/$hyperkittyApiKey/g" ${mailmanHyperkittyCfg} >"$mailmanCfgTmp"
+ chown mailman "$mailmanCfgTmp"
+ mv "$mailmanCfgTmp" $mailmanCfg
+ '';
+ serviceConfig = {
+ Type = "oneshot";
+ };
+ };
+
systemd.services.mailman-web = {
description = "Init Postorius DB";
- before = [ "httpd.service" ];
- requiredBy = [ "httpd.service" ];
+ before = [ "httpd.service" "uwsgi.service" ];
+ requiredBy = [ "httpd.service" "uwsgi.service" ];
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
script = ''
- ${mailmanWebExe}/bin/mailman-web migrate
+ ${pkgs.mailman-web}/bin/mailman-web migrate
rm -rf static
- ${mailmanWebExe}/bin/mailman-web collectstatic
- ${mailmanWebExe}/bin/mailman-web compress
+ ${pkgs.mailman-web}/bin/mailman-web collectstatic
+ ${pkgs.mailman-web}/bin/mailman-web compress
'';
serviceConfig = {
- User = config.services.httpd.user;
+ User = cfg.webUser;
Type = "oneshot";
- StateDirectory = "mailman-web";
- StateDirectoryMode = "0700";
WorkingDirectory = "/var/lib/mailman-web";
};
};
@@ -208,86 +289,94 @@ in {
systemd.services.mailman-daily = {
description = "Trigger daily Mailman events";
startAt = "daily";
+ restartTriggers = [ config.environment.etc."mailman.cfg".source ];
serviceConfig = {
- ExecStart = "${mailmanExe}/bin/mailman digests --send";
+ ExecStart = "${cfg.package}/bin/mailman digests --send";
User = "mailman";
};
};
systemd.services.hyperkitty = {
- enable = cfg.hyperkittyApiKey != null;
+ inherit (cfg.hyperkitty) enable;
description = "GNU Hyperkitty QCluster Process";
after = [ "network.target" ];
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
wantedBy = [ "mailman.service" "multi-user.target" ];
serviceConfig = {
- ExecStart = "${mailmanWebExe}/bin/mailman-web qcluster";
- User = config.services.httpd.user;
+ ExecStart = "${pkgs.mailman-web}/bin/mailman-web qcluster";
+ User = cfg.webUser;
WorkingDirectory = "/var/lib/mailman-web";
};
};
systemd.services.hyperkitty-minutely = {
- enable = cfg.hyperkittyApiKey != null;
+ inherit (cfg.hyperkitty) enable;
description = "Trigger minutely Hyperkitty events";
startAt = "minutely";
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
- ExecStart = "${mailmanWebExe}/bin/mailman-web runjobs minutely";
- User = config.services.httpd.user;
+ ExecStart = "${pkgs.mailman-web}/bin/mailman-web runjobs minutely";
+ User = cfg.webUser;
WorkingDirectory = "/var/lib/mailman-web";
};
};
systemd.services.hyperkitty-quarter-hourly = {
- enable = cfg.hyperkittyApiKey != null;
+ inherit (cfg.hyperkitty) enable;
description = "Trigger quarter-hourly Hyperkitty events";
startAt = "*:00/15";
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
- ExecStart = "${mailmanWebExe}/bin/mailman-web runjobs quarter_hourly";
- User = config.services.httpd.user;
+ ExecStart = "${pkgs.mailman-web}/bin/mailman-web runjobs quarter_hourly";
+ User = cfg.webUser;
WorkingDirectory = "/var/lib/mailman-web";
};
};
systemd.services.hyperkitty-hourly = {
- enable = cfg.hyperkittyApiKey != null;
+ inherit (cfg.hyperkitty) enable;
description = "Trigger hourly Hyperkitty events";
startAt = "hourly";
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
- ExecStart = "${mailmanWebExe}/bin/mailman-web runjobs hourly";
- User = config.services.httpd.user;
+ ExecStart = "${pkgs.mailman-web}/bin/mailman-web runjobs hourly";
+ User = cfg.webUser;
WorkingDirectory = "/var/lib/mailman-web";
};
};
systemd.services.hyperkitty-daily = {
- enable = cfg.hyperkittyApiKey != null;
+ inherit (cfg.hyperkitty) enable;
description = "Trigger daily Hyperkitty events";
startAt = "daily";
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
- ExecStart = "${mailmanWebExe}/bin/mailman-web runjobs daily";
- User = config.services.httpd.user;
+ ExecStart = "${pkgs.mailman-web}/bin/mailman-web runjobs daily";
+ User = cfg.webUser;
WorkingDirectory = "/var/lib/mailman-web";
};
};
systemd.services.hyperkitty-weekly = {
- enable = cfg.hyperkittyApiKey != null;
+ inherit (cfg.hyperkitty) enable;
description = "Trigger weekly Hyperkitty events";
startAt = "weekly";
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
- ExecStart = "${mailmanWebExe}/bin/mailman-web runjobs weekly";
- User = config.services.httpd.user;
+ ExecStart = "${pkgs.mailman-web}/bin/mailman-web runjobs weekly";
+ User = cfg.webUser;
WorkingDirectory = "/var/lib/mailman-web";
};
};
systemd.services.hyperkitty-yearly = {
- enable = cfg.hyperkittyApiKey != null;
+ inherit (cfg.hyperkitty) enable;
description = "Trigger yearly Hyperkitty events";
startAt = "yearly";
+ restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
- ExecStart = "${mailmanWebExe}/bin/mailman-web runjobs yearly";
- User = config.services.httpd.user;
+ ExecStart = "${pkgs.mailman-web}/bin/mailman-web runjobs yearly";
+ User = cfg.webUser;
WorkingDirectory = "/var/lib/mailman-web";
};
};
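Putting the reworked Mailman options together, a sketch of a minimal host; the Postfix hash maps mirror what the new assertions require, while the hostnames and the Django override are placeholders:

    { config, pkgs, ... }:
    {
      services.postfix = {
        enable = true;
        relayDomains = [ "hash:/var/lib/mailman/data/postfix_domains" ];
        config = {
          transport_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
          local_recipient_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
        };
      };
      services.mailman = {
        enable = true;
        siteOwner = "postmaster@example.org";
        webHosts = [ "lists.example.org" ];
        hyperkitty.enable = true;
        webSettings.TIME_ZONE = "Europe/Berlin";  # placeholder override merged into the Django settings
      };
    }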
diff --git a/nixpkgs/nixos/modules/services/mail/postfix.nix b/nixpkgs/nixos/modules/services/mail/postfix.nix
index d7378821440..19e11b31d9c 100644
--- a/nixpkgs/nixos/modules/services/mail/postfix.nix
+++ b/nixpkgs/nixos/modules/services/mail/postfix.nix
@@ -612,10 +612,7 @@ in
{
environment = {
- etc = singleton
- { source = "/var/lib/postfix/conf";
- target = "postfix";
- };
+ etc.postfix.source = "/var/lib/postfix/conf";
# This makes it comfortable to run 'postqueue/postdrop' for example.
systemPackages = [ pkgs.postfix ];
diff --git a/nixpkgs/nixos/modules/services/mail/roundcube.nix b/nixpkgs/nixos/modules/services/mail/roundcube.nix
index 36dda619ad0..0bb0eaedad5 100644
--- a/nixpkgs/nixos/modules/services/mail/roundcube.nix
+++ b/nixpkgs/nixos/modules/services/mail/roundcube.nix
@@ -5,6 +5,8 @@ with lib;
let
cfg = config.services.roundcube;
fpm = config.services.phpfpm.pools.roundcube;
+ localDB = cfg.database.host == "localhost";
+ user = cfg.database.username;
in
{
options.services.roundcube = {
@@ -44,7 +46,10 @@ in
username = mkOption {
type = types.str;
default = "roundcube";
- description = "Username for the postgresql connection";
+ description = ''
+ Username for the postgresql connection.
+ If <literal>database.host</literal> is set to <literal>localhost</literal>, a unix user and group of the same name will be created as well.
+ '';
};
host = mkOption {
type = types.str;
@@ -58,7 +63,12 @@ in
};
password = mkOption {
type = types.str;
- description = "Password for the postgresql connection";
+        description = "Password for the postgresql connection. Do not use: the password will be stored world-readable in the Nix store; use <literal>passwordFile</literal> instead.";
+ default = "";
+ };
+ passwordFile = mkOption {
+ type = types.str;
+ description = "Password file for the postgresql connection. Must be readable by user <literal>nginx</literal>. Ignored if <literal>database.host</literal> is set to <literal>localhost</literal>, as peer authentication will be used.";
};
dbname = mkOption {
type = types.str;
@@ -83,14 +93,22 @@ in
};
config = mkIf cfg.enable {
+ # backward compatibility: if password is set but not passwordFile, make one.
+ services.roundcube.database.passwordFile = mkIf (!localDB && cfg.database.password != "") (mkDefault ("${pkgs.writeText "roundcube-password" cfg.database.password}"));
+ warnings = lib.optional (!localDB && cfg.database.password != "") "services.roundcube.database.password is deprecated and insecure; use services.roundcube.database.passwordFile instead";
+
environment.etc."roundcube/config.inc.php".text = ''
<?php
+ ${lib.optionalString (!localDB) "$password = file_get_contents('${cfg.database.passwordFile}');"}
+
$config = array();
- $config['db_dsnw'] = 'pgsql://${cfg.database.username}:${cfg.database.password}@${cfg.database.host}/${cfg.database.dbname}';
+ $config['db_dsnw'] = 'pgsql://${cfg.database.username}${lib.optionalString (!localDB) ":' . $password . '"}@${if localDB then "unix(/run/postgresql)" else cfg.database.host}/${cfg.database.dbname}';
$config['log_driver'] = 'syslog';
$config['max_message_size'] = '25M';
$config['plugins'] = [${concatMapStringsSep "," (p: "'${p}'") cfg.plugins}];
+ $config['des_key'] = file_get_contents('/var/lib/roundcube/des_key');
+ $config['mime_types'] = '${pkgs.nginx}/conf/mime.types';
${cfg.extraConfig}
'';
@@ -116,12 +134,26 @@ in
};
};
- services.postgresql = mkIf (cfg.database.host == "localhost") {
+ services.postgresql = mkIf localDB {
enable = true;
+ ensureDatabases = [ cfg.database.dbname ];
+ ensureUsers = [ {
+ name = cfg.database.username;
+ ensurePermissions = {
+ "DATABASE ${cfg.database.username}" = "ALL PRIVILEGES";
+ };
+ } ];
+ };
+
+ users.users.${user} = mkIf localDB {
+ group = user;
+ isSystemUser = true;
+ createHome = false;
};
+ users.groups.${user} = mkIf localDB {};
services.phpfpm.pools.roundcube = {
- user = "nginx";
+ user = if localDB then user else "nginx";
phpOptions = ''
error_log = 'stderr'
log_errors = on
@@ -143,9 +175,7 @@ in
};
systemd.services.phpfpm-roundcube.after = [ "roundcube-setup.service" ];
- systemd.services.roundcube-setup = let
- pgSuperUser = config.services.postgresql.superUser;
- in mkMerge [
+ systemd.services.roundcube-setup = mkMerge [
(mkIf (cfg.database.host == "localhost") {
requires = [ "postgresql.service" ];
after = [ "postgresql.service" ];
@@ -153,22 +183,31 @@ in
})
{
wantedBy = [ "multi-user.target" ];
- script = ''
- mkdir -p /var/lib/roundcube
- if [ ! -f /var/lib/roundcube/db-created ]; then
- if [ "${cfg.database.host}" = "localhost" ]; then
- ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "create role ${cfg.database.username} with login password '${cfg.database.password}'";
- ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "create database ${cfg.database.dbname} with owner ${cfg.database.username}";
- fi
- PGPASSWORD="${cfg.database.password}" ${pkgs.postgresql}/bin/psql -U ${cfg.database.username} \
- -f ${cfg.package}/SQL/postgres.initial.sql \
- -h ${cfg.database.host} ${cfg.database.dbname}
- touch /var/lib/roundcube/db-created
+ script = let
+ psql = "${lib.optionalString (!localDB) "PGPASSFILE=${cfg.database.passwordFile}"} ${pkgs.postgresql}/bin/psql ${lib.optionalString (!localDB) "-h ${cfg.database.host} -U ${cfg.database.username} "} ${cfg.database.dbname}";
+ in
+ ''
+ version="$(${psql} -t <<< "select value from system where name = 'roundcube-version';" || true)"
+ if ! (grep -E '[a-zA-Z0-9]' <<< "$version"); then
+ ${psql} -f ${cfg.package}/SQL/postgres.initial.sql
+ fi
+
+ if [ ! -f /var/lib/roundcube/des_key ]; then
+ base64 /dev/urandom | head -c 24 > /var/lib/roundcube/des_key;
+          # we need to log out everyone in case the des_key changed
+          # from the default when upgrading from NixOS 19.09
+ ${psql} <<< 'TRUNCATE TABLE session;'
fi
${pkgs.php}/bin/php ${cfg.package}/bin/update.sh
'';
- serviceConfig.Type = "oneshot";
+ serviceConfig = {
+ Type = "oneshot";
+ StateDirectory = "roundcube";
+ User = if localDB then user else "nginx";
+ # so that the des_key is not world readable
+ StateDirectoryMode = "0700";
+ };
}
];
};
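Two sketches of the new Roundcube database handling; with database.host = "localhost" the module provisions the PostgreSQL role and relies on peer authentication over the unix socket, while a remote host takes a passwordFile readable by the web server user instead of the deprecated password option (hostnames and paths are placeholders):

    # Local database, peer authentication, no password needed:
    services.roundcube = {
      enable = true;
      database.host = "localhost";
    };

    # Remote database:
    services.roundcube = {
      enable = true;
      database = {
        host = "db.example.org";
        passwordFile = "/var/lib/secrets/roundcube-db-password";  # must be readable by the nginx user
      };
    };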
diff --git a/nixpkgs/nixos/modules/services/mail/spamassassin.nix b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
index 107280f7c14..2d5fb40fad3 100644
--- a/nixpkgs/nixos/modules/services/mail/spamassassin.nix
+++ b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
@@ -5,16 +5,6 @@ with lib;
let
cfg = config.services.spamassassin;
spamassassin-local-cf = pkgs.writeText "local.cf" cfg.config;
- spamassassin-init-pre = pkgs.writeText "init.pre" cfg.initPreConf;
-
- spamdEnv = pkgs.buildEnv {
- name = "spamd-env";
- paths = [];
- postBuild = ''
- ln -sf ${spamassassin-init-pre} $out/init.pre
- ln -sf ${spamassassin-local-cf} $out/local.cf
- '';
- };
in
@@ -65,8 +55,9 @@ in
};
initPreConf = mkOption {
- type = types.str;
+ type = with types; either str path;
description = "The SpamAssassin init.pre config.";
+ apply = val: if builtins.isPath val then val else pkgs.writeText "init.pre" val;
default =
''
#
@@ -120,13 +111,11 @@ in
};
config = mkIf cfg.enable {
+ environment.etc."mail/spamassassin/init.pre".source = cfg.initPreConf;
+ environment.etc."mail/spamassassin/local.cf".source = spamassassin-local-cf;
# Allow users to run 'spamc'.
-
- environment = {
- etc = singleton { source = spamdEnv; target = "spamassassin"; };
- systemPackages = [ pkgs.spamassassin ];
- };
+ environment.systemPackages = [ pkgs.spamassassin ];
users.users.spamd = {
description = "Spam Assassin Daemon";
@@ -141,7 +130,7 @@ in
systemd.services.sa-update = {
script = ''
set +e
- ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/ --siteconfigpath=${spamdEnv}/" spamd
+ ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/" spamd
v=$?
set -e
@@ -172,7 +161,7 @@ in
after = [ "network.target" ];
serviceConfig = {
- ExecStart = "${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --siteconfigpath=${spamdEnv} --virtual-config-dir=/var/lib/spamassassin/user-%u --allow-tell --pidfile=/run/spamd.pid";
+ ExecStart = "${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --virtual-config-dir=/var/lib/spamassassin/user-%u --allow-tell --pidfile=/run/spamd.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
};
@@ -183,7 +172,7 @@ in
mkdir -p /var/lib/spamassassin
chown spamd:spamd /var/lib/spamassassin -R
set +e
- ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/ --siteconfigpath=${spamdEnv}/" spamd
+ ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/" spamd
v=$?
set -e
if [ $v -gt 1 ]; then
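Since initPreConf now accepts either a string or a path (strings are wrapped into a store file by the apply function), a configuration can point straight at an existing file; the path below is a placeholder:

    services.spamassassin = {
      enable = true;
      initPreConf = ./spamassassin/init.pre;  # a path is used as-is
      # initPreConf = "...";                  # a string still works and is written to the Nix store
    };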
diff --git a/nixpkgs/nixos/modules/services/misc/freeswitch.nix b/nixpkgs/nixos/modules/services/misc/freeswitch.nix
new file mode 100644
index 00000000000..0de5ba42811
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/freeswitch.nix
@@ -0,0 +1,103 @@
+{ config, lib, pkgs, ...}:
+with lib;
+let
+ cfg = config.services.freeswitch;
+ pkg = cfg.package;
+ configDirectory = pkgs.runCommand "freeswitch-config-d" { } ''
+ mkdir -p $out
+ cp -rT ${cfg.configTemplate} $out
+ chmod -R +w $out
+ ${concatStringsSep "\n" (mapAttrsToList (fileName: filePath: ''
+ mkdir -p $out/$(dirname ${fileName})
+ cp ${filePath} $out/${fileName}
+ '') cfg.configDir)}
+ '';
+ configPath = if cfg.enableReload
+ then "/etc/freeswitch"
+ else configDirectory;
+in {
+ options = {
+ services.freeswitch = {
+ enable = mkEnableOption "FreeSWITCH";
+ enableReload = mkOption {
+ default = false;
+ type = types.bool;
+ description = ''
+          Issue the <literal>reloadxml</literal> command to FreeSWITCH when the configuration directory changes (instead of restarting the service).
+ See <link xlink:href="https://freeswitch.org/confluence/display/FREESWITCH/Reloading">FreeSWITCH documentation</link> for more info.
+ The configuration directory is exposed at <filename>/etc/freeswitch</filename>.
+ See also <literal>systemd.services.*.restartIfChanged</literal>.
+ '';
+ };
+ configTemplate = mkOption {
+ type = types.path;
+ default = "${config.services.freeswitch.package}/share/freeswitch/conf/vanilla";
+ defaultText = literalExample "\${config.services.freeswitch.package}/share/freeswitch/conf/vanilla";
+ example = literalExample "\${config.services.freeswitch.package}/share/freeswitch/conf/minimal";
+ description = ''
+ Configuration template to use.
+ See available templates in <link xlink:href="https://github.com/signalwire/freeswitch/tree/master/conf">FreeSWITCH repository</link>.
+ You can also set your own configuration directory.
+ '';
+ };
+ configDir = mkOption {
+ type = with types; attrsOf path;
+ default = { };
+ example = literalExample ''
+ {
+ "freeswitch.xml" = ./freeswitch.xml;
+ "dialplan/default.xml" = pkgs.writeText "dialplan-default.xml" '''
+ [xml lines]
+ ''';
+ }
+ '';
+ description = ''
+          Override files in the FreeSWITCH config template directory.
+          Each attribute name is a file path relative to the configuration directory; its value is the source file to copy there.
+ See <link xlink:href="https://freeswitch.org/confluence/display/FREESWITCH/Default+Configuration">FreeSWITCH documentation</link> for more info.
+ Also check available templates in <link xlink:href="https://github.com/signalwire/freeswitch/tree/master/conf">FreeSWITCH repository</link>.
+ '';
+ };
+ package = mkOption {
+ type = types.package;
+ default = pkgs.freeswitch;
+ defaultText = literalExample "pkgs.freeswitch";
+ example = literalExample "pkgs.freeswitch";
+ description = ''
+ FreeSWITCH package.
+ '';
+ };
+ };
+ };
+ config = mkIf cfg.enable {
+ environment.etc.freeswitch = mkIf cfg.enableReload {
+ source = configDirectory;
+ };
+ systemd.services.freeswitch-config-reload = mkIf cfg.enableReload {
+ before = [ "freeswitch.service" ];
+ wantedBy = [ "multi-user.target" ];
+ restartTriggers = [ configDirectory ];
+ serviceConfig = {
+ ExecStart = "${pkgs.systemd}/bin/systemctl try-reload-or-restart freeswitch.service";
+ RemainAfterExit = true;
+ Type = "oneshot";
+ };
+ };
+ systemd.services.freeswitch = {
+ description = "Free and open-source application server for real-time communication";
+ after = [ "network.target" ];
+ wantedBy = [ "multi-user.target" ];
+ serviceConfig = {
+ DynamicUser = true;
+ StateDirectory = "freeswitch";
+ ExecStart = "${pkg}/bin/freeswitch -nf \\
+ -mod ${pkg}/lib/freeswitch/mod \\
+ -conf ${configPath} \\
+ -base /var/lib/freeswitch";
+ ExecReload = "${pkg}/bin/fs_cli -x reloadxml";
+ Restart = "always";
+ RestartSec = "5s";
+ };
+ };
+ };
+}
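A sketch of the new FreeSWITCH module with live reload and one overridden template file; the file name only illustrates the attribute-name-to-path mapping described in configDir:

    services.freeswitch = {
      enable = true;
      enableReload = true;                   # config exposed at /etc/freeswitch, reloadxml on change
      configDir = {
        "vars.xml" = ./freeswitch/vars.xml;  # placeholder override of a vanilla template file
      };
    };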
diff --git a/nixpkgs/nixos/modules/services/misc/gitea.nix b/nixpkgs/nixos/modules/services/misc/gitea.nix
index 258476dd9fe..38910a5a005 100644
--- a/nixpkgs/nixos/modules/services/misc/gitea.nix
+++ b/nixpkgs/nixos/modules/services/misc/gitea.nix
@@ -364,7 +364,7 @@ in
''}
sed -e "s,#secretkey#,$KEY,g" \
-e "s,#dbpass#,$DBPASS,g" \
- -e "s,#jwtsecet#,$JWTSECET,g" \
+ -e "s,#jwtsecret#,$JWTSECRET,g" \
-e "s,#mailerpass#,$MAILERPASSWORD,g" \
-i ${runConfig}
chmod 640 ${runConfig} ${secretKey} ${jwtSecret}
diff --git a/nixpkgs/nixos/modules/services/misc/home-assistant.nix b/nixpkgs/nixos/modules/services/misc/home-assistant.nix
index cc113ca2d0c..d63f38e93b8 100644
--- a/nixpkgs/nixos/modules/services/misc/home-assistant.nix
+++ b/nixpkgs/nixos/modules/services/misc/home-assistant.nix
@@ -251,6 +251,7 @@ in {
home = cfg.configDir;
createHome = true;
group = "hass";
+ extraGroups = [ "dialout" ];
uid = config.ids.uids.hass;
};
diff --git a/nixpkgs/nixos/modules/services/misc/paperless.nix b/nixpkgs/nixos/modules/services/misc/paperless.nix
index 3985dc0b303..bfaf760fb83 100644
--- a/nixpkgs/nixos/modules/services/misc/paperless.nix
+++ b/nixpkgs/nixos/modules/services/misc/paperless.nix
@@ -123,9 +123,9 @@ in
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
- "d '${cfg.dataDir}' - ${cfg.user} ${cfg.user} - -"
+ "d '${cfg.dataDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
] ++ (optional cfg.consumptionDirIsPublic
- "d '${cfg.consumptionDir}' 777 ${cfg.user} ${cfg.user} - -"
+ "d '${cfg.consumptionDir}' 777 - - - -"
# If the consumption dir is not created here, it's automatically created by
# 'manage' with the default permissions.
);
@@ -169,17 +169,15 @@ in
};
users = optionalAttrs (cfg.user == defaultUser) {
- users = [{
- name = defaultUser;
+ users.${defaultUser} = {
group = defaultUser;
uid = config.ids.uids.paperless;
home = cfg.dataDir;
- }];
+ };
- groups = [{
- name = defaultUser;
+ groups.${defaultUser} = {
gid = config.ids.gids.paperless;
- }];
+ };
};
};
}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
index 9af6b1d94f3..4534d150885 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -18,7 +18,7 @@ let
in checkedConfig yml;
cmdlineArgs = cfg.extraFlags ++ [
- "--config.file ${alertmanagerYml}"
+ "--config.file /tmp/alert-manager-substituted.yaml"
"--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
"--log.level ${cfg.logLevel}"
] ++ (optional (cfg.webExternalUrl != null)
@@ -127,6 +127,18 @@ in {
Extra commandline options when launching the Alertmanager.
'';
};
+
+ environmentFile = mkOption {
+ type = types.nullOr types.path;
+ default = null;
+ example = "/root/alertmanager.env";
+ description = ''
+ File to load as environment file. Environment variables
+ from this file will be interpolated into the config file
+ using envsubst with this syntax:
+ <literal>$ENVIRONMENT ''${VARIABLE}</literal>
+ '';
+ };
};
};
@@ -144,9 +156,14 @@ in {
systemd.services.alertmanager = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
+ preStart = ''
+ ${lib.getBin pkgs.envsubst}/bin/envsubst -o "/tmp/alert-manager-substituted.yaml" \
+ -i "${alertmanagerYml}"
+ '';
serviceConfig = {
Restart = "always";
- DynamicUser = true;
+ DynamicUser = true; # implies PrivateTmp
+ EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
WorkingDirectory = "/tmp";
ExecStart = "${cfg.package}/bin/alertmanager" +
optionalString (length cmdlineArgs != 0) (" \\\n " +
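A sketch of the new environmentFile option; the file name and variable are placeholders, and configuration is the module's existing option for the Alertmanager YAML (not part of this hunk). Variables are expanded by envsubst into the generated config before the daemon starts:

    services.prometheus.alertmanager = {
      enable = true;
      environmentFile = "/root/alertmanager.env";  # contains e.g. SMTP_PASSWORD=...
      configuration.global.smtp_auth_password = "$SMTP_PASSWORD";  # substituted at service start
    };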
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
index f40819e826b..d50564717ea 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
@@ -74,7 +74,7 @@ in
then "--systemd.slice ${cfg.systemd.slice}"
else "--systemd.unit ${cfg.systemd.unit}")
++ optional (cfg.systemd.enable && (cfg.systemd.journalPath != null))
- "--systemd.jounal_path ${cfg.systemd.journalPath}"
+ "--systemd.journal_path ${cfg.systemd.journalPath}"
++ optional (!cfg.systemd.enable) "--postfix.logfile_path ${cfg.logfilePath}")}
'';
};
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix b/nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix
index 263b70d04a5..a43ac656f66 100644
--- a/nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix
+++ b/nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix
@@ -1,6 +1,7 @@
{ config, lib, pkgs, ... }:
with lib;
let
+ inherit (config.security) wrapperDir;
cfg = config.services.kbfs;
in {
@@ -17,6 +18,16 @@ in {
description = "Whether to mount the Keybase filesystem.";
};
+ enableRedirector = mkOption {
+ type = types.bool;
+ default = false;
+ description = ''
+ Whether to enable the Keybase root redirector service, allowing
+ any user to access KBFS files via <literal>/keybase</literal>,
+ which will show different contents depending on the requester.
+ '';
+ };
+
mountPoint = mkOption {
type = types.str;
default = "%h/keybase";
@@ -41,26 +52,67 @@ in {
###### implementation
- config = mkIf cfg.enable {
-
- systemd.user.services.kbfs = {
- description = "Keybase File System";
- requires = [ "keybase.service" ];
- after = [ "keybase.service" ];
- path = [ "/run/wrappers" ];
- unitConfig.ConditionUser = "!@system";
- serviceConfig = {
- ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p ${cfg.mountPoint}";
- ExecStart = "${pkgs.kbfs}/bin/kbfsfuse ${toString cfg.extraFlags} ${cfg.mountPoint}";
- ExecStopPost = "/run/wrappers/bin/fusermount -u ${cfg.mountPoint}";
- Restart = "on-failure";
- PrivateTmp = true;
+ config = mkIf cfg.enable (mkMerge [
+ {
+ # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/kbfs.service
+ systemd.user.services.kbfs = {
+ description = "Keybase File System";
+
+ # Note that the "Requires" directive will cause a unit to be restarted whenever its dependency is restarted.
+ # Do not issue a hard dependency on keybase, because kbfs can reconnect to a restarted service.
+ # Do not issue a hard dependency on keybase-redirector, because it's ok if it fails (e.g., if it is disabled).
+ wants = [ "keybase.service" ] ++ optional cfg.enableRedirector "keybase-redirector.service";
+ path = [ "/run/wrappers" ];
+ unitConfig.ConditionUser = "!@system";
+
+ serviceConfig = {
+ Type = "notify";
+ # Keybase notifies from a forked process
+ EnvironmentFile = [
+ "-%E/keybase/keybase.autogen.env"
+ "-%E/keybase/keybase.env"
+ ];
+ ExecStartPre = [
+ "${pkgs.coreutils}/bin/mkdir -p \"${cfg.mountPoint}\""
+ "-${wrapperDir}/fusermount -uz \"${cfg.mountPoint}\""
+ ];
+ ExecStart = "${pkgs.kbfs}/bin/kbfsfuse ${toString cfg.extraFlags} \"${cfg.mountPoint}\"";
+ ExecStop = "${wrapperDir}/fusermount -uz \"${cfg.mountPoint}\"";
+ Restart = "on-failure";
+ PrivateTmp = true;
+ };
+ wantedBy = [ "default.target" ];
};
- wantedBy = [ "default.target" ];
- };
- services.keybase.enable = true;
+ services.keybase.enable = true;
- environment.systemPackages = [ pkgs.kbfs ];
- };
+ environment.systemPackages = [ pkgs.kbfs ];
+ }
+
+ (mkIf cfg.enableRedirector {
+ security.wrappers."keybase-redirector".source = "${pkgs.kbfs}/bin/redirector";
+
+ systemd.tmpfiles.rules = [ "d /keybase 0755 root root 0" ];
+
+ # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/keybase-redirector.service
+ systemd.user.services.keybase-redirector = {
+ description = "Keybase Root Redirector for KBFS";
+ wants = [ "keybase.service" ];
+ unitConfig.ConditionUser = "!@system";
+
+ serviceConfig = {
+ EnvironmentFile = [
+ "-%E/keybase/keybase.autogen.env"
+ "-%E/keybase/keybase.env"
+ ];
+ # Note: The /keybase mount point is not currently configurable upstream.
+ ExecStart = "${wrapperDir}/keybase-redirector /keybase";
+ Restart = "on-failure";
+ PrivateTmp = true;
+ };
+
+ wantedBy = [ "default.target" ];
+ };
+ })
+ ]);
}
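A sketch of enabling the new redirector alongside KBFS; this installs the keybase-redirector security wrapper and the /keybase mount point described above:

    services.kbfs = {
      enable = true;
      enableRedirector = true;  # any user sees per-requester contents under /keybase
    };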
diff --git a/nixpkgs/nixos/modules/services/networking/bitlbee.nix b/nixpkgs/nixos/modules/services/networking/bitlbee.nix
index 54fe70f7ccc..01a16698384 100644
--- a/nixpkgs/nixos/modules/services/networking/bitlbee.nix
+++ b/nixpkgs/nixos/modules/services/networking/bitlbee.nix
@@ -168,8 +168,7 @@ in
createHome = true;
};
- users.groups = singleton {
- name = "bitlbee";
+ users.groups.bitlbee = {
gid = config.ids.gids.bitlbee;
};
diff --git a/nixpkgs/nixos/modules/services/networking/corerad.nix b/nixpkgs/nixos/modules/services/networking/corerad.nix
new file mode 100644
index 00000000000..1a2c4aec665
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/corerad.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.services.corerad;
+in {
+ meta = {
+ maintainers = with maintainers; [ mdlayher ];
+ };
+
+ options.services.corerad = {
+ enable = mkEnableOption "CoreRAD IPv6 NDP RA daemon";
+
+ configFile = mkOption {
+ type = types.path;
+ example = literalExample "\"\${pkgs.corerad}/etc/corerad/corerad.toml\"";
+ description = "Path to CoreRAD TOML configuration file.";
+ };
+
+ package = mkOption {
+ default = pkgs.corerad;
+ defaultText = literalExample "pkgs.corerad";
+ type = types.package;
+ description = "CoreRAD package to use.";
+ };
+ };
+
+ config = mkIf cfg.enable {
+ systemd.services.corerad = {
+ description = "CoreRAD IPv6 NDP RA daemon";
+ after = [ "network.target" ];
+ wantedBy = [ "multi-user.target" ];
+ serviceConfig = {
+ LimitNPROC = 512;
+ LimitNOFILE = 1048576;
+ CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
+ AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_RAW";
+ NoNewPrivileges = true;
+ DynamicUser = true;
+ ExecStart = "${getBin cfg.package}/bin/corerad -c=${cfg.configFile}";
+ Restart = "on-failure";
+ };
+ };
+ };
+}
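A sketch of the new CoreRAD module; the configuration path is a placeholder, and the option's own example points at the sample shipped in ${pkgs.corerad}/etc/corerad/corerad.toml:

    services.corerad = {
      enable = true;
      configFile = "/etc/corerad/corerad.toml";  # placeholder TOML config provisioned by the admin
    };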
diff --git a/nixpkgs/nixos/modules/services/networking/dhcpcd.nix b/nixpkgs/nixos/modules/services/networking/dhcpcd.nix
index 6fbc014db71..6972c833cc5 100644
--- a/nixpkgs/nixos/modules/services/networking/dhcpcd.nix
+++ b/nixpkgs/nixos/modules/services/networking/dhcpcd.nix
@@ -59,6 +59,16 @@ let
# Use the list of allowed interfaces if specified
${optionalString (allowInterfaces != null) "allowinterfaces ${toString allowInterfaces}"}
+ # Immediately fork to background if specified, otherwise wait for IP address to be assigned
+ ${{
+ background = "background";
+ any = "waitip";
+ ipv4 = "waitip 4";
+ ipv6 = "waitip 6";
+ both = "waitip 4\nwaitip 6";
+ if-carrier-up = "";
+ }.${cfg.wait}}
+
${cfg.extraConfig}
'';
@@ -146,6 +156,21 @@ in
'';
};
+ networking.dhcpcd.wait = mkOption {
+ type = types.enum [ "background" "any" "ipv4" "ipv6" "both" "if-carrier-up" ];
+ default = "any";
+ description = ''
+ This option specifies when the dhcpcd service will fork to background.
+ If set to "background", dhcpcd will fork to background immediately.
+ If set to "ipv4" or "ipv6", dhcpcd will wait for the corresponding IP
+ address to be assigned. If set to "any", dhcpcd will wait for any type
+ (IPv4 or IPv6) to be assigned. If set to "both", dhcpcd will wait for
+ both an IPv4 and an IPv6 address before forking.
+        The option "if-carrier-up" is equivalent to "any" if ethernet
+        is plugged in or WiFi is powered on, and to "background" otherwise.
+ '';
+ };
+
};
@@ -177,7 +202,7 @@ in
serviceConfig =
{ Type = "forking";
PIDFile = "/run/dhcpcd.pid";
- ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd -w --quiet ${optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}";
+ ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd --quiet ${optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}";
ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind";
Restart = "always";
};
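As a worked example of the new wait option, the default "any" emits a waitip directive into dhcpcd.conf, while "background" emits background so boot no longer blocks on address assignment:

    networking.dhcpcd.wait = "background";  # fork immediately; an address may arrive later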
diff --git a/nixpkgs/nixos/modules/services/networking/keybase.nix b/nixpkgs/nixos/modules/services/networking/keybase.nix
index 85f52be8a6a..495102cb7ee 100644
--- a/nixpkgs/nixos/modules/services/networking/keybase.nix
+++ b/nixpkgs/nixos/modules/services/networking/keybase.nix
@@ -24,13 +24,18 @@ in {
config = mkIf cfg.enable {
+ # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/keybase.service
systemd.user.services.keybase = {
description = "Keybase service";
unitConfig.ConditionUser = "!@system";
+ environment.KEYBASE_SERVICE_TYPE = "systemd";
serviceConfig = {
- ExecStart = ''
- ${pkgs.keybase}/bin/keybase service --auto-forked
- '';
+ Type = "notify";
+ EnvironmentFile = [
+ "-%E/keybase/keybase.autogen.env"
+ "-%E/keybase/keybase.env"
+ ];
+ ExecStart = "${pkgs.keybase}/bin/keybase service";
Restart = "on-failure";
PrivateTmp = true;
};
diff --git a/nixpkgs/nixos/modules/services/networking/knot.nix b/nixpkgs/nixos/modules/services/networking/knot.nix
index 1cc1dd3f2f6..47364ecb846 100644
--- a/nixpkgs/nixos/modules/services/networking/knot.nix
+++ b/nixpkgs/nixos/modules/services/networking/knot.nix
@@ -56,6 +56,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.knot-dns;
+ defaultText = "pkgs.knot-dns";
description = ''
Which Knot DNS package to use
'';
@@ -92,4 +93,3 @@ in {
environment.systemPackages = [ knot-cli-wrappers ];
};
}
-
diff --git a/nixpkgs/nixos/modules/services/networking/kresd.nix b/nixpkgs/nixos/modules/services/networking/kresd.nix
index 5eb50a13ca9..bb941e93e15 100644
--- a/nixpkgs/nixos/modules/services/networking/kresd.nix
+++ b/nixpkgs/nixos/modules/services/networking/kresd.nix
@@ -5,12 +5,15 @@ with lib;
let
cfg = config.services.kresd;
- package = pkgs.knot-resolver;
+ configFile = pkgs.writeText "kresd.conf" ''
+ ${optionalString (cfg.listenDoH != []) "modules.load('http')"}
+ ${cfg.extraConfig};
+ '';
- configFile = pkgs.writeText "kresd.conf" cfg.extraConfig;
-in
-
-{
+ package = pkgs.knot-resolver.override {
+ extraFeatures = cfg.listenDoH != [];
+ };
+in {
meta.maintainers = [ maintainers.vcunat /* upstream developer */ ];
imports = [
@@ -67,6 +70,15 @@ in
For detailed syntax see ListenStream in man systemd.socket.
'';
};
+ listenDoH = mkOption {
+ type = with types; listOf str;
+ default = [];
+ example = [ "198.51.100.1:443" "[2001:db8::1]:443" "443" ];
+ description = ''
+        Addresses and ports on which kresd should provide DNS over HTTPS (see RFC 8484).
+ For detailed syntax see ListenStream in man systemd.socket.
+ '';
+ };
# TODO: perhaps options for more common stuff like cache size or forwarding
};
@@ -104,6 +116,18 @@ in
};
};
+ systemd.sockets.kresd-doh = mkIf (cfg.listenDoH != []) rec {
+ wantedBy = [ "sockets.target" ];
+ before = wantedBy;
+ partOf = [ "kresd.socket" ];
+ listenStreams = cfg.listenDoH;
+ socketConfig = {
+ FileDescriptorName = "doh";
+ FreeBind = true;
+ Service = "kresd.service";
+ };
+ };
+
systemd.sockets.kresd-control = rec {
wantedBy = [ "sockets.target" ];
before = wantedBy;
diff --git a/nixpkgs/nixos/modules/services/networking/matterbridge.nix b/nixpkgs/nixos/modules/services/networking/matterbridge.nix
index bad35133459..b8b4f37c84a 100644
--- a/nixpkgs/nixos/modules/services/networking/matterbridge.nix
+++ b/nixpkgs/nixos/modules/services/networking/matterbridge.nix
@@ -111,7 +111,7 @@ in
serviceConfig = {
User = cfg.user;
Group = cfg.group;
- ExecStart = "${pkgs.matterbridge.bin}/bin/matterbridge -conf ${matterbridgeConfToml}";
+ ExecStart = "${pkgs.matterbridge}/bin/matterbridge -conf ${matterbridgeConfToml}";
Restart = "always";
RestartSec = "10";
};
diff --git a/nixpkgs/nixos/modules/services/networking/nat.nix b/nixpkgs/nixos/modules/services/networking/nat.nix
index f1238bc6b16..9c658af30f7 100644
--- a/nixpkgs/nixos/modules/services/networking/nat.nix
+++ b/nixpkgs/nixos/modules/services/networking/nat.nix
@@ -68,7 +68,7 @@ let
destinationPorts = if (m == null) then throw "bad ip:ports `${fwd.destination}'" else elemAt m 1;
in ''
# Allow connections to ${loopbackip}:${toString fwd.sourcePort} from the host itself
- iptables -w -t nat -A OUTPUT \
+ iptables -w -t nat -A nixos-nat-out \
-d ${loopbackip} -p ${fwd.proto} \
--dport ${builtins.toString fwd.sourcePort} \
-j DNAT --to-destination ${fwd.destination}
diff --git a/nixpkgs/nixos/modules/services/networking/ndppd.nix b/nixpkgs/nixos/modules/services/networking/ndppd.nix
index 92088623517..e015f76f622 100644
--- a/nixpkgs/nixos/modules/services/networking/ndppd.nix
+++ b/nixpkgs/nixos/modules/services/networking/ndppd.nix
@@ -161,7 +161,25 @@ in {
documentation = [ "man:ndppd(1)" "man:ndppd.conf(5)" ];
after = [ "network-pre.target" ];
wantedBy = [ "multi-user.target" ];
- serviceConfig.ExecStart = "${pkgs.ndppd}/bin/ndppd -c ${ndppdConf}";
+ serviceConfig = {
+ ExecStart = "${pkgs.ndppd}/bin/ndppd -c ${ndppdConf}";
+
+ # Sandboxing
+ CapabilityBoundingSet = "CAP_NET_RAW CAP_NET_ADMIN";
+ ProtectSystem = "strict";
+ ProtectHome = true;
+ PrivateTmp = true;
+ PrivateDevices = true;
+ ProtectKernelTunables = true;
+ ProtectKernelModules = true;
+ ProtectControlGroups = true;
+ RestrictAddressFamilies = "AF_INET6 AF_PACKET AF_NETLINK";
+ RestrictNamespaces = true;
+ LockPersonality = true;
+ MemoryDenyWriteExecute = true;
+ RestrictRealtime = true;
+ RestrictSUIDSGID = true;
+ };
};
};
}
diff --git a/nixpkgs/nixos/modules/services/networking/syncthing.nix b/nixpkgs/nixos/modules/services/networking/syncthing.nix
index 47b10e408c0..5b3eb6f04b4 100644
--- a/nixpkgs/nixos/modules/services/networking/syncthing.nix
+++ b/nixpkgs/nixos/modules/services/networking/syncthing.nix
@@ -484,6 +484,24 @@ in {
-gui-address=${cfg.guiAddress} \
-home=${cfg.configDir}
'';
+ MemoryDenyWriteExecute = true;
+ NoNewPrivileges = true;
+ PrivateDevices = true;
+ PrivateMounts = true;
+ PrivateTmp = true;
+ PrivateUsers = true;
+ ProtectControlGroups = true;
+ ProtectHostname = true;
+ ProtectKernelModules = true;
+ ProtectKernelTunables = true;
+ RestrictNamespaces = true;
+ RestrictRealtime = true;
+ RestrictSUIDSGID = true;
+ CapabilityBoundingSet = [
+ "~CAP_SYS_PTRACE" "~CAP_SYS_ADMIN"
+ "~CAP_SETGID" "~CAP_SETUID" "~CAP_SETPCAP"
+ "~CAP_SYS_TIME" "~CAP_KILL"
+ ];
};
};
syncthing-init = mkIf (
diff --git a/nixpkgs/nixos/modules/services/networking/unifi.nix b/nixpkgs/nixos/modules/services/networking/unifi.nix
index c922ba15960..4bdfa8143dc 100644
--- a/nixpkgs/nixos/modules/services/networking/unifi.nix
+++ b/nixpkgs/nixos/modules/services/networking/unifi.nix
@@ -147,8 +147,10 @@ in
}) mountPoints;
systemd.tmpfiles.rules = [
- "e '${stateDir}' 0700 unifi - - -"
+ "d '${stateDir}' 0700 unifi - - -"
"d '${stateDir}/data' 0700 unifi - - -"
+ "d '${stateDir}/webapps' 0700 unifi - - -"
+ "L+ '${stateDir}/webapps/ROOT' - - - - ${cfg.unifiPackage}/webapps/ROOT"
];
systemd.services.unifi = {
@@ -161,17 +163,6 @@ in
# This is a HACK to fix missing dependencies of dynamic libs extracted from jars
environment.LD_LIBRARY_PATH = with pkgs.stdenv; "${cc.cc.lib}/lib";
- preStart = ''
- # Create the volatile webapps
- rm -rf "${stateDir}/webapps"
- mkdir -p "${stateDir}/webapps"
- ln -s "${cfg.unifiPackage}/webapps/ROOT" "${stateDir}/webapps/ROOT"
- '';
-
- postStop = ''
- rm -rf "${stateDir}/webapps"
- '';
-
serviceConfig = {
Type = "simple";
ExecStart = "${(removeSuffix "\n" cmd)} start";
diff --git a/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix b/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
index 8f05c3949fb..de0f11595a9 100644
--- a/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
@@ -233,6 +233,7 @@ in {
path = [ pkgs.wpa_supplicant ];
script = ''
+ iface_args="-s -u -D${cfg.driver} -c ${configFile}"
${if ifaces == [] then ''
for i in $(cd /sys/class/net && echo *); do
DEVTYPE=
@@ -240,14 +241,14 @@ in {
if [ -e "$UEVENT_PATH" ]; then
source "$UEVENT_PATH"
if [ "$DEVTYPE" = "wlan" -o -e /sys/class/net/$i/wireless ]; then
- ifaces="$ifaces''${ifaces:+ -N} -i$i"
+ args+="''${args:+ -N} -i$i $iface_args"
fi
fi
done
'' else ''
- ifaces="${concatStringsSep " -N " (map (i: "-i${i}") ifaces)}"
+ args="${concatMapStringsSep " -N " (i: "-i${i} $iface_args") ifaces}"
''}
- exec wpa_supplicant -s -u -D${cfg.driver} -c ${configFile} $ifaces
+ exec wpa_supplicant $args
'';
};
diff --git a/nixpkgs/nixos/modules/services/networking/xandikos.nix b/nixpkgs/nixos/modules/services/networking/xandikos.nix
new file mode 100644
index 00000000000..87c029156b9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/xandikos.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.services.xandikos;
+in
+{
+
+ options = {
+ services.xandikos = {
+ enable = mkEnableOption "Xandikos CalDAV and CardDAV server";
+
+ package = mkOption {
+ type = types.package;
+ default = pkgs.xandikos;
+ defaultText = "pkgs.xandikos";
+ description = "The Xandikos package to use.";
+ };
+
+ address = mkOption {
+ type = types.str;
+ default = "localhost";
+ description = ''
+ The IP address on which Xandikos will listen.
+ By default listens on localhost.
+ '';
+ };
+
+ port = mkOption {
+ type = types.port;
+ default = 8080;
+ description = "The port of the Xandikos web application";
+ };
+
+ routePrefix = mkOption {
+ type = types.str;
+ default = "/";
+ description = ''
+ Path to Xandikos.
+ Useful when Xandikos is behind a reverse proxy.
+ '';
+ };
+
+ extraOptions = mkOption {
+ default = [];
+ type = types.listOf types.str;
+ example = literalExample ''
+ [ "--autocreate"
+ "--defaults"
+ "--current-user-principal user"
+ "--dump-dav-xml"
+ ]
+ '';
+ description = ''
+ Extra command line arguments to pass to xandikos.
+ '';
+ };
+
+ nginx = mkOption {
+ default = {};
+ description = ''
+ Configuration for nginx reverse proxy.
+ '';
+
+ type = types.submodule {
+ options = {
+ enable = mkOption {
+ type = types.bool;
+ default = false;
+ description = ''
+ Configure the nginx reverse proxy settings.
+ '';
+ };
+
+ hostName = mkOption {
+ type = types.str;
+ description = ''
+              The hostname used to set up the virtualHost configuration.
+ '';
+ };
+ };
+ };
+ };
+
+ };
+
+ };
+
+ config = mkIf cfg.enable (
+ mkMerge [
+ {
+ meta.maintainers = [ lib.maintainers."0x4A6F" ];
+
+ systemd.services.xandikos = {
+ description = "A Simple Calendar and Contact Server";
+ after = [ "network.target" ];
+ wantedBy = [ "multi-user.target" ];
+
+ serviceConfig = {
+ User = "xandikos";
+ Group = "xandikos";
+ DynamicUser = "yes";
+ RuntimeDirectory = "xandikos";
+ StateDirectory = "xandikos";
+ StateDirectoryMode = "0700";
+ PrivateDevices = true;
+ # Sandboxing
+ CapabilityBoundingSet = "CAP_NET_RAW CAP_NET_ADMIN";
+ ProtectSystem = "strict";
+ ProtectHome = true;
+ PrivateTmp = true;
+ ProtectKernelTunables = true;
+ ProtectKernelModules = true;
+ ProtectControlGroups = true;
+ RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX AF_PACKET AF_NETLINK";
+ RestrictNamespaces = true;
+ LockPersonality = true;
+ MemoryDenyWriteExecute = true;
+ RestrictRealtime = true;
+ RestrictSUIDSGID = true;
+ ExecStart = ''
+ ${cfg.package}/bin/xandikos \
+ --directory /var/lib/xandikos \
+ --listen_address ${cfg.address} \
+ --port ${toString cfg.port} \
+ --route-prefix ${cfg.routePrefix} \
+ ${lib.concatStringsSep " " cfg.extraOptions}
+ '';
+ };
+ };
+ }
+
+ (
+ mkIf cfg.nginx.enable {
+ services.nginx = {
+ enable = true;
+ virtualHosts."${cfg.nginx.hostName}" = {
+ locations."/" = {
+ proxyPass = "http://${cfg.address}:${toString cfg.port}/";
+ };
+ };
+ };
+ }
+ )
+ ]
+ );
+}
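
A minimal usage sketch for the new services.xandikos module added above; the host name is a placeholder, and the extra options are taken from the option's own example:

    {
      services.xandikos = {
        enable = true;
        extraOptions = [ "--autocreate" "--defaults" "--current-user-principal user" ];
        nginx = {
          enable = true;
          hostName = "dav.example.org";  # placeholder host name
        };
      };
    }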
diff --git a/nixpkgs/nixos/modules/services/networking/zerotierone.nix b/nixpkgs/nixos/modules/services/networking/zerotierone.nix
index 764af3846fe..069e15a909b 100644
--- a/nixpkgs/nixos/modules/services/networking/zerotierone.nix
+++ b/nixpkgs/nixos/modules/services/networking/zerotierone.nix
@@ -38,10 +38,13 @@ in
config = mkIf cfg.enable {
systemd.services.zerotierone = {
description = "ZeroTierOne";
- path = [ cfg.package ];
- bindsTo = [ "network-online.target" ];
- after = [ "network-online.target" ];
+
wantedBy = [ "multi-user.target" ];
+ after = [ "network.target" ];
+ wants = [ "network-online.target" ];
+
+ path = [ cfg.package ];
+
preStart = ''
mkdir -p /var/lib/zerotier-one/networks.d
chmod 700 /var/lib/zerotier-one
@@ -53,6 +56,7 @@ in
ExecStart = "${cfg.package}/bin/zerotier-one -p${toString cfg.port}";
Restart = "always";
KillMode = "process";
+ TimeoutStopSec = 5;
};
};
diff --git a/nixpkgs/nixos/modules/services/search/solr.nix b/nixpkgs/nixos/modules/services/search/solr.nix
index b2176225493..a8615a20a1c 100644
--- a/nixpkgs/nixos/modules/services/search/solr.nix
+++ b/nixpkgs/nixos/modules/services/search/solr.nix
@@ -13,19 +13,11 @@ in
services.solr = {
enable = mkEnableOption "Solr";
- # default to the 8.x series not forcing major version upgrade of those on the 7.x series
package = mkOption {
type = types.package;
- default = if versionAtLeast config.system.stateVersion "19.09"
- then pkgs.solr_8
- else pkgs.solr_7
- ;
+ default = pkgs.solr;
defaultText = "pkgs.solr";
- description = ''
- Which Solr package to use. This defaults to version 7.x if
- <literal>system.stateVersion &lt; 19.09</literal> and version 8.x
- otherwise.
- '';
+ description = "Which Solr package to use.";
};
port = mkOption {
diff --git a/nixpkgs/nixos/modules/services/security/bitwarden_rs/default.nix b/nixpkgs/nixos/modules/services/security/bitwarden_rs/default.nix
index d1817db0755..a63be0ee766 100644
--- a/nixpkgs/nixos/modules/services/security/bitwarden_rs/default.nix
+++ b/nixpkgs/nixos/modules/services/security/bitwarden_rs/default.nix
@@ -18,15 +18,33 @@ let
else key + toUpper x) "" parts;
in if builtins.match "[A-Z0-9_]+" name != null then name else partsToEnvVar parts;
- configFile = pkgs.writeText "bitwarden_rs.env" (concatMapStrings (s: s + "\n") (
- (concatLists (mapAttrsToList (name: value:
- if value != null then [ "${nameToEnvVar name}=${if isBool value then boolToString value else toString value}" ] else []
- ) cfg.config))));
+ # Due to the different naming schemes allowed for config keys,
+ # we can only check for values consistently after converting them to their corresponding environment variable name.
+ configEnv =
+ let
+ configEnv = listToAttrs (concatLists (mapAttrsToList (name: value:
+ if value != null then [ (nameValuePair (nameToEnvVar name) (if isBool value then boolToString value else toString value)) ] else []
+ ) cfg.config));
+ in { DATA_FOLDER = "/var/lib/bitwarden_rs"; } // optionalAttrs (!(configEnv ? WEB_VAULT_ENABLED) || configEnv.WEB_VAULT_ENABLED == "true") {
+ WEB_VAULT_FOLDER = "${pkgs.bitwarden_rs-vault}/share/bitwarden_rs/vault";
+ } // configEnv;
+
+ configFile = pkgs.writeText "bitwarden_rs.env" (concatStrings (mapAttrsToList (name: value: "${name}=${value}\n") configEnv));
+
+ bitwarden_rs = pkgs.bitwarden_rs.override { inherit (cfg) dbBackend; };
in {
options.services.bitwarden_rs = with types; {
enable = mkEnableOption "bitwarden_rs";
+ dbBackend = mkOption {
+ type = enum [ "sqlite" "mysql" "postgresql" ];
+ default = "sqlite";
+ description = ''
+ Which database backend bitwarden_rs will be using.
+ '';
+ };
+
backupDir = mkOption {
type = nullOr str;
default = null;
@@ -56,23 +74,20 @@ in {
even though foo2 would have been converted to FOO_2.
This allows working around any potential future conflicting naming conventions.
- Based on the attributes passed to this config option a environment file will be generated
+ Based on the attributes passed to this config option an environment file will be generated
that is passed to bitwarden_rs's systemd service.
The available configuration options can be found in
- <link xlink:href="https://github.com/dani-garcia/bitwarden_rs/blob/1.8.0/.env.template">the environment template file</link>.
+ <link xlink:href="https://github.com/dani-garcia/bitwarden_rs/blob/${bitwarden_rs.version}/.env.template">the environment template file</link>.
'';
- apply = config: optionalAttrs config.webVaultEnabled {
- webVaultFolder = "${pkgs.bitwarden_rs-vault}/share/bitwarden_rs/vault";
- } // config;
};
};
config = mkIf cfg.enable {
- services.bitwarden_rs.config = {
- dataFolder = "/var/lib/bitwarden_rs";
- webVaultEnabled = mkDefault true;
- };
+ assertions = [ {
+ assertion = cfg.backupDir != null -> cfg.dbBackend == "sqlite";
+ message = "Backups for database backends other than sqlite will need customization";
+ } ];
users.users.bitwarden_rs = {
inherit group;
@@ -87,7 +102,7 @@ in {
User = user;
Group = group;
EnvironmentFile = configFile;
- ExecStart = "${pkgs.bitwarden_rs}/bin/bitwarden_rs";
+ ExecStart = "${bitwarden_rs}/bin/bitwarden_rs";
LimitNOFILE = "1048576";
LimitNPROC = "64";
PrivateTmp = "true";
@@ -109,6 +124,7 @@ in {
path = with pkgs; [ sqlite ];
serviceConfig = {
SyslogIdentifier = "backup-bitwarden_rs";
+ Type = "oneshot";
User = mkDefault user;
Group = mkDefault group;
ExecStart = "${pkgs.bash}/bin/bash ${./backup.sh}";
diff --git a/nixpkgs/nixos/modules/services/security/certmgr.nix b/nixpkgs/nixos/modules/services/security/certmgr.nix
index e89078883eb..94c0ba14117 100644
--- a/nixpkgs/nixos/modules/services/security/certmgr.nix
+++ b/nixpkgs/nixos/modules/services/security/certmgr.nix
@@ -113,7 +113,7 @@ in
otherCert = "/var/certmgr/specs/other-cert.json";
}
'';
- type = with types; attrsOf (either (submodule {
+ type = with types; attrsOf (either path (submodule {
options = {
service = mkOption {
type = nullOr str;
@@ -148,7 +148,7 @@ in
description = "certmgr spec request object.";
};
};
- }) path);
+ }));
description = ''
Certificate specs as described by:
<link xlink:href="https://github.com/cloudflare/certmgr#certificate-specs" />
diff --git a/nixpkgs/nixos/modules/services/security/fail2ban.nix b/nixpkgs/nixos/modules/services/security/fail2ban.nix
index 716ae7a2d2f..cb748c93d24 100644
--- a/nixpkgs/nixos/modules/services/security/fail2ban.nix
+++ b/nixpkgs/nixos/modules/services/security/fail2ban.nix
@@ -6,15 +6,32 @@ let
cfg = config.services.fail2ban;
- fail2banConf = pkgs.writeText "fail2ban.conf" cfg.daemonConfig;
+ fail2banConf = pkgs.writeText "fail2ban.local" cfg.daemonConfig;
- jailConf = pkgs.writeText "jail.conf"
- (concatStringsSep "\n" (attrValues (flip mapAttrs cfg.jails (name: def:
+ jailConf = pkgs.writeText "jail.local" ''
+ [INCLUDES]
+
+ before = paths-nixos.conf
+
+ ${concatStringsSep "\n" (attrValues (flip mapAttrs cfg.jails (name: def:
optionalString (def != "")
''
[${name}]
${def}
- ''))));
+ '')))}
+ '';
+
+ pathsConf = pkgs.writeText "paths-nixos.conf" ''
+ # NixOS
+
+ [INCLUDES]
+
+ before = paths-common.conf
+
+ after = paths-overrides.local
+
+ [DEFAULT]
+ '';
in
@@ -31,21 +48,135 @@ in
description = "Whether to enable the fail2ban service.";
};
+ package = mkOption {
+ default = pkgs.fail2ban;
+ type = types.package;
+ example = "pkgs.fail2ban_0_11";
+ description = "The fail2ban package to use for running the fail2ban service.";
+ };
+
+ packageFirewall = mkOption {
+ default = pkgs.iptables;
+ type = types.package;
+ example = "pkgs.nftables";
+ description = "The firewall package used by fail2ban service.";
+ };
+
+ banaction = mkOption {
+ default = "iptables-multiport";
+ type = types.str;
+ example = "nftables-multiport";
+ description = ''
+ Default banning action (e.g. iptables, iptables-new, iptables-multiport,
+ shorewall, etc.). It is used to define the action_* variables. It can be overridden
+ globally or per section within the jail.local file.
+ '';
+ };
+
+ banaction-allports = mkOption {
+ default = "iptables-allport";
+ type = types.str;
+ example = "nftables-allport";
+ description = ''
+ Default banning action used when banning an IP on all ports (e.g. iptables-allports,
+ nftables-allports). It is used to define the action_* variables. It can be overridden
+ globally or per section within the jail.local file.
+ '';
+ };
+
+ bantime-increment.enable = mkOption {
+ default = false;
+ type = types.bool;
+ description = ''
+ Allows using the database to search for previously banned IPs and increase
+ the default ban time using a special formula; by default it is banTime * 1, 2, 4, 8, 16, 32...
+ '';
+ };
+
+ bantime-increment.rndtime = mkOption {
+ default = "4m";
+ type = types.str;
+ example = "8m";
+ description = ''
+ "bantime-increment.rndtime" is the max number of seconds using for mixing with random time
+ to prevent "clever" botnets calculate exact time IP can be unbanned again
+ '';
+ };
+
+ bantime-increment.maxtime = mkOption {
+ default = "10h";
+ type = types.str;
+ example = "48h";
+ description = ''
+ "bantime-increment.maxtime" is the max number of seconds using the ban time can reach (don't grows further)
+ '';
+ };
+
+ bantime-increment.factor = mkOption {
+ default = "1";
+ type = types.str;
+ example = "4";
+ description = ''
+ "bantime-increment.factor" is a coefficient to calculate exponent growing of the formula or common multiplier,
+ default value of factor is 1 and with default value of formula, the ban time grows by 1, 2, 4, 8, 16 ...
+ '';
+ };
+
+ bantime-increment.formula = mkOption {
+ default = "ban.Time * (1<<(ban.Count if ban.Count<20 else 20)) * banFactor";
+ type = types.str;
+ example = "ban.Time * math.exp(float(ban.Count+1)*banFactor)/math.exp(1*banFactor)";
+ description = ''
+ "bantime-increment.formula" used by default to calculate next value of ban time, default value bellow,
+ the same ban time growing will be reached by multipliers 1, 2, 4, 8, 16, 32...
+ '';
+ };
+
+ bantime-increment.multipliers = mkOption {
+ default = "1 2 4 8 16 32 64";
+ type = types.str;
+ example = "2 4 16 128";
+ description = ''
+ "bantime-increment.multipliers" used to calculate next value of ban time instead of formula, coresponding
+ previously ban count and given "bantime.factor" (for multipliers default is 1);
+ following example grows ban time by 1, 2, 4, 8, 16 ... and if last ban count greater as multipliers count,
+ always used last multiplier (64 in example), for factor '1' and original ban time 600 - 10.6 hours
+ '';
+ };
+
+ bantime-increment.overalljails = mkOption {
+ default = false;
+ type = types.bool;
+ example = true;
+ description = ''
+ "bantime-increment.overalljails" (if true) specifies the search of IP in the database will be executed
+ cross over all jails, if false (dafault), only current jail of the ban IP will be searched
+ '';
+ };
+
+ ignoreIP = mkOption {
+ default = [ ];
+ type = types.listOf types.str;
+ example = [ "192.168.0.0/16" "2001:DB8::42" ];
+ description = ''
+ "ignoreIP" can be a list of IP addresses, CIDR masks or DNS hosts. Fail2ban will not ban a host which
+ matches an address in this list. Several addresses can be defined using space (and/or comma) separator.
+ '';
+ };
+
daemonConfig = mkOption {
- default =
- ''
- [Definition]
- loglevel = INFO
- logtarget = SYSLOG
- socket = /run/fail2ban/fail2ban.sock
- pidfile = /run/fail2ban/fail2ban.pid
- '';
+ default = ''
+ [Definition]
+ logtarget = SYSLOG
+ socket = /run/fail2ban/fail2ban.sock
+ pidfile = /run/fail2ban/fail2ban.pid
+ dbfile = /var/lib/fail2ban/fail2ban.sqlite3
+ '';
type = types.lines;
- description =
- ''
- The contents of Fail2ban's main configuration file. It's
- generally not necessary to change it.
- '';
+ description = ''
+ The contents of Fail2ban's main configuration file. It's
+ generally not necessary to change it.
+ '';
};
jails = mkOption {
@@ -65,88 +196,107 @@ in
}
'';
type = types.attrsOf types.lines;
- description =
- ''
- The configuration of each Fail2ban “jail”. A jail
- consists of an action (such as blocking a port using
- <command>iptables</command>) that is triggered when a
- filter applied to a log file triggers more than a certain
- number of times in a certain time period. Actions are
- defined in <filename>/etc/fail2ban/action.d</filename>,
- while filters are defined in
- <filename>/etc/fail2ban/filter.d</filename>.
- '';
+ description = ''
+ The configuration of each Fail2ban “jail”. A jail
+ consists of an action (such as blocking a port using
+ <command>iptables</command>) that is triggered when a
+ filter applied to a log file triggers more than a certain
+ number of times in a certain time period. Actions are
+ defined in <filename>/etc/fail2ban/action.d</filename>,
+ while filters are defined in
+ <filename>/etc/fail2ban/filter.d</filename>.
+ '';
};
};
};
-
###### implementation
config = mkIf cfg.enable {
- environment.systemPackages = [ pkgs.fail2ban ];
+ environment.systemPackages = [ cfg.package ];
- environment.etc."fail2ban/fail2ban.conf".source = fail2banConf;
- environment.etc."fail2ban/jail.conf".source = jailConf;
- environment.etc."fail2ban/action.d".source = "${pkgs.fail2ban}/etc/fail2ban/action.d/*.conf";
- environment.etc."fail2ban/filter.d".source = "${pkgs.fail2ban}/etc/fail2ban/filter.d/*.conf";
-
- systemd.services.fail2ban =
- { description = "Fail2ban Intrusion Prevention System";
+ environment.etc = {
+ "fail2ban/fail2ban.local".source = fail2banConf;
+ "fail2ban/jail.local".source = jailConf;
+ "fail2ban/fail2ban.conf".source = "${cfg.package}/etc/fail2ban/fail2ban.conf";
+ "fail2ban/jail.conf".source = "${cfg.package}/etc/fail2ban/jail.conf";
+ "fail2ban/paths-common.conf".source = "${cfg.package}/etc/fail2ban/paths-common.conf";
+ "fail2ban/paths-nixos.conf".source = pathsConf;
+ "fail2ban/action.d".source = "${cfg.package}/etc/fail2ban/action.d/*.conf";
+ "fail2ban/filter.d".source = "${cfg.package}/etc/fail2ban/filter.d/*.conf";
+ };
- wantedBy = [ "multi-user.target" ];
- after = [ "network.target" ];
- partOf = optional config.networking.firewall.enable "firewall.service";
+ systemd.services.fail2ban = {
+ description = "Fail2ban Intrusion Prevention System";
- restartTriggers = [ fail2banConf jailConf ];
- path = [ pkgs.fail2ban pkgs.iptables pkgs.iproute ];
+ wantedBy = [ "multi-user.target" ];
+ after = [ "network.target" ];
+ partOf = optional config.networking.firewall.enable "firewall.service";
- preStart =
- ''
- mkdir -p /var/lib/fail2ban
- '';
+ restartTriggers = [ fail2banConf jailConf pathsConf ];
+ reloadIfChanged = true;
- unitConfig.Documentation = "man:fail2ban(1)";
+ path = [ cfg.package cfg.packageFirewall pkgs.iproute ];
- serviceConfig =
- { Type = "forking";
- ExecStart = "${pkgs.fail2ban}/bin/fail2ban-client -x start";
- ExecStop = "${pkgs.fail2ban}/bin/fail2ban-client stop";
- ExecReload = "${pkgs.fail2ban}/bin/fail2ban-client reload";
- PIDFile = "/run/fail2ban/fail2ban.pid";
- Restart = "always";
+ unitConfig.Documentation = "man:fail2ban(1)";
- ReadOnlyDirectories = "/";
- ReadWriteDirectories = "/run/fail2ban /var/tmp /var/lib";
- PrivateTmp = "true";
- RuntimeDirectory = "fail2ban";
- CapabilityBoundingSet = "CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_NET_RAW";
- };
+ serviceConfig = {
+ ExecStart = "${cfg.package}/bin/fail2ban-server -xf start";
+ ExecStop = "${cfg.package}/bin/fail2ban-server stop";
+ ExecReload = "${cfg.package}/bin/fail2ban-server reload";
+ Type = "simple";
+ Restart = "on-failure";
+ PIDFile = "/run/fail2ban/fail2ban.pid";
+ # Capabilities
+ CapabilityBoundingSet = [ "CAP_AUDIT_READ" "CAP_DAC_READ_SEARCH" "CAP_NET_ADMIN" "CAP_NET_RAW" ];
+ # Security
+ NoNewPrivileges = true;
+ # Directory
+ RuntimeDirectory = "fail2ban";
+ RuntimeDirectoryMode = "0750";
+ StateDirectory = "fail2ban";
+ StateDirectoryMode = "0750";
+ LogsDirectory = "fail2ban";
+ LogsDirectoryMode = "0750";
+ # Sandboxing
+ ProtectSystem = "strict";
+ ProtectHome = true;
+ PrivateTmp = true;
+ PrivateDevices = true;
+ ProtectHostname = true;
+ ProtectKernelTunables = true;
+ ProtectKernelModules = true;
+ ProtectControlGroups = true;
};
+ };
# Add some reasonable default jails. The special "DEFAULT" jail
# sets default values for all other jails.
- services.fail2ban.jails.DEFAULT =
- ''
- ignoreip = 127.0.0.1/8
- bantime = 600
- findtime = 600
- maxretry = 3
- backend = systemd
- enabled = true
- '';
-
+ services.fail2ban.jails.DEFAULT = ''
+ ${optionalString cfg.bantime-increment.enable ''
+ # Bantime incremental
+ bantime.increment = ${if cfg.bantime-increment.enable then "true" else "false"}
+ bantime.maxtime = ${cfg.bantime-increment.maxtime}
+ bantime.factor = ${cfg.bantime-increment.factor}
+ bantime.formula = ${cfg.bantime-increment.formula}
+ bantime.multipliers = ${cfg.bantime-increment.multipliers}
+ bantime.overalljails = ${if cfg.bantime-increment.overalljails then "true" else "false"}
+ ''}
+ # Miscellaneous options
+ ignoreip = 127.0.0.1/8 ${optionalString config.networking.enableIPv6 "::1"} ${concatStringsSep " " cfg.ignoreIP}
+ maxretry = 3
+ backend = systemd
+ # Actions
+ banaction = ${cfg.banaction}
+ banaction_allports = ${cfg.banaction-allports}
+ '';
# Block SSH if there are too many failing connection attempts.
- services.fail2ban.jails.ssh-iptables =
- ''
- filter = sshd
- action = iptables-multiport[name=SSH, port="${concatMapStringsSep "," (p: toString p) config.services.openssh.ports}", protocol=tcp]
- maxretry = 5
- '';
-
+ services.fail2ban.jails.sshd = mkDefault ''
+ enabled = true
+ port = ${concatMapStringsSep "," (p: toString p) config.services.openssh.ports}
+ '';
};
-
}
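
A configuration sketch for the new fail2ban options introduced above, using only options defined in this module (the sshd jail override mirrors the module's own default):

    {
      services.fail2ban = {
        enable = true;
        ignoreIP = [ "192.168.0.0/16" ];
        bantime-increment = {
          enable = true;
          maxtime = "48h";
          factor = "2";
        };
        jails.sshd = ''
          enabled = true
          port = 22
        '';
      };
    }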
diff --git a/nixpkgs/nixos/modules/services/security/sshguard.nix b/nixpkgs/nixos/modules/services/security/sshguard.nix
index 4a174564dd2..e7a9cefdef3 100644
--- a/nixpkgs/nixos/modules/services/security/sshguard.nix
+++ b/nixpkgs/nixos/modules/services/security/sshguard.nix
@@ -92,8 +92,11 @@ in {
"-o cat"
"-n1"
] ++ (map (name: "-t ${escapeShellArg name}") cfg.services));
+ backend = if config.networking.nftables.enable
+ then "sshg-fw-nft-sets"
+ else "sshg-fw-ipset";
in ''
- BACKEND="${pkgs.sshguard}/libexec/sshg-fw-ipset"
+ BACKEND="${pkgs.sshguard}/libexec/${backend}"
LOGREADER="LANG=C ${pkgs.systemd}/bin/journalctl ${args}"
'';
@@ -104,7 +107,9 @@ in {
after = [ "network.target" ];
partOf = optional config.networking.firewall.enable "firewall.service";
- path = with pkgs; [ iptables ipset iproute systemd ];
+ path = with pkgs; if config.networking.nftables.enable
+ then [ nftables iproute systemd ]
+ else [ iptables ipset iproute systemd ];
# The sshguard ipsets must exist before we invoke
# iptables. sshguard creates the ipsets after startup if
@@ -112,14 +117,14 @@ in {
# the iptables rules because postStart races with the creation
# of the ipsets. So instead, we create both the ipsets and
# firewall rules before sshguard starts.
- preStart = ''
+ preStart = optionalString config.networking.firewall.enable ''
${pkgs.ipset}/bin/ipset -quiet create -exist sshguard4 hash:net family inet
${pkgs.ipset}/bin/ipset -quiet create -exist sshguard6 hash:net family inet6
${pkgs.iptables}/bin/iptables -I INPUT -m set --match-set sshguard4 src -j DROP
${pkgs.iptables}/bin/ip6tables -I INPUT -m set --match-set sshguard6 src -j DROP
'';
- postStop = ''
+ postStop = optionalString config.networking.firewall.enable ''
${pkgs.iptables}/bin/iptables -D INPUT -m set --match-set sshguard4 src -j DROP
${pkgs.iptables}/bin/ip6tables -D INPUT -m set --match-set sshguard6 src -j DROP
${pkgs.ipset}/bin/ipset -quiet destroy sshguard4
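
The hunk above selects the sshguard backend based on networking.nftables.enable; a sketch of a configuration that would exercise the new nftables path:

    {
      networking.nftables.enable = true;   # switches sshguard to the sshg-fw-nft-sets backend
      services.sshguard.enable = true;
    }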
diff --git a/nixpkgs/nixos/modules/services/security/vault.nix b/nixpkgs/nixos/modules/services/security/vault.nix
index b0ab8fadcbe..6a8a3a93327 100644
--- a/nixpkgs/nixos/modules/services/security/vault.nix
+++ b/nixpkgs/nixos/modules/services/security/vault.nix
@@ -135,6 +135,7 @@ in
User = "vault";
Group = "vault";
ExecStart = "${cfg.package}/bin/vault server -config ${configFile}";
+ ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
PrivateDevices = true;
PrivateTmp = true;
ProtectSystem = "full";
diff --git a/nixpkgs/nixos/modules/services/torrent/transmission.nix b/nixpkgs/nixos/modules/services/torrent/transmission.nix
index aa1acdf7d20..5ba72e8d773 100644
--- a/nixpkgs/nixos/modules/services/torrent/transmission.nix
+++ b/nixpkgs/nixos/modules/services/torrent/transmission.nix
@@ -129,19 +129,23 @@ in
# It's useful to have transmission in path, e.g. for remote control
environment.systemPackages = [ pkgs.transmission ];
- users.users = optionalAttrs (cfg.user == "transmission") (singleton
- { name = "transmission";
+ users.users = optionalAttrs (cfg.user == "transmission") ({
+ transmission = {
+ name = "transmission";
group = cfg.group;
uid = config.ids.uids.transmission;
description = "Transmission BitTorrent user";
home = homeDir;
createHome = true;
- });
+ };
+ });
- users.groups = optionalAttrs (cfg.group == "transmission") (singleton
- { name = "transmission";
+ users.groups = optionalAttrs (cfg.group == "transmission") ({
+ transmission = {
+ name = "transmission";
gid = config.ids.gids.transmission;
- });
+ };
+ });
# AppArmor profile
security.apparmor.profiles = mkIf apparmor [
diff --git a/nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix b/nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix
new file mode 100644
index 00000000000..07af7aa0dfe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix
@@ -0,0 +1,272 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+ inherit (lib) mkEnableOption mkForce mkIf mkMerge mkOption optionalAttrs recursiveUpdate types;
+
+ cfg = config.services.dokuwiki;
+
+ user = config.services.nginx.user;
+ group = config.services.nginx.group;
+
+ dokuwikiAclAuthConfig = pkgs.writeText "acl.auth.php" ''
+ # acl.auth.php
+ # <?php exit()?>
+ #
+ # Access Control Lists
+ #
+ ${toString cfg.acl}
+ '';
+
+ dokuwikiLocalConfig = pkgs.writeText "local.php" ''
+ <?php
+ $conf['savedir'] = '${cfg.stateDir}';
+ $conf['superuser'] = '${toString cfg.superUser}';
+ $conf['useacl'] = '${toString cfg.aclUse}';
+ ${toString cfg.extraConfig}
+ '';
+
+ dokuwikiPluginsLocalConfig = pkgs.writeText "plugins.local.php" ''
+ <?php
+ ${cfg.pluginsConfig}
+ '';
+
+in
+{
+ options.services.dokuwiki = {
+ enable = mkEnableOption "DokuWiki web application.";
+
+ hostName = mkOption {
+ type = types.str;
+ default = "localhost";
+ description = "FQDN for the instance.";
+ };
+
+ stateDir = mkOption {
+ type = types.path;
+ default = "/var/lib/dokuwiki/data";
+ description = "Location of the dokuwiki state directory.";
+ };
+
+ acl = mkOption {
+ type = types.nullOr types.lines;
+ default = null;
+ example = "* @ALL 8";
+ description = ''
+ Access Control Lists: see <link xlink:href="https://www.dokuwiki.org/acl"/>.
+ Mutually exclusive with services.dokuwiki.aclFile.
+ Set this to a value other than null to take precedence over the aclFile option.
+ '';
+ };
+
+ aclFile = mkOption {
+ type = types.nullOr types.path;
+ default = null;
+ description = ''
+ Location of the dokuwiki acl rules. Mutually exclusive with
+ services.dokuwiki.acl, which is preferred.
+ Consult documentation <link xlink:href="https://www.dokuwiki.org/acl"/> for further instructions.
+ Example: <link xlink:href="https://github.com/splitbrain/dokuwiki/blob/master/conf/acl.auth.php.dist"/>
+ '';
+ };
+
+ aclUse = mkOption {
+ type = types.bool;
+ default = true;
+ description = ''
+ Necessary for users to log in to the system.
+ Also limits anonymous users. When disabled,
+ everyone is able to create and edit content.
+ '';
+ };
+
+ pluginsConfig = mkOption {
+ type = types.lines;
+ default = ''
+ $plugins['authad'] = 0;
+ $plugins['authldap'] = 0;
+ $plugins['authmysql'] = 0;
+ $plugins['authpgsql'] = 0;
+ '';
+ description = ''
+ List of the dokuwiki (un)loaded plugins.
+ '';
+ };
+
+ superUser = mkOption {
+ type = types.nullOr types.str;
+ default = "@admin";
+ description = ''
+ You can set either a username, a list of usernames (“admin1,admin2”),
+ or the name of a group by prepending an @ char to the group name.
+ Consult the documentation at <link xlink:href="https://www.dokuwiki.org/config:superuser"/> for further instructions.
+ '';
+ };
+
+ usersFile = mkOption {
+ type = types.nullOr types.path;
+ default = null;
+ description = ''
+ Location of the dokuwiki users file. List of users. Format:
+ login:passwordhash:Real Name:email:groups,comma,separated
+ Create a password hash easily by using: $ mkpasswd -5 password `pwgen 8 1`
+ Example: <link xlink:href="https://github.com/splitbrain/dokuwiki/blob/master/conf/users.auth.php.dist"/>
+ '';
+ };
+
+ extraConfig = mkOption {
+ type = types.nullOr types.lines;
+ default = null;
+ example = ''
+ $conf['title'] = 'My Wiki';
+ $conf['userewrite'] = 1;
+ '';
+ description = ''
+ DokuWiki configuration. Refer to
+ <link xlink:href="https://www.dokuwiki.org/config"/>
+ for details on supported values.
+ '';
+ };
+
+ poolConfig = mkOption {
+ type = with types; attrsOf (oneOf [ str int bool ]);
+ default = {
+ "pm" = "dynamic";
+ "pm.max_children" = 32;
+ "pm.start_servers" = 2;
+ "pm.min_spare_servers" = 2;
+ "pm.max_spare_servers" = 4;
+ "pm.max_requests" = 500;
+ };
+ description = ''
+ Options for the dokuwiki PHP pool. See the documentation on <literal>php-fpm.conf</literal>
+ for details on configuration directives.
+ '';
+ };
+
+ nginx = mkOption {
+ type = types.submodule (
+ recursiveUpdate
+ (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
+ {
+ # Enable encryption by default,
+ options.forceSSL.default = true;
+ options.enableACME.default = true;
+ }
+ );
+ default = {forceSSL = true; enableACME = true;};
+ example = {
+ serverAliases = [
+ "wiki.\${config.networking.domain}"
+ ];
+ enableACME = false;
+ };
+ description = ''
+ With this option, you can customize the nginx virtualHost which already has sensible defaults for DokuWiki.
+ '';
+ };
+ };
+
+ # implementation
+
+ config = mkIf cfg.enable {
+
+ warnings = mkIf (cfg.superUser == null) ["Not setting services.dokuwiki.superUser will impair your ability to administer DokuWiki"];
+
+ assertions = [
+ {
+ assertion = cfg.aclUse -> (cfg.acl != null || cfg.aclFile != null);
+ message = "Either services.dokuwiki.acl or services.dokuwiki.aclFile is mandatory when aclUse is true";
+ }
+ {
+ assertion = cfg.usersFile != null -> cfg.aclUse != false;
+ message = "services.dokuwiki.aclUse must be true when usersFile is not null";
+ }
+ ];
+
+ services.phpfpm.pools.dokuwiki = {
+ inherit user;
+ inherit group;
+ phpEnv = {
+ DOKUWIKI_LOCAL_CONFIG = "${dokuwikiLocalConfig}";
+ DOKUWIKI_PLUGINS_LOCAL_CONFIG = "${dokuwikiPluginsLocalConfig}";
+ } //optionalAttrs (cfg.usersFile != null) {
+ DOKUWIKI_USERS_AUTH_CONFIG = "${cfg.usersFile}";
+ } //optionalAttrs (cfg.aclUse) {
+ DOKUWIKI_ACL_AUTH_CONFIG = if (cfg.acl != null) then "${dokuwikiAclAuthConfig}" else "${toString cfg.aclFile}";
+ };
+
+ settings = {
+ "listen.mode" = "0660";
+ "listen.owner" = user;
+ "listen.group" = group;
+ } // cfg.poolConfig;
+ };
+
+ services.nginx = {
+ enable = true;
+
+ virtualHosts = {
+ ${cfg.hostName} = mkMerge [ cfg.nginx {
+ root = mkForce "${pkgs.dokuwiki}/share/dokuwiki/";
+ extraConfig = "fastcgi_param HTTPS on;";
+
+ locations."~ /(conf/|bin/|inc/|install.php)" = {
+ extraConfig = "deny all;";
+ };
+
+ locations."~ ^/data/" = {
+ root = "${cfg.stateDir}";
+ extraConfig = "internal;";
+ };
+
+ locations."~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg)$" = {
+ extraConfig = "expires 365d;";
+ };
+
+ locations."/" = {
+ priority = 1;
+ index = "doku.php";
+ extraConfig = ''try_files $uri $uri/ @dokuwiki;'';
+ };
+
+ locations."@dokuwiki" = {
+ extraConfig = ''
+ # rewrites "doku.php/" out of the URLs if you set the userwrite setting to .htaccess in dokuwiki config page
+ rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
+ rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
+ rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
+ rewrite ^/(.*) /doku.php?id=$1&$args last;
+ '';
+ };
+
+ locations."~ \.php$" = {
+ extraConfig = ''
+ try_files $uri $uri/ /doku.php;
+ include ${pkgs.nginx}/conf/fastcgi_params;
+ fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+ fastcgi_param REDIRECT_STATUS 200;
+ fastcgi_pass unix:${config.services.phpfpm.pools.dokuwiki.socket};
+ fastcgi_param HTTPS on;
+ '';
+ };
+ }];
+ };
+
+ };
+
+ systemd.tmpfiles.rules = [
+ "d ${cfg.stateDir}/attic 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/cache 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/index 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/locks 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/media 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/media_attic 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/media_meta 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/meta 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/pages 0750 ${user} ${group} - -"
+ "d ${cfg.stateDir}/tmp 0750 ${user} ${group} - -"
+ ];
+
+ };
+}
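
A sketch of a minimal services.dokuwiki configuration for the new module above; the host name and wiki title are placeholders, and the acl value is taken from the option's example (note the nginx submodule enables forceSSL and ACME by default):

    {
      services.dokuwiki = {
        enable = true;
        hostName = "wiki.example.org";   # placeholder FQDN
        superUser = "@admin";
        acl = "* @ALL 8";
        extraConfig = ''
          $conf['title'] = 'My Wiki';
        '';
      };
    }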
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
index f1dabadc119..d79f2bb735f 100644
--- a/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
@@ -229,6 +229,15 @@ in {
'';
};
+ trustedProxies = mkOption {
+ type = types.listOf types.str;
+ default = [];
+ description = ''
+ Trusted proxies to provide if the Nextcloud installation is running
+ behind a reverse proxy, to protect against e.g. spoofing.
+ '';
+ };
+
overwriteProtocol = mkOption {
type = types.nullOr (types.enum [ "http" "https" ]);
default = null;
@@ -352,6 +361,7 @@ in {
${optionalString (c.dbpassFile != null) "'dbpassword' => nix_read_pwd(),"}
'dbtype' => '${c.dbtype}',
'trusted_domains' => ${writePhpArrary ([ cfg.hostName ] ++ c.extraTrustedDomains)},
+ 'trusted_proxies' => ${writePhpArrary (c.trustedProxies)},
];
'';
occInstallCmd = let
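
A sketch of how the new trustedProxies option might be set when Nextcloud sits behind a local reverse proxy; the option is assumed to live in the same config submodule as overwriteProtocol, as the surrounding hunk suggests:

    {
      services.nextcloud.config.trustedProxies = [ "127.0.0.1" "::1" ];
    }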
diff --git a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
index 4460f89ec5c..9942c63acce 100644
--- a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -179,6 +179,28 @@ let
then hostOpts.documentRoot
else pkgs.runCommand "empty" { preferLocalBuild = true; } "mkdir -p $out"
;
+
+ mkLocations = locations: concatStringsSep "\n" (map (config: ''
+ <Location ${config.location}>
+ ${optionalString (config.proxyPass != null) ''
+ <IfModule mod_proxy.c>
+ ProxyPass ${config.proxyPass}
+ ProxyPassReverse ${config.proxyPass}
+ </IfModule>
+ ''}
+ ${optionalString (config.index != null) ''
+ <IfModule mod_dir.c>
+ DirectoryIndex ${config.index}
+ </IfModule>
+ ''}
+ ${optionalString (config.alias != null) ''
+ <IfModule mod_alias.c>
+ Alias "${config.alias}"
+ </IfModule>
+ ''}
+ ${config.extraConfig}
+ </Location>
+ '') (sortProperties (mapAttrsToList (k: v: v // { location = k; }) locations)));
in
''
${optionalString mainCfg.logPerVirtualHost ''
@@ -218,12 +240,6 @@ let
''}
${
- let makeFileConf = elem: ''
- Alias ${elem.urlPath} ${elem.file}
- '';
- in concatMapStrings makeFileConf hostOpts.servedFiles
- }
- ${
let makeDirConf = elem: ''
Alias ${elem.urlPath} ${elem.dir}/
<Directory ${elem.dir}>
@@ -235,6 +251,7 @@ let
in concatMapStrings makeDirConf hostOpts.servedDirs
}
+ ${mkLocations hostOpts.locations}
${hostOpts.extraConfig}
''
;
@@ -606,6 +623,11 @@ in
}
];
+ warnings =
+ mapAttrsToList (name: hostOpts: ''
+ Using config.services.httpd.virtualHosts."${name}".servedFiles is deprecated and will become unsupported in a future release. Your configuration will continue to work as is but please migrate your configuration to config.services.httpd.virtualHosts."${name}".locations before the 20.09 release of NixOS.
+ '') (filterAttrs (name: hostOpts: hostOpts.servedFiles != []) mainCfg.virtualHosts);
+
users.users = optionalAttrs (mainCfg.user == "wwwrun") {
wwwrun = {
group = mainCfg.group;
@@ -629,6 +651,9 @@ in
environment.systemPackages = [httpd];
+ # required for "apachectl configtest"
+ environment.etc."httpd/httpd.conf".source = httpdConf;
+
services.httpd.phpOptions =
''
; Needed for PHP's mail() function.
diff --git a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix
new file mode 100644
index 00000000000..8ea88f94f97
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix
@@ -0,0 +1,54 @@
+{ config, lib, name, ... }:
+let
+ inherit (lib) mkOption types;
+in
+{
+ options = {
+
+ proxyPass = mkOption {
+ type = with types; nullOr str;
+ default = null;
+ example = "http://www.example.org/";
+ description = ''
+ Sets up a simple reverse proxy as described by <link xlink:href="https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html#simple" />.
+ '';
+ };
+
+ index = mkOption {
+ type = with types; nullOr str;
+ default = null;
+ example = "index.php index.html";
+ description = ''
+ Adds DirectoryIndex directive. See <link xlink:href="https://httpd.apache.org/docs/2.4/mod/mod_dir.html#directoryindex" />.
+ '';
+ };
+
+ alias = mkOption {
+ type = with types; nullOr path;
+ default = null;
+ example = "/your/alias/directory";
+ description = ''
+ Alias directory for requests. See <link xlink:href="https://httpd.apache.org/docs/2.4/mod/mod_alias.html#alias" />.
+ '';
+ };
+
+ extraConfig = mkOption {
+ type = types.lines;
+ default = "";
+ description = ''
+ These lines go to the end of the location verbatim.
+ '';
+ };
+
+ priority = mkOption {
+ type = types.int;
+ default = 1000;
+ description = ''
+ Order of this location block in relation to the others in the vhost.
+ The semantics are the same as with `lib.mkOrder`. Smaller values have
+ a greater priority.
+ '';
+ };
+
+ };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix
index f2e92cda05f..f34f8b4acdf 100644
--- a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix
@@ -1,6 +1,6 @@
{ config, lib, name, ... }:
let
- inherit (lib) mkOption types;
+ inherit (lib) literalExample mkOption nameValuePair types;
in
{
options = {
@@ -175,6 +175,12 @@ in
];
description = ''
This option provides a simple way to serve individual, static files.
+
+ <note><para>
+ This option has been deprecated and will be removed in a future
+ version of NixOS. You can achieve the same result by making use of
+ the <literal>locations.&lt;name&gt;.alias</literal> option.
+ </para></note>
'';
};
@@ -231,5 +237,30 @@ in
'';
};
+ locations = mkOption {
+ type = with types; attrsOf (submodule (import ./location-options.nix));
+ default = {};
+ example = literalExample ''
+ {
+ "/" = {
+ proxyPass = "http://localhost:3000";
+ };
+ "/foo/bar.png" = {
+ alias = "/home/eelco/some-file.png";
+ };
+ };
+ '';
+ description = ''
+ Declarative location config. See <link
+ xlink:href="https://httpd.apache.org/docs/2.4/mod/core.html#location"/> for details.
+ '';
+ };
+
+ };
+
+ config = {
+
+ locations = builtins.listToAttrs (map (elem: nameValuePair elem.urlPath { alias = elem.file; }) config.servedFiles);
+
};
}
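
A sketch of the new declarative locations option for Apache virtual hosts, including the alias form that replaces a servedFiles entry; the paths and the upstream URL are placeholders:

    {
      services.httpd.virtualHosts."example.org" = {
        documentRoot = "/var/www/example";             # placeholder
        locations."/app" = {
          proxyPass = "http://localhost:3000";         # placeholder upstream
        };
        locations."/favicon.ico" = {
          alias = "/var/www/static/favicon.ico";       # replaces a servedFiles entry
        };
      };
    }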
diff --git a/nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix b/nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix
index 272fd148018..f7fb07bb797 100644
--- a/nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix
@@ -3,8 +3,9 @@
with lib;
let
- cfg = config.services.gitweb;
- package = pkgs.gitweb.override (optionalAttrs cfg.gitwebTheme {
+ cfg = config.services.nginx.gitweb;
+ gitwebConfig = config.services.gitweb;
+ package = pkgs.gitweb.override (optionalAttrs gitwebConfig.gitwebTheme {
gitwebTheme = true;
});
@@ -17,13 +18,45 @@ in
default = false;
type = types.bool;
description = ''
- If true, enable gitweb in nginx. Access it at http://yourserver/gitweb
+ If true, enable gitweb in nginx.
+ '';
+ };
+
+ location = mkOption {
+ default = "/gitweb";
+ type = types.str;
+ description = ''
+ Location to serve gitweb on.
+ '';
+ };
+
+ user = mkOption {
+ default = "nginx";
+ type = types.str;
+ description = ''
+ Existing user that the CGI process will belong to. (Default almost surely will do.)
+ '';
+ };
+
+ group = mkOption {
+ default = "nginx";
+ type = types.str;
+ description = ''
+ Group that the CGI process will belong to. (Set to <literal>config.services.gitolite.group</literal> if you are using gitolite.)
+ '';
+ };
+
+ virtualHost = mkOption {
+ default = "_";
+ type = types.str;
+ description = ''
+ VirtualHost to serve gitweb on. Default is catch-all.
'';
};
};
- config = mkIf config.services.nginx.gitweb.enable {
+ config = mkIf cfg.enable {
systemd.services.gitweb = {
description = "GitWeb service";
@@ -32,22 +65,22 @@ in
FCGI_SOCKET_PATH = "/run/gitweb/gitweb.sock";
};
serviceConfig = {
- User = "nginx";
- Group = "nginx";
+ User = cfg.user;
+ Group = cfg.group;
RuntimeDirectory = [ "gitweb" ];
};
wantedBy = [ "multi-user.target" ];
};
services.nginx = {
- virtualHosts.default = {
- locations."/gitweb/static/" = {
+ virtualHosts.${cfg.virtualHost} = {
+ locations."${cfg.location}/static/" = {
alias = "${package}/static/";
};
- locations."/gitweb/" = {
+ locations."${cfg.location}/" = {
extraConfig = ''
include ${pkgs.nginx}/conf/fastcgi_params;
- fastcgi_param GITWEB_CONFIG ${cfg.gitwebConfigFile};
+ fastcgi_param GITWEB_CONFIG ${gitwebConfig.gitwebConfigFile};
fastcgi_pass unix:/run/gitweb/gitweb.sock;
'';
};
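
A sketch of the extended gitweb options above, serving gitweb from a dedicated virtual host; the host name is a placeholder:

    {
      services.nginx.enable = true;
      services.nginx.gitweb = {
        enable = true;
        location = "/git";
        virtualHost = "git.example.org";   # placeholder; the default "_" is a catch-all
      };
    }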
diff --git a/nixpkgs/nixos/modules/services/web-servers/unit/default.nix b/nixpkgs/nixos/modules/services/web-servers/unit/default.nix
index 2303dfa9540..f8a18954fc9 100644
--- a/nixpkgs/nixos/modules/services/web-servers/unit/default.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/unit/default.nix
@@ -111,7 +111,7 @@ in {
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_SETGID" "CAP_SETUID" ];
# Security
NoNewPrivileges = true;
- # Sanboxing
+ # Sandboxing
ProtectSystem = "full";
ProtectHome = true;
RuntimeDirectory = "unit";
@@ -130,8 +130,10 @@ in {
};
users.users = optionalAttrs (cfg.user == "unit") {
- unit.group = cfg.group;
- isSystemUser = true;
+ unit = {
+ group = cfg.group;
+ isSystemUser = true;
+ };
};
users.groups = optionalAttrs (cfg.group == "unit") {
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix
index 970fa620c6b..ea6aac9f6c9 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix
@@ -68,21 +68,15 @@ in
scripts before forwarding the value to the
<varname>displayManager</varname>.
'';
- apply = list: {
- list = map (d: d // {
- manage = "desktop";
- start = d.start
- + optionalString (needBGCond d) ''
- if [ -e $HOME/.background-image ]; then
- ${pkgs.feh}/bin/feh --bg-${cfg.wallpaper.mode} ${optionalString cfg.wallpaper.combineScreens "--no-xinerama"} $HOME/.background-image
- else
- # Use a solid black background as fallback
- ${pkgs.xorg.xsetroot}/bin/xsetroot -solid black
- fi
- '';
- }) list;
- needBGPackages = [] != filter needBGCond list;
- };
+ apply = map (d: d // {
+ manage = "desktop";
+ start = d.start
+ + optionalString (needBGCond d) ''
+ if [ -e $HOME/.background-image ]; then
+ ${pkgs.feh}/bin/feh --bg-${cfg.wallpaper.mode} ${optionalString cfg.wallpaper.combineScreens "--no-xinerama"} $HOME/.background-image
+ fi
+ '';
+ });
};
default = mkOption {
@@ -100,5 +94,5 @@ in
};
- config.services.xserver.displayManager.session = cfg.session.list;
+ config.services.xserver.displayManager.session = cfg.session;
}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix
index 6d9bd284bc7..5756cf14ed9 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -144,7 +144,7 @@ in
services.gnome3.core-shell.enable = true;
services.gnome3.core-utilities.enable = mkDefault true;
- services.xserver.displayManager.sessionPackages = [ pkgs.gnome3.gnome-session ];
+ services.xserver.displayManager.sessionPackages = [ pkgs.gnome3.gnome-session.sessions ];
environment.extraInit = ''
${concatMapStrings (p: ''
@@ -249,11 +249,17 @@ in
services.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
services.telepathy.enable = mkDefault true;
- systemd.packages = with pkgs.gnome3; [ vino gnome-session ];
+ systemd.packages = with pkgs.gnome3; [
+ gnome-session
+ gnome-shell
+ vino
+ ];
services.avahi.enable = mkDefault true;
- xdg.portal.extraPortals = [ pkgs.gnome3.gnome-shell ];
+ xdg.portal.extraPortals = [
+ pkgs.gnome3.gnome-shell
+ ];
services.geoclue2.enable = mkDefault true;
services.geoclue2.enableDemoAgent = false; # GNOME has its own geoclue agent
@@ -328,7 +334,6 @@ in
cheese
eog
epiphany
- geary
gedit
gnome-calculator
gnome-calendar
@@ -355,6 +360,7 @@ in
# Enable default programs
programs.evince.enable = mkDefault true;
programs.file-roller.enable = mkDefault true;
+ programs.geary.enable = mkDefault true;
programs.gnome-disks.enable = mkDefault true;
programs.gnome-terminal.enable = mkDefault true;
programs.seahorse.enable = mkDefault true;
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix
index a08b1947f65..21f59074f3a 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -127,14 +127,9 @@ in
"/share/gtksourceview-4.0"
];
- services.xserver.desktopManager.session = [{
- name = "xfce";
- bgSupport = true;
- start = ''
- ${pkgs.runtimeShell} ${pkgs.xfce.xfce4-session.xinitrc} &
- waitPID=$!
- '';
- }];
+ services.xserver.displayManager.sessionPackages = [
+ pkgs.xfce.xfce4-session
+ ];
services.xserver.updateDbusEnvironment = true;
services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
index 2f8c8cc9013..325023f4121 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
@@ -174,6 +174,10 @@ in
"f /run/gdm/.config/gnome-initial-setup-done 0711 gdm gdm - yes"
];
+ # Otherwise GDM will not be able to start correctly and display Wayland sessions
+ systemd.packages = with pkgs.gnome3; [ gnome-session gnome-shell ];
+ environment.systemPackages = [ pkgs.gnome3.adwaita-icon-theme ];
+
systemd.services.display-manager.wants = [
# Because sd_login_monitor_new requires /run/systemd/machines
"systemd-machined.service"
diff --git a/nixpkgs/nixos/modules/services/x11/hardware/multitouch.nix b/nixpkgs/nixos/modules/services/x11/hardware/multitouch.nix
deleted file mode 100644
index c03bb3b494f..00000000000
--- a/nixpkgs/nixos/modules/services/x11/hardware/multitouch.nix
+++ /dev/null
@@ -1,94 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let cfg = config.services.xserver.multitouch;
- disabledTapConfig = ''
- Option "MaxTapTime" "0"
- Option "MaxTapMove" "0"
- Option "TapButton1" "0"
- Option "TapButton2" "0"
- Option "TapButton3" "0"
- '';
-in {
-
- options = {
-
- services.xserver.multitouch = {
-
- enable = mkOption {
- default = false;
- description = "Whether to enable multitouch touchpad support.";
- };
-
- invertScroll = mkOption {
- default = false;
- type = types.bool;
- description = "Whether to invert scrolling direction à la OSX Lion";
- };
-
- ignorePalm = mkOption {
- default = false;
- type = types.bool;
- description = "Whether to ignore touches detected as being the palm (i.e when typing)";
- };
-
- tapButtons = mkOption {
- type = types.bool;
- default = true;
- description = "Whether to enable tap buttons.";
- };
-
- buttonsMap = mkOption {
- type = types.listOf types.int;
- default = [3 2 0];
- example = [1 3 2];
- description = "Remap touchpad buttons.";
- apply = map toString;
- };
-
- additionalOptions = mkOption {
- type = types.str;
- default = "";
- example = ''
- Option "ScaleDistance" "50"
- Option "RotateDistance" "60"
- '';
- description = ''
- Additional options for mtrack touchpad driver.
- '';
- };
-
- };
-
- };
-
- config = mkIf cfg.enable {
-
- services.xserver.modules = [ pkgs.xf86_input_mtrack ];
-
- services.xserver.config =
- ''
- # Automatically enable the multitouch driver
- Section "InputClass"
- MatchIsTouchpad "on"
- Identifier "Touchpads"
- Driver "mtrack"
- Option "IgnorePalm" "${boolToString cfg.ignorePalm}"
- Option "ClickFinger1" "${builtins.elemAt cfg.buttonsMap 0}"
- Option "ClickFinger2" "${builtins.elemAt cfg.buttonsMap 1}"
- Option "ClickFinger3" "${builtins.elemAt cfg.buttonsMap 2}"
- ${optionalString (!cfg.tapButtons) disabledTapConfig}
- ${optionalString cfg.invertScroll ''
- Option "ScrollUpButton" "5"
- Option "ScrollDownButton" "4"
- Option "ScrollLeftButton" "7"
- Option "ScrollRightButton" "6"
- ''}
- ${cfg.additionalOptions}
- EndSection
- '';
-
- };
-
-}
diff --git a/nixpkgs/nixos/modules/services/x11/unclutter.nix b/nixpkgs/nixos/modules/services/x11/unclutter.nix
index 2478aaabb79..c0868604a68 100644
--- a/nixpkgs/nixos/modules/services/x11/unclutter.nix
+++ b/nixpkgs/nixos/modules/services/x11/unclutter.nix
@@ -32,7 +32,7 @@ in {
default = 1;
};
- threeshold = mkOption {
+ threshold = mkOption {
description = "Minimum number of pixels considered cursor movement";
type = types.int;
default = 1;
@@ -72,6 +72,11 @@ in {
};
};
+ imports = [
+ (mkRenamedOptionModule [ "services" "unclutter" "threeshold" ]
+ [ "services" "unclutter" "threshold" ])
+ ];
+
meta.maintainers = with lib.maintainers; [ rnhmjoj ];
}
diff --git a/nixpkgs/nixos/modules/services/x11/xserver.nix b/nixpkgs/nixos/modules/services/x11/xserver.nix
index 7029919170a..7f0de96d208 100644
--- a/nixpkgs/nixos/modules/services/x11/xserver.nix
+++ b/nixpkgs/nixos/modules/services/x11/xserver.nix
@@ -556,8 +556,7 @@ in
services.xserver.displayManager.lightdm.enable =
let dmconf = cfg.displayManager;
- default = !( dmconf.auto.enable
- || dmconf.gdm.enable
+ default = !(dmconf.gdm.enable
|| dmconf.sddm.enable
|| dmconf.xpra.enable );
in mkIf (default) true;
diff --git a/nixpkgs/nixos/modules/system/activation/activation-script.nix b/nixpkgs/nixos/modules/system/activation/activation-script.nix
index ddfd1af4a31..495d77dfd49 100644
--- a/nixpkgs/nixos/modules/system/activation/activation-script.nix
+++ b/nixpkgs/nixos/modules/system/activation/activation-script.nix
@@ -162,6 +162,16 @@ in
<literal>/usr/bin/env</literal>.
'';
};
+
+ environment.ld-linux = mkOption {
+ default = false;
+ type = types.bool;
+ visible = false;
+ description = ''
+ Install symlink to ld-linux(8) system-wide to allow running unmodified ELF binaries.
+ It might be useful to run games or executables distributed inside jar files.
+ '';
+ };
};
@@ -195,9 +205,30 @@ in
''
else ''
rm -f /usr/bin/env
- rmdir --ignore-fail-on-non-empty /usr/bin /usr
+ rmdir -p /usr/bin || true
'';
+ system.activationScripts.ld-linux =
+ concatStrings (
+ mapAttrsToList
+ (target: source:
+ if config.environment.ld-linux then ''
+ mkdir -m 0755 -p $(dirname ${target})
+ ln -sfn ${escapeShellArg source} ${target}.tmp
+ mv -f ${target}.tmp ${target} # atomically replace
+ '' else ''
+ rm -f ${target}
+ rmdir $(dirname ${target}) || true
+ '')
+ {
+ "i686-linux" ."/lib/ld-linux.so.2" = "${pkgs.glibc.out}/lib/ld-linux.so.2";
+ "x86_64-linux" ."/lib/ld-linux.so.2" = "${pkgs.pkgsi686Linux.glibc.out}/lib/ld-linux.so.2";
+ "x86_64-linux" ."/lib64/ld-linux-x86-64.so.2" = "${pkgs.glibc.out}/lib64/ld-linux-x86-64.so.2";
+ "aarch64-linux"."/lib/ld-linux-aarch64.so.1" = "${pkgs.glibc.out}/lib/ld-linux-aarch64.so.1";
+ "armv7l-linux" ."/lib/ld-linux-armhf.so.3" = "${pkgs.glibc.out}/lib/ld-linux-armhf.so.3";
+ }.${pkgs.stdenv.system} or {}
+ );
+
system.activationScripts.specialfs =
''
specialMount() {
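
A sketch of the new (hidden) environment.ld-linux switch added above; setting it installs the glibc dynamic loader symlink for the current platform:

    {
      environment.ld-linux = true;
    }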
diff --git a/nixpkgs/nixos/modules/system/boot/luksroot.nix b/nixpkgs/nixos/modules/system/boot/luksroot.nix
index 0bb8396a44f..31f1e22cda3 100644
--- a/nixpkgs/nixos/modules/system/boot/luksroot.nix
+++ b/nixpkgs/nixos/modules/system/boot/luksroot.nix
@@ -4,6 +4,7 @@ with lib;
let
luks = config.boot.initrd.luks;
+ kernelPackages = config.boot.kernelPackages;
commonFunctions = ''
die() {
@@ -139,7 +140,7 @@ let
umount /crypt-ramfs 2>/dev/null
'';
- openCommand = name': { name, device, header, keyFile, keyFileSize, keyFileOffset, allowDiscards, yubikey, gpgCard, fallbackToPassword, ... }: assert name' == name;
+ openCommand = name': { name, device, header, keyFile, keyFileSize, keyFileOffset, allowDiscards, yubikey, gpgCard, fido2, fallbackToPassword, ... }: assert name' == name;
let
csopen = "cryptsetup luksOpen ${device} ${name} ${optionalString allowDiscards "--allow-discards"} ${optionalString (header != null) "--header=${header}"}";
cschange = "cryptsetup luksChangeKey ${device} ${optionalString (header != null) "--header=${header}"}";
@@ -387,7 +388,31 @@ let
}
''}
- ${if (luks.yubikeySupport && (yubikey != null)) || (luks.gpgSupport && (gpgCard != null)) then ''
+ ${optionalString (luks.fido2Support && (fido2.credential != null)) ''
+
+ open_with_hardware() {
+ local passphrase
+
+ ${if fido2.passwordLess then ''
+ export passphrase=""
+ '' else ''
+ read -rsp "FIDO2 salt for ${device}: " passphrase
+ echo
+ ''}
+ ${optionalString (lib.versionOlder kernelPackages.kernel.version "5.4") ''
+ echo "On systems with Linux Kernel < 5.4, it might take a while to initialize the CRNG, you might want to use linuxPackages_latest."
+ echo "Please move your mouse to create needed randomness."
+ ''}
+ echo "Waiting for your FIDO2 device..."
+ fido2luks -i open ${device} ${name} ${fido2.credential} --await-dev ${toString fido2.gracePeriod} --salt string:$passphrase
+ if [ $? -ne 0 ]; then
+ echo "No FIDO2 key found, falling back to normal open procedure"
+ open_normally
+ fi
+ }
+ ''}
+
+ ${if (luks.yubikeySupport && (yubikey != null)) || (luks.gpgSupport && (gpgCard != null)) || (luks.fido2Support && (fido2.credential != null)) then ''
open_with_hardware
'' else ''
open_normally
@@ -608,6 +633,31 @@ in
});
};
+ fido2 = {
+ credential = mkOption {
+ default = null;
+ example = "f1d00200d8dc783f7fb1e10ace8da27f8312d72692abfca2f7e4960a73f48e82e1f7571f6ebfcee9fb434f9886ccc8fcc52a6614d8d2";
+ type = types.nullOr types.str;
+ description = "The FIDO2 credential ID.";
+ };
+
+ gracePeriod = mkOption {
+ default = 10;
+ type = types.int;
+ description = "Time in seconds to wait for the FIDO2 key.";
+ };
+
+ passwordLess = mkOption {
+ default = false;
+ type = types.bool;
+ description = ''
+ Defines whether to use an empty string as the default salt.
+
+ Enable only when your device is PIN protected, such as <link xlink:href="https://trezor.io/">Trezor</link>.
+ '';
+ };
+ };
+
yubikey = mkOption {
default = null;
description = ''
@@ -706,6 +756,15 @@ in
and a Yubikey to work with this feature.
'';
};
+
+ boot.initrd.luks.fido2Support = mkOption {
+ default = false;
+ type = types.bool;
+ description = ''
+ Enables support for authenticating with FIDO2 devices.
+ '';
+ };
+
};
config = mkIf (luks.devices != {} || luks.forceLuksSupportInInitrd) {
@@ -714,6 +773,14 @@ in
[ { assertion = !(luks.gpgSupport && luks.yubikeySupport);
message = "Yubikey and GPG Card may not be used at the same time.";
}
+
+ { assertion = !(luks.gpgSupport && luks.fido2Support);
+ message = "FIDO2 and GPG Card may not be used at the same time.";
+ }
+
+ { assertion = !(luks.fido2Support && luks.yubikeySupport);
+ message = "FIDO2 and Yubikey may not be used at the same time.";
+ }
];
# actually, sbp2 driver is the one enabling the DMA attack, but this needs to be tested
@@ -753,6 +820,11 @@ in
chmod +x $out/bin/openssl-wrap
''}
+ ${optionalString luks.fido2Support ''
+ copy_bin_and_libs ${pkgs.fido2luks}/bin/fido2luks
+ ''}
+
+
${optionalString luks.gpgSupport ''
copy_bin_and_libs ${pkgs.gnupg}/bin/gpg
copy_bin_and_libs ${pkgs.gnupg}/bin/gpg-agent
@@ -783,6 +855,9 @@ in
$out/bin/gpg-agent --version
$out/bin/scdaemon --version
''}
+ ${optionalString luks.fido2Support ''
+ $out/bin/fido2luks --version
+ ''}
'';
boot.initrd.preFailCommands = postCommands;
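
A sketch of the new FIDO2 options above for an encrypted root device; the device path and credential ID are placeholders (the credential would be produced by fido2luks):

    {
      boot.initrd.luks.fido2Support = true;
      boot.initrd.luks.devices."root" = {
        device = "/dev/disk/by-uuid/XXXX";    # placeholder
        fido2 = {
          credential = "f1d00200d8dc783f";    # placeholder credential ID
          gracePeriod = 10;
          passwordLess = false;
        };
      };
    }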
diff --git a/nixpkgs/nixos/modules/system/boot/networkd.nix b/nixpkgs/nixos/modules/system/boot/networkd.nix
index 3e289a63139..56a9d6b1138 100644
--- a/nixpkgs/nixos/modules/system/boot/networkd.nix
+++ b/nixpkgs/nixos/modules/system/boot/networkd.nix
@@ -49,7 +49,7 @@ let
(assertValueOneOf "Kind" [
"bond" "bridge" "dummy" "gre" "gretap" "ip6gre" "ip6tnl" "ip6gretap" "ipip"
"ipvlan" "macvlan" "macvtap" "sit" "tap" "tun" "veth" "vlan" "vti" "vti6"
- "vxlan" "geneve" "vrf" "vcan" "vxcan" "wireguard" "netdevsim"
+ "vxlan" "geneve" "vrf" "vcan" "vxcan" "wireguard" "netdevsim" "xfrm"
])
(assertByteFormat "MTUBytes")
(assertMacAddress "MACAddress")
@@ -172,6 +172,14 @@ let
(assertValueOneOf "AllSlavesActive" boolValues)
];
+ checkXfrm = checkUnitConfig "Xfrm" [
+ (assertOnlyFields [
+ "InterfaceId" "Independent"
+ ])
+ (assertRange "InterfaceId" 1 4294967295)
+ (assertValueOneOf "Independent" boolValues)
+ ];
+
checkNetwork = checkUnitConfig "Network" [
(assertOnlyFields [
"Description" "DHCP" "DHCPServer" "LinkLocalAddressing" "IPv4LLRoute"
@@ -182,7 +190,7 @@ let
"IPv6HopLimit" "IPv4ProxyARP" "IPv6ProxyNDP" "IPv6ProxyNDPAddress"
"IPv6PrefixDelegation" "IPv6MTUBytes" "Bridge" "Bond" "VRF" "VLAN"
"IPVLAN" "MACVLAN" "VXLAN" "Tunnel" "ActiveSlave" "PrimarySlave"
- "ConfigureWithoutCarrier"
+ "ConfigureWithoutCarrier" "Xfrm"
])
# Note: For DHCP the values both, none, v4, v6 are deprecated
(assertValueOneOf "DHCP" ["yes" "no" "ipv4" "ipv6" "both" "none" "v4" "v6"])
@@ -477,6 +485,18 @@ let
'';
};
+ xfrmConfig = mkOption {
+ default = {};
+ example = { InterfaceId = 1; };
+ type = types.addCheck (types.attrsOf unitOption) checkXfrm;
+ description = ''
+ Each attribute in this set specifies an option in the
+ <literal>[Xfrm]</literal> section of the unit. See
+ <citerefentry><refentrytitle>systemd.netdev</refentrytitle>
+ <manvolnum>5</manvolnum></citerefentry> for details.
+ '';
+ };
+
};
addressOptions = {
@@ -712,6 +732,16 @@ let
'';
};
+ xfrm = mkOption {
+ default = [ ];
+ type = types.listOf types.str;
+ description = ''
+ A list of xfrm interfaces to be added to the network section of the
+ unit. See <citerefentry><refentrytitle>systemd.network</refentrytitle>
+ <manvolnum>5</manvolnum></citerefentry> for details.
+ '';
+ };
+
addresses = mkOption {
default = [ ];
type = with types; listOf (submodule addressOptions);
@@ -810,6 +840,11 @@ let
${attrsToSection def.bondConfig}
''}
+ ${optionalString (def.xfrmConfig != { }) ''
+ [Xfrm]
+ ${attrsToSection def.xfrmConfig}
+
+ ''}
${optionalString (def.wireguardConfig != { }) ''
[WireGuard]
${attrsToSection def.wireguardConfig}
@@ -847,6 +882,7 @@ let
${concatStringsSep "\n" (map (s: "MACVLAN=${s}") def.macvlan)}
${concatStringsSep "\n" (map (s: "VXLAN=${s}") def.vxlan)}
${concatStringsSep "\n" (map (s: "Tunnel=${s}") def.tunnel)}
+ ${concatStringsSep "\n" (map (s: "Xfrm=${s}") def.xfrm)}
${optionalString (def.dhcpConfig != { }) ''
[DHCP]
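
A sketch of the new xfrm support above, creating an xfrm netdev bound to an underlying link via systemd-networkd; the interface names are placeholders and the xfrmConfig value mirrors the option's example:

    {
      systemd.network = {
        enable = true;
        netdevs."20-xfrm0" = {
          netdevConfig = { Name = "xfrm0"; Kind = "xfrm"; };
          xfrmConfig = { InterfaceId = 1; };
        };
        networks."30-uplink" = {
          matchConfig.Name = "eth0";   # placeholder parent interface
          xfrm = [ "xfrm0" ];
        };
      };
    }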
diff --git a/nixpkgs/nixos/modules/system/boot/systemd-lib.nix b/nixpkgs/nixos/modules/system/boot/systemd-lib.nix
index 28ad4f121bb..fd1a5b9f62c 100644
--- a/nixpkgs/nixos/modules/system/boot/systemd-lib.nix
+++ b/nixpkgs/nixos/modules/system/boot/systemd-lib.nix
@@ -147,7 +147,13 @@ in rec {
done
# Symlink all units provided listed in systemd.packages.
- for i in ${toString cfg.packages}; do
+ packages="${toString cfg.packages}"
+
+ # Filter duplicate directories
+ declare -A unique_packages
+ for k in $packages ; do unique_packages[$k]=1 ; done
+
+ for i in ''${!unique_packages[@]}; do
for fn in $i/etc/systemd/${type}/* $i/lib/systemd/${type}/*; do
if ! [[ "$fn" =~ .wants$ ]]; then
if [[ -d "$fn" ]]; then
diff --git a/nixpkgs/nixos/modules/system/boot/systemd.nix b/nixpkgs/nixos/modules/system/boot/systemd.nix
index c438bb216e7..941df5797c6 100644
--- a/nixpkgs/nixos/modules/system/boot/systemd.nix
+++ b/nixpkgs/nixos/modules/system/boot/systemd.nix
@@ -869,11 +869,15 @@ in
"sysctl.d/50-coredump.conf".source = "${systemd}/example/sysctl.d/50-coredump.conf";
"sysctl.d/50-default.conf".source = "${systemd}/example/sysctl.d/50-default.conf";
+ "tmpfiles.d/home.conf".source = "${systemd}/example/tmpfiles.d/home.conf";
"tmpfiles.d/journal-nocow.conf".source = "${systemd}/example/tmpfiles.d/journal-nocow.conf";
+ "tmpfiles.d/portables.conf".source = "${systemd}/example/tmpfiles.d/portables.conf";
"tmpfiles.d/static-nodes-permissions.conf".source = "${systemd}/example/tmpfiles.d/static-nodes-permissions.conf";
"tmpfiles.d/systemd.conf".source = "${systemd}/example/tmpfiles.d/systemd.conf";
+ "tmpfiles.d/systemd-nologin.conf".source = "${systemd}/example/tmpfiles.d/systemd-nologin.conf";
"tmpfiles.d/systemd-nspawn.conf".source = "${systemd}/example/tmpfiles.d/systemd-nspawn.conf";
"tmpfiles.d/systemd-tmp.conf".source = "${systemd}/example/tmpfiles.d/systemd-tmp.conf";
+ "tmpfiles.d/tmp.conf".source = "${systemd}/example/tmpfiles.d/tmp.conf";
"tmpfiles.d/var.conf".source = "${systemd}/example/tmpfiles.d/var.conf";
"tmpfiles.d/x11.conf".source = "${systemd}/example/tmpfiles.d/x11.conf";
diff --git a/nixpkgs/nixos/modules/tasks/powertop.nix b/nixpkgs/nixos/modules/tasks/powertop.nix
index 609831506e1..e8064f9fa80 100644
--- a/nixpkgs/nixos/modules/tasks/powertop.nix
+++ b/nixpkgs/nixos/modules/tasks/powertop.nix
@@ -15,6 +15,7 @@ in {
systemd.services = {
powertop = {
wantedBy = [ "multi-user.target" ];
+ after = [ "multi-user.target" ];
description = "Powertop tunings";
path = [ pkgs.kmod ];
serviceConfig = {
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-init.nix b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
index 8032b2c6d7c..8c12e0e49bf 100644
--- a/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
@@ -7,8 +7,8 @@ let
echo "attempting to fetch configuration from EC2 user data..."
export HOME=/root
- export PATH=${pkgs.lib.makeBinPath [ config.nix.package pkgs.systemd pkgs.gnugrep pkgs.gnused config.system.build.nixos-rebuild]}:$PATH
- export NIX_PATH=/nix/var/nix/profiles/per-user/root/channels/nixos:nixos-config=/etc/nixos/configuration.nix:/nix/var/nix/profiles/per-user/root/channels
+ export PATH=${pkgs.lib.makeBinPath [ config.nix.package pkgs.systemd pkgs.gnugrep pkgs.git pkgs.gnutar pkgs.gzip pkgs.gnused config.system.build.nixos-rebuild]}:$PATH
+ export NIX_PATH=nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos:nixos-config=/etc/nixos/configuration.nix:/nix/var/nix/profiles/per-user/root/channels
userData=/etc/ec2-metadata/user-data
@@ -18,9 +18,9 @@ let
# that as the channel.
if sed '/^\(#\|SSH_HOST_.*\)/d' < "$userData" | grep -q '\S'; then
channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
- printf "%s" "$channels" | while read channel; do
+ while IFS= read -r channel; do
echo "writing channel: $channel"
- done
+ done < <(printf "%s\n" "$channels")
if [[ -n "$channels" ]]; then
printf "%s" "$channels" > /root/.nix-channels
@@ -48,7 +48,7 @@ in {
wantedBy = [ "multi-user.target" ];
after = [ "multi-user.target" ];
requires = [ "network-online.target" ];
-
+
restartIfChanged = false;
unitConfig.X-StopOnRemoval = false;
@@ -58,4 +58,3 @@ in {
};
};
}
-
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-containers.nix b/nixpkgs/nixos/modules/virtualisation/docker-containers.nix
index 760cb9122a2..3a2eb97d1bf 100644
--- a/nixpkgs/nixos/modules/virtualisation/docker-containers.nix
+++ b/nixpkgs/nixos/modules/virtualisation/docker-containers.nix
@@ -10,11 +10,24 @@ let
options = {
image = mkOption {
- type = types.str;
+ type = with types; str;
description = "Docker image to run.";
example = "library/hello-world";
};
+ imageFile = mkOption {
+ type = with types; nullOr package;
+ default = null;
+ description = ''
+ Path to an image file to load instead of pulling from a registry.
+ If defined, the image is loaded from this file and never pulled from a registry.
+
+ You still need to set the <literal>image</literal> attribute, as it
+ is used as the image name when Docker starts the container.
+ '';
+ example = literalExample "pkgs.dockerTools.buildDockerImage {...};";
+ };
+
cmd = mkOption {
type = with types; listOf str;
default = [];
@@ -153,6 +166,24 @@ let
example = "/var/lib/hello_world";
};
+ dependsOn = mkOption {
+ type = with types; listOf str;
+ default = [];
+ description = ''
+ Define which other containers this one depends on. They are added to both <literal>After</literal> and <literal>Requires</literal> of the generated systemd unit.
+
+ Use the same name as the attribute under <literal>docker-containers</literal>.
+ '';
+ example = literalExample ''
+ docker-containers = {
+ node1 = { };
+ node2 = {
+ dependsOn = [ "node1" ];
+ };
+ };
+ '';
+ };
+
extraDockerOptions = mkOption {
type = with types; listOf str;
default = [];
@@ -164,15 +195,18 @@ let
};
};
- mkService = name: container: {
+ mkService = name: container: let
+ mkAfter = map (x: "docker-${x}.service") container.dependsOn;
+ in rec {
wantedBy = [ "multi-user.target" ];
- after = [ "docker.service" "docker.socket" ];
- requires = [ "docker.service" "docker.socket" ];
+ after = [ "docker.service" "docker.socket" ] ++ mkAfter;
+ requires = after;
+
serviceConfig = {
ExecStart = concatStringsSep " \\\n " ([
"${pkgs.docker}/bin/docker run"
"--rm"
- "--name=%n"
+ "--name=${name}"
"--log-driver=${container.log-driver}"
] ++ optional (container.entrypoint != null)
"--entrypoint=${escapeShellArg container.entrypoint}"
@@ -185,9 +219,14 @@ let
++ [container.image]
++ map escapeShellArg container.cmd
);
- ExecStartPre = "-${pkgs.docker}/bin/docker rm -f %n";
- ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${pkgs.docker}/bin/docker stop %n"'';
- ExecStopPost = "-${pkgs.docker}/bin/docker rm -f %n";
+
+ ExecStartPre = ["-${pkgs.docker}/bin/docker rm -f ${name}"
+ "-${pkgs.docker}/bin/docker image prune -f"] ++
+ (optional (container.imageFile != null)
+ ["${pkgs.docker}/bin/docker load -i ${container.imageFile}"]);
+
+ ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${pkgs.docker}/bin/docker stop ${name}"'';
+ ExecStopPost = "-${pkgs.docker}/bin/docker rm -f ${name}";
### There is no generalized way of supporting `reload` for docker
### containers. Some containers may respond well to SIGHUP sent to their
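As a sketch of how the new imageFile and dependsOn options fit together: the container names below are made up, and the image file reuses the pkgs.dockerTools.examples.nginx archive that the docker-containers test further down also loads.

{
  docker-containers = {
    # Load the image from a Nix-built tarball instead of pulling it.
    webserver = {
      image = "nginx-container";
      imageFile = pkgs.dockerTools.examples.nginx;
      ports = [ "8181:80" ];
    };
    # Generates After=/Requires=docker-webserver.service for this unit,
    # so it only starts once the webserver container is up.
    sidecar = {
      image = "library/hello-world";
      dependsOn = [ "webserver" ];
    };
  };
}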
diff --git a/nixpkgs/nixos/modules/virtualisation/lxd.nix b/nixpkgs/nixos/modules/virtualisation/lxd.nix
index b4934a86cf5..de48d3a780e 100644
--- a/nixpkgs/nixos/modules/virtualisation/lxd.nix
+++ b/nixpkgs/nixos/modules/virtualisation/lxd.nix
@@ -7,6 +7,7 @@ with lib;
let
cfg = config.virtualisation.lxd;
+ zfsCfg = config.boot.zfs;
in
@@ -26,11 +27,40 @@ in
<command>lxc</command> command line tool, among others.
'';
};
+
+ package = mkOption {
+ type = types.package;
+ default = pkgs.lxd;
+ defaultText = "pkgs.lxd";
+ description = ''
+ The LXD package to use.
+ '';
+ };
+
+ lxcPackage = mkOption {
+ type = types.package;
+ default = pkgs.lxc;
+ defaultText = "pkgs.lxc";
+ description = ''
+ The LXC package to use with LXD (required for AppArmor profiles).
+ '';
+ };
+
+ zfsPackage = mkOption {
+ type = types.package;
+ default = with pkgs; if zfsCfg.enableUnstable then zfsUnstable else zfs;
+ defaultText = "pkgs.zfs";
+ description = ''
+ The ZFS package to use with LXD.
+ '';
+ };
+
zfsSupport = mkOption {
type = types.bool;
default = false;
description = ''
- enables lxd to use zfs as a storage for containers.
+ Enables LXD to use ZFS as a storage backend for containers.
+
This option is enabled by default if a zfs pool is configured
with nixos.
'';
@@ -54,15 +84,15 @@ in
config = mkIf cfg.enable {
- environment.systemPackages = [ pkgs.lxd ];
+ environment.systemPackages = [ cfg.package ];
security.apparmor = {
enable = true;
profiles = [
- "${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start"
- "${pkgs.lxc}/etc/apparmor.d/lxc-containers"
+ "${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start"
+ "${cfg.lxcPackage}/etc/apparmor.d/lxc-containers"
];
- packages = [ pkgs.lxc ];
+ packages = [ cfg.lxcPackage ];
};
systemd.services.lxd = {
@@ -71,14 +101,14 @@ in
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
- path = lib.optional cfg.zfsSupport pkgs.zfs;
+ path = lib.optional cfg.zfsSupport cfg.zfsPackage;
preStart = ''
mkdir -m 0755 -p /var/lib/lxc/rootfs
'';
serviceConfig = {
- ExecStart = "@${pkgs.lxd.bin}/bin/lxd lxd --group lxd";
+ ExecStart = "@${cfg.package.bin}/bin/lxd lxd --group lxd";
Type = "simple";
KillMode = "process"; # when stopping, leave the containers alone
LimitMEMLOCK = "infinity";
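A rough sketch of the new LXD options in use, assuming a host that has a ZFS pool; the explicit package choices simply restate the module defaults:

{
  virtualisation.lxd = {
    enable = true;
    zfsSupport = true;        # puts the selected ZFS package on the service PATH

    # These restate the defaults; zfsPackage follows boot.zfs.enableUnstable.
    package = pkgs.lxd;
    lxcPackage = pkgs.lxc;
    zfsPackage = pkgs.zfs;
  };
}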
diff --git a/nixpkgs/nixos/release-combined.nix b/nixpkgs/nixos/release-combined.nix
index ca9c6f9a7f9..b46731863ca 100644
--- a/nixpkgs/nixos/release-combined.nix
+++ b/nixpkgs/nixos/release-combined.nix
@@ -54,7 +54,7 @@ in rec {
(all nixos.dummy)
(all nixos.manual)
- nixos.iso_graphical.x86_64-linux or []
+ nixos.iso_plasma5.x86_64-linux or []
nixos.iso_minimal.aarch64-linux or []
nixos.iso_minimal.i686-linux or []
nixos.iso_minimal.x86_64-linux or []
diff --git a/nixpkgs/nixos/release.nix b/nixpkgs/nixos/release.nix
index f40b5fa9bd7..512ba714397 100644
--- a/nixpkgs/nixos/release.nix
+++ b/nixpkgs/nixos/release.nix
@@ -149,9 +149,9 @@ in rec {
inherit system;
});
- iso_graphical = forMatchingSystems [ "x86_64-linux" ] (system: makeIso {
- module = ./modules/installer/cd-dvd/installation-cd-graphical-kde.nix;
- type = "graphical";
+ iso_plasma5 = forMatchingSystems [ "x86_64-linux" ] (system: makeIso {
+ module = ./modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix;
+ type = "plasma5";
inherit system;
});
@@ -209,7 +209,8 @@ in rec {
hydraJob ((import lib/eval-config.nix {
inherit system;
modules =
- [ versionModule
+ [ configuration
+ versionModule
./maintainers/scripts/ec2/amazon-image.nix
];
}).config.system.build.amazonImage)
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
index fe9c4df1416..6bace8ffe6d 100644
--- a/nixpkgs/nixos/tests/all-tests.nix
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -32,7 +32,7 @@ in
bees = handleTest ./bees.nix {};
bind = handleTest ./bind.nix {};
bittorrent = handleTest ./bittorrent.nix {};
- #blivet = handleTest ./blivet.nix {}; # broken since 2017-07024
+ buildkite-agent = handleTest ./buildkite-agent.nix {};
boot = handleTestOn ["x86_64-linux"] ./boot.nix {}; # syslinux is unsupported on aarch64
boot-stage1 = handleTest ./boot-stage1.nix {};
borgbackup = handleTest ./borgbackup.nix {};
@@ -61,6 +61,7 @@ in
containers-portforward = handleTest ./containers-portforward.nix {};
containers-restart_networking = handleTest ./containers-restart_networking.nix {};
containers-tmpfs = handleTest ./containers-tmpfs.nix {};
+ corerad = handleTest ./corerad.nix {};
couchdb = handleTest ./couchdb.nix {};
deluge = handleTest ./deluge.nix {};
dhparams = handleTest ./dhparams.nix {};
@@ -73,6 +74,7 @@ in
docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
documize = handleTest ./documize.nix {};
+ dokuwiki = handleTest ./dokuwiki.nix {};
dovecot = handleTest ./dovecot.nix {};
# ec2-config doesn't work in a sandbox as the simulated ec2 instance needs network access
#ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {};
@@ -91,6 +93,7 @@ in
flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
fluentd = handleTest ./fluentd.nix {};
fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
+ freeswitch = handleTest ./freeswitch.nix {};
fsck = handleTest ./fsck.nix {};
gotify-server = handleTest ./gotify-server.nix {};
gitea = handleTest ./gitea.nix {};
@@ -290,11 +293,13 @@ in
upnp = handleTest ./upnp.nix {};
uwsgi = handleTest ./uwsgi.nix {};
vault = handleTest ./vault.nix {};
+ victoriametrics = handleTest ./victoriametrics.nix {};
virtualbox = handleTestOn ["x86_64-linux"] ./virtualbox.nix {};
wireguard = handleTest ./wireguard {};
wireguard-generated = handleTest ./wireguard/generated.nix {};
wireguard-namespaces = handleTest ./wireguard/namespaces.nix {};
wordpress = handleTest ./wordpress.nix {};
+ xandikos = handleTest ./xandikos.nix {};
xautolock = handleTest ./xautolock.nix {};
xfce = handleTest ./xfce.nix {};
xmonad = handleTest ./xmonad.nix {};
diff --git a/nixpkgs/nixos/tests/bittorrent.nix b/nixpkgs/nixos/tests/bittorrent.nix
index e5be652c711..0a97d5556a2 100644
--- a/nixpkgs/nixos/tests/bittorrent.nix
+++ b/nixpkgs/nixos/tests/bittorrent.nix
@@ -18,6 +18,17 @@ let
externalRouterAddress = "80.100.100.1";
externalClient2Address = "80.100.100.2";
externalTrackerAddress = "80.100.100.3";
+
+ transmissionConfig = { ... }: {
+ environment.systemPackages = [ pkgs.transmission ];
+ services.transmission = {
+ enable = true;
+ settings = {
+ dht-enabled = false;
+ message-level = 3;
+ };
+ };
+ };
in
{
@@ -26,88 +37,79 @@ in
maintainers = [ domenkozar eelco rob bobvanderlinden ];
};
- nodes =
- { tracker =
- { pkgs, ... }:
- { environment.systemPackages = [ pkgs.transmission ];
-
- virtualisation.vlans = [ 1 ];
- networking.interfaces.eth1.ipv4.addresses = [
- { address = externalTrackerAddress; prefixLength = 24; }
- ];
-
- # We need Apache on the tracker to serve the torrents.
- services.httpd.enable = true;
- services.httpd.adminAddr = "foo@example.org";
- services.httpd.documentRoot = "/tmp";
-
- networking.firewall.enable = false;
-
- services.opentracker.enable = true;
-
- services.transmission.enable = true;
- services.transmission.settings.dht-enabled = false;
- services.transmission.settings.port-forwaring-enabled = false;
- };
-
- router =
- { pkgs, nodes, ... }:
- { virtualisation.vlans = [ 1 2 ];
- networking.nat.enable = true;
- networking.nat.internalInterfaces = [ "eth2" ];
- networking.nat.externalInterface = "eth1";
- networking.firewall.enable = true;
- networking.firewall.trustedInterfaces = [ "eth2" ];
- networking.interfaces.eth0.ipv4.addresses = [];
- networking.interfaces.eth1.ipv4.addresses = [
- { address = externalRouterAddress; prefixLength = 24; }
- ];
- networking.interfaces.eth2.ipv4.addresses = [
- { address = internalRouterAddress; prefixLength = 24; }
- ];
- services.miniupnpd = {
- enable = true;
- externalInterface = "eth1";
- internalIPs = [ "eth2" ];
- appendConfig = ''
- ext_ip=${externalRouterAddress}
- '';
+ nodes = {
+ tracker = { pkgs, ... }: {
+ imports = [ transmissionConfig ];
+
+ virtualisation.vlans = [ 1 ];
+ networking.firewall.enable = false;
+ networking.interfaces.eth1.ipv4.addresses = [
+ { address = externalTrackerAddress; prefixLength = 24; }
+ ];
+
+ # We need Apache on the tracker to serve the torrents.
+ services.httpd = {
+ enable = true;
+ virtualHosts = {
+ "torrentserver.org" = {
+ adminAddr = "foo@example.org";
+ documentRoot = "/tmp";
};
};
+ };
+ services.opentracker.enable = true;
+ };
- client1 =
- { pkgs, nodes, ... }:
- { environment.systemPackages = [ pkgs.transmission pkgs.miniupnpc ];
- virtualisation.vlans = [ 2 ];
- networking.interfaces.eth0.ipv4.addresses = [];
- networking.interfaces.eth1.ipv4.addresses = [
- { address = internalClient1Address; prefixLength = 24; }
- ];
- networking.defaultGateway = internalRouterAddress;
- networking.firewall.enable = false;
- services.transmission.enable = true;
- services.transmission.settings.dht-enabled = false;
- services.transmission.settings.message-level = 3;
- };
+ router = { pkgs, nodes, ... }: {
+ virtualisation.vlans = [ 1 2 ];
+ networking.nat.enable = true;
+ networking.nat.internalInterfaces = [ "eth2" ];
+ networking.nat.externalInterface = "eth1";
+ networking.firewall.enable = true;
+ networking.firewall.trustedInterfaces = [ "eth2" ];
+ networking.interfaces.eth0.ipv4.addresses = [];
+ networking.interfaces.eth1.ipv4.addresses = [
+ { address = externalRouterAddress; prefixLength = 24; }
+ ];
+ networking.interfaces.eth2.ipv4.addresses = [
+ { address = internalRouterAddress; prefixLength = 24; }
+ ];
+ services.miniupnpd = {
+ enable = true;
+ externalInterface = "eth1";
+ internalIPs = [ "eth2" ];
+ appendConfig = ''
+ ext_ip=${externalRouterAddress}
+ '';
+ };
+ };
- client2 =
- { pkgs, ... }:
- { environment.systemPackages = [ pkgs.transmission ];
- virtualisation.vlans = [ 1 ];
- networking.interfaces.eth0.ipv4.addresses = [];
- networking.interfaces.eth1.ipv4.addresses = [
- { address = externalClient2Address; prefixLength = 24; }
- ];
- networking.firewall.enable = false;
- services.transmission.enable = true;
- services.transmission.settings.dht-enabled = false;
- services.transmission.settings.port-forwaring-enabled = false;
- };
+ client1 = { pkgs, nodes, ... }: {
+ imports = [ transmissionConfig ];
+ environment.systemPackages = [ pkgs.miniupnpc ];
+
+ virtualisation.vlans = [ 2 ];
+ networking.interfaces.eth0.ipv4.addresses = [];
+ networking.interfaces.eth1.ipv4.addresses = [
+ { address = internalClient1Address; prefixLength = 24; }
+ ];
+ networking.defaultGateway = internalRouterAddress;
+ networking.firewall.enable = false;
};
- testScript =
- { nodes, ... }:
- ''
+ client2 = { pkgs, ... }: {
+ imports = [ transmissionConfig ];
+
+ virtualisation.vlans = [ 1 ];
+ networking.interfaces.eth0.ipv4.addresses = [];
+ networking.interfaces.eth1.ipv4.addresses = [
+ { address = externalClient2Address; prefixLength = 24; }
+ ];
+ networking.firewall.enable = false;
+ };
+ };
+
+ testScript = { nodes, ... }: ''
start_all()
# Wait for network and miniupnpd.
@@ -159,5 +161,4 @@ in
"cmp /tmp/test.tar.bz2 ${file}"
)
'';
-
})
diff --git a/nixpkgs/nixos/tests/blivet.nix b/nixpkgs/nixos/tests/blivet.nix
deleted file mode 100644
index 2adc2ee1eee..00000000000
--- a/nixpkgs/nixos/tests/blivet.nix
+++ /dev/null
@@ -1,87 +0,0 @@
-import ./make-test.nix ({ pkgs, ... }: with pkgs.python2Packages; rec {
- name = "blivet";
- meta = with pkgs.stdenv.lib.maintainers; {
- maintainers = [ aszlig ];
- };
-
- machine = {
- environment.systemPackages = [ pkgs.python blivet mock ];
- boot.supportedFilesystems = [ "btrfs" "jfs" "reiserfs" "xfs" ];
- virtualisation.memorySize = 768;
- };
-
- debugBlivet = false;
- debugProgramCalls = false;
-
- pythonTestRunner = pkgs.writeText "run-blivet-tests.py" ''
- import sys
- import logging
-
- from unittest import TestLoader
- from unittest.runner import TextTestRunner
-
- ${pkgs.lib.optionalString debugProgramCalls ''
- blivet_program_log = logging.getLogger("program")
- blivet_program_log.setLevel(logging.DEBUG)
- blivet_program_log.addHandler(logging.StreamHandler(sys.stderr))
- ''}
-
- ${pkgs.lib.optionalString debugBlivet ''
- blivet_log = logging.getLogger("blivet")
- blivet_log.setLevel(logging.DEBUG)
- blivet_log.addHandler(logging.StreamHandler(sys.stderr))
- ''}
-
- runner = TextTestRunner(verbosity=2, failfast=False, buffer=False)
- result = runner.run(TestLoader().discover('tests/', pattern='*_test.py'))
- sys.exit(not result.wasSuccessful())
- '';
-
- blivetTest = pkgs.writeScript "blivet-test.sh" ''
- #!${pkgs.stdenv.shell} -e
-
- # Use the hosts temporary directory, because we have a tmpfs within the VM
- # and we don't want to increase the memory size of the VM for no reason.
- mkdir -p /tmp/xchg/bigtmp
- TMPDIR=/tmp/xchg/bigtmp
- export TMPDIR
-
- cp -Rd "${blivet.src}/tests" .
-
- # Skip SELinux tests
- rm -f tests/formats_test/selinux_test.py
-
- # Race conditions in growing/shrinking during resync
- rm -f tests/devicelibs_test/mdraid_*
-
- # Deactivate small BTRFS device test, because it fails with newer btrfsprogs
- sed -i -e '/^class *BTRFSAsRootTestCase3(/,/^[^ ]/ {
- /^class *BTRFSAsRootTestCase3(/d
- /^$/d
- /^ /d
- }' tests/devicelibs_test/btrfs_test.py
-
- # How on earth can these tests ever work even upstream? O_o
- sed -i -e '/def testDiskChunk[12]/,/^ *[^ ]/{n; s/^ */&return # /}' \
- tests/partitioning_test.py
-
- # fix hardcoded temporary directory
- sed -i \
- -e '1i import tempfile' \
- -e 's|_STORE_FILE_PATH = .*|_STORE_FILE_PATH = tempfile.gettempdir()|' \
- -e 's|DEFAULT_STORE_SIZE = .*|DEFAULT_STORE_SIZE = 409600|' \
- tests/loopbackedtestcase.py
-
- PYTHONPATH=".:$(< "${pkgs.stdenv.mkDerivation {
- name = "blivet-pythonpath";
- buildInputs = [ blivet mock ];
- buildCommand = "echo \"$PYTHONPATH\" > \"$out\"";
- }}")" python "${pythonTestRunner}"
- '';
-
- testScript = ''
- $machine->waitForUnit("multi-user.target");
- $machine->succeed("${blivetTest}");
- $machine->execute("rm -rf /tmp/xchg/bigtmp");
- '';
-})
diff --git a/nixpkgs/nixos/tests/buildkite-agent.nix b/nixpkgs/nixos/tests/buildkite-agent.nix
new file mode 100644
index 00000000000..3c824c9aedf
--- /dev/null
+++ b/nixpkgs/nixos/tests/buildkite-agent.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+ name = "buildkite-agent";
+ meta = with pkgs.stdenv.lib.maintainers; {
+ maintainers = [ flokli ];
+ };
+
+ nodes = {
+ node1 = { pkgs, ... }: {
+ services.buildkite-agent = {
+ enable = true;
+ privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey;
+ tokenPath = (pkgs.writeText "my-token" "5678");
+ };
+ };
+ # don't configure ssh key, run as a separate user
+ node2 = { pkgs, ...}: {
+ services.buildkite-agent = {
+ enable = true;
+ tokenPath = (pkgs.writeText "my-token" "1234");
+ };
+ };
+ };
+
+ testScript = ''
+ start_all()
+ # we can't wait on the unit to start up, as we obviously can't connect to buildkite,
+ # but we can check whether the files are set up correctly
+
+ node1.wait_for_file("/var/lib/buildkite-agent/buildkite-agent.cfg")
+ node1.wait_for_file("/var/lib/buildkite-agent/.ssh/id_rsa")
+
+ node2.wait_for_file("/var/lib/buildkite-agent/buildkite-agent.cfg")
+ '';
+})
diff --git a/nixpkgs/nixos/tests/certmgr.nix b/nixpkgs/nixos/tests/certmgr.nix
index cb69f35e862..ef32f54400e 100644
--- a/nixpkgs/nixos/tests/certmgr.nix
+++ b/nixpkgs/nixos/tests/certmgr.nix
@@ -9,8 +9,8 @@ let
inherit action;
authority = {
file = {
- group = "nobody";
- owner = "nobody";
+ group = "nginx";
+ owner = "nginx";
path = "/tmp/${host}-ca.pem";
};
label = "www_ca";
@@ -18,14 +18,14 @@ let
remote = "localhost:8888";
};
certificate = {
- group = "nobody";
- owner = "nobody";
+ group = "nginx";
+ owner = "nginx";
path = "/tmp/${host}-cert.pem";
};
private_key = {
- group = "nobody";
+ group = "nginx";
mode = "0600";
- owner = "nobody";
+ owner = "nginx";
path = "/tmp/${host}-key.pem";
};
request = {
diff --git a/nixpkgs/nixos/tests/chromium.nix b/nixpkgs/nixos/tests/chromium.nix
index a5531d112e3..3844255bd8a 100644
--- a/nixpkgs/nixos/tests/chromium.nix
+++ b/nixpkgs/nixos/tests/chromium.nix
@@ -23,7 +23,7 @@ mapAttrs (channel: chromiumPkg: makeTest rec {
machine.imports = [ ./common/user-account.nix ./common/x11.nix ];
machine.virtualisation.memorySize = 2047;
- machine.services.xserver.displayManager.auto.user = "alice";
+ machine.test-support.displayManager.auto.user = "alice";
machine.environment.systemPackages = [ chromiumPkg ];
startupHTML = pkgs.writeText "chromium-startup.html" ''
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/auto.nix b/nixpkgs/nixos/tests/common/auto.nix
index 1068a344e0c..2c21a8d5167 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/auto.nix
+++ b/nixpkgs/nixos/tests/common/auto.nix
@@ -5,7 +5,7 @@ with lib;
let
dmcfg = config.services.xserver.displayManager;
- cfg = dmcfg.auto;
+ cfg = config.test-support.displayManager.auto;
in
@@ -15,7 +15,7 @@ in
options = {
- services.xserver.displayManager.auto = {
+ test-support.displayManager.auto = {
enable = mkOption {
default = false;
diff --git a/nixpkgs/nixos/tests/common/ec2.nix b/nixpkgs/nixos/tests/common/ec2.nix
index 1e69b63191a..ba087bb6009 100644
--- a/nixpkgs/nixos/tests/common/ec2.nix
+++ b/nixpkgs/nixos/tests/common/ec2.nix
@@ -25,7 +25,7 @@ with pkgs.lib;
my $imageDir = ($ENV{'TMPDIR'} // "/tmp") . "/vm-state-machine";
mkdir $imageDir, 0700;
my $diskImage = "$imageDir/machine.qcow2";
- system("qemu-img create -f qcow2 -o backing_file=${image}/nixos.qcow2 $diskImage") == 0 or die;
+ system("qemu-img create -f qcow2 -o backing_file=${image} $diskImage") == 0 or die;
system("qemu-img resize $diskImage 10G") == 0 or die;
# Note: we use net=169.0.0.0/8 rather than
@@ -35,7 +35,7 @@ with pkgs.lib;
# again when it deletes link-local addresses.) Ideally we'd
# turn off the DHCP server, but qemu does not have an option
# to do that.
- my $startCommand = "qemu-kvm -m 768";
+ my $startCommand = "qemu-kvm -m 1024";
$startCommand .= " -device virtio-net-pci,netdev=vlan0";
$startCommand .= " -netdev 'user,id=vlan0,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'";
$startCommand .= " -drive file=$diskImage,if=virtio,werror=report";
diff --git a/nixpkgs/nixos/tests/common/x11.nix b/nixpkgs/nixos/tests/common/x11.nix
index 5ad0ac20fac..0d76a0e972f 100644
--- a/nixpkgs/nixos/tests/common/x11.nix
+++ b/nixpkgs/nixos/tests/common/x11.nix
@@ -1,9 +1,14 @@
{ lib, ... }:
-{ services.xserver.enable = true;
+{
+ imports = [
+ ./auto.nix
+ ];
+
+ services.xserver.enable = true;
# Automatically log in.
- services.xserver.displayManager.auto.enable = true;
+ test-support.displayManager.auto.enable = true;
# Use IceWM as the window manager.
# Don't use a desktop manager.
diff --git a/nixpkgs/nixos/tests/corerad.nix b/nixpkgs/nixos/tests/corerad.nix
new file mode 100644
index 00000000000..950c9abc899
--- /dev/null
+++ b/nixpkgs/nixos/tests/corerad.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix (
+ {
+ nodes = {
+ router = {config, pkgs, ...}: {
+ config = {
+ # This machine simulates a router with IPv6 forwarding and a static IPv6 address.
+ boot.kernel.sysctl = {
+ "net.ipv6.conf.all.forwarding" = true;
+ };
+ networking.interfaces.eth1 = {
+ ipv6.addresses = [ { address = "fd00:dead:beef:dead::1"; prefixLength = 64; } ];
+ };
+ services.corerad = {
+ enable = true;
+ # Serve router advertisements to the client machine with prefix information matching
+ # any IPv6 /64 prefixes configured on this interface.
+ configFile = pkgs.writeText "corerad.toml" ''
+ [[interfaces]]
+ name = "eth1"
+ send_advertisements = true
+ [[interfaces.prefix]]
+ prefix = "::/64"
+ '';
+ };
+ };
+ };
+ client = {config, pkgs, ...}: {
+ # Use IPv6 SLAAC from router advertisements, and install rdisc6 so we can
+ # trigger one immediately.
+ config = {
+ boot.kernel.sysctl = {
+ "net.ipv6.conf.all.autoconf" = true;
+ };
+ environment.systemPackages = with pkgs; [
+ ndisc6
+ ];
+ };
+ };
+ };
+
+ testScript = ''
+ start_all()
+
+ with subtest("Wait for CoreRAD and network ready"):
+ # Ensure networking is online and CoreRAD is ready.
+ router.wait_for_unit("network-online.target")
+ client.wait_for_unit("network-online.target")
+ router.wait_for_unit("corerad.service")
+
+ # Ensure the client can reach the router.
+ client.wait_until_succeeds("ping -c 1 fd00:dead:beef:dead::1")
+
+ with subtest("Verify SLAAC on client"):
+ # Trigger a router solicitation and verify a SLAAC address is assigned from
+ # the prefix configured on the router.
+ client.wait_until_succeeds("rdisc6 -1 -r 10 eth1")
+ client.wait_until_succeeds(
+ "ip -6 addr show dev eth1 | grep -q 'fd00:dead:beef:dead:'"
+ )
+
+ addrs = client.succeed("ip -6 addr show dev eth1")
+
+ assert (
+ "fd00:dead:beef:dead:" in addrs
+ ), "SLAAC prefix was not found in client addresses after router advertisement"
+ assert (
+ "/64 scope global temporary" in addrs
+ ), "SLAAC temporary address was not configured on client after router advertisement"
+ '';
+ })
diff --git a/nixpkgs/nixos/tests/docker-containers.nix b/nixpkgs/nixos/tests/docker-containers.nix
index 97255273520..9be9bfa80ce 100644
--- a/nixpkgs/nixos/tests/docker-containers.nix
+++ b/nixpkgs/nixos/tests/docker-containers.nix
@@ -1,9 +1,11 @@
# Test Docker containers as systemd units
-import ./make-test.nix ({ pkgs, lib, ... }: {
+import ./make-test.nix ({ pkgs, lib, ... }:
+
+{
name = "docker-containers";
meta = {
- maintainers = with lib.maintainers; [ benley ];
+ maintainers = with lib.maintainers; [ benley mkaito ];
};
nodes = {
@@ -11,10 +13,9 @@ import ./make-test.nix ({ pkgs, lib, ... }: {
{
virtualisation.docker.enable = true;
- virtualisation.dockerPreloader.images = [ pkgs.dockerTools.examples.nginx ];
-
docker-containers.nginx = {
image = "nginx-container";
+ imageFile = pkgs.dockerTools.examples.nginx;
ports = ["8181:80"];
};
};
diff --git a/nixpkgs/nixos/tests/docker-tools.nix b/nixpkgs/nixos/tests/docker-tools.nix
index 9ab1a71f331..07fac533680 100644
--- a/nixpkgs/nixos/tests/docker-tools.nix
+++ b/nixpkgs/nixos/tests/docker-tools.nix
@@ -80,5 +80,8 @@ import ./make-test.nix ({ pkgs, ... }: {
# This is to be sure the order of layers of the parent image is preserved
$docker->succeed("docker run --rm ${pkgs.dockerTools.examples.layersOrder.imageName} cat /tmp/layer2 | grep -q layer2");
$docker->succeed("docker run --rm ${pkgs.dockerTools.examples.layersOrder.imageName} cat /tmp/layer3 | grep -q layer3");
+
+ # Ensure image with only 2 layers can be loaded
+ $docker->succeed("docker load --input='${pkgs.dockerTools.examples.two-layered-image}'");
'';
})
diff --git a/nixpkgs/nixos/tests/dokuwiki.nix b/nixpkgs/nixos/tests/dokuwiki.nix
new file mode 100644
index 00000000000..38bde10f47e
--- /dev/null
+++ b/nixpkgs/nixos/tests/dokuwiki.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+ name = "dokuwiki";
+ meta.maintainers = with maintainers; [ maintainers."1000101" ];
+
+ nodes.machine =
+ { pkgs, ... }:
+ { services.dokuwiki = {
+ enable = true;
+ acl = " ";
+ superUser = null;
+ nginx = {
+ forceSSL = false;
+ enableACME = false;
+ };
+ };
+ };
+
+ testScript = ''
+ machine.start()
+ machine.wait_for_unit("phpfpm-dokuwiki.service")
+ machine.wait_for_unit("nginx.service")
+ machine.wait_for_open_port(80)
+ machine.succeed("curl -sSfL http://localhost/ | grep 'DokuWiki'")
+ '';
+})
diff --git a/nixpkgs/nixos/tests/ec2.nix b/nixpkgs/nixos/tests/ec2.nix
index c649ce852da..6aeeb17ba31 100644
--- a/nixpkgs/nixos/tests/ec2.nix
+++ b/nixpkgs/nixos/tests/ec2.nix
@@ -9,7 +9,7 @@ with pkgs.lib;
with import common/ec2.nix { inherit makeTest pkgs; };
let
- image =
+ imageCfg =
(import ../lib/eval-config.nix {
inherit system;
modules = [
@@ -26,20 +26,32 @@ let
'';
# Needed by nixos-rebuild due to the lack of network
- # access. Mostly copied from
- # modules/profiles/installation-device.nix.
+ # access. Determined by trial and error.
system.extraDependencies =
- with pkgs; [
- stdenv busybox perlPackages.ArchiveCpio unionfs-fuse mkinitcpio-nfs-utils
-
- # These are used in the configure-from-userdata tests for EC2. Httpd and valgrind are requested
- # directly by the configuration we set, and libxslt.bin is used indirectly as a build dependency
- # of the derivation for dbus configuration files.
- apacheHttpd valgrind.doc libxslt.bin
- ];
+ with pkgs; (
+ [
+ # Needed for a nixos-rebuild.
+ busybox
+ stdenv
+ stdenvNoCC
+ mkinitcpio-nfs-utils
+ unionfs-fuse
+ cloud-utils
+ desktop-file-utils
+ texinfo
+ libxslt.bin
+ xorg.lndir
+
+ # These are used in the configure-from-userdata tests
+ # for EC2. Httpd and valgrind are requested by the
+ # configuration.
+ apacheHttpd apacheHttpd.doc apacheHttpd.man valgrind.doc
+ ]
+ );
}
];
- }).config.system.build.amazonImage;
+ }).config;
+ image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.vhd";
sshKeys = import ./ssh-keys.nix pkgs;
snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
@@ -110,16 +122,23 @@ in {
text = "whoa";
};
+ networking.hostName = "ec2-test-vm"; # required by services.httpd
+
services.httpd = {
enable = true;
adminAddr = "test@example.org";
- virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html";
+ virtualHosts.localhost.documentRoot = "''${pkgs.valgrind.doc}/share/doc/valgrind/html";
};
networking.firewall.allowedTCPPorts = [ 80 ];
}
'';
script = ''
$machine->start;
+
+ # amazon-init must succeed. If it fails, make the test fail
+ # immediately instead of timing out in waitForFile.
+ $machine->waitForUnit('amazon-init.service');
+
$machine->waitForFile("/etc/testFile");
$machine->succeed("cat /etc/testFile | grep -q 'whoa'");
diff --git a/nixpkgs/nixos/tests/elk.nix b/nixpkgs/nixos/tests/elk.nix
index b33d98b85d6..d3dc6dde135 100644
--- a/nixpkgs/nixos/tests/elk.nix
+++ b/nixpkgs/nixos/tests/elk.nix
@@ -6,20 +6,11 @@
# NIXPKGS_ALLOW_UNFREE=1 nix-build nixos/tests/elk.nix -A ELK-6 --arg enableUnfree true
}:
-with import ../lib/testing.nix { inherit system pkgs; };
-with pkgs.lib;
-
let
esUrl = "http://localhost:9200";
- totalHits = message :
- "curl --silent --show-error '${esUrl}/_search' -H 'Content-Type: application/json' " +
- ''-d '{\"query\" : { \"match\" : { \"message\" : \"${message}\"}}}' '' +
- "| jq .hits.total";
-
mkElkTest = name : elk :
- let elasticsearchGe7 = builtins.compareVersions elk.elasticsearch.version "7" >= 0;
- in makeTest {
+ import ./make-test-python.nix ({
inherit name;
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ eelco offline basvandijk ];
@@ -50,15 +41,15 @@ let
elk.journalbeat.version "6" < 0; in {
enable = true;
package = elk.journalbeat;
- extraConfig = mkOptionDefault (''
+ extraConfig = pkgs.lib.mkOptionDefault (''
logging:
to_syslog: true
level: warning
metrics.enabled: false
output.elasticsearch:
hosts: [ "127.0.0.1:9200" ]
- ${optionalString lt6 "template.enabled: false"}
- '' + optionalString (!lt6) ''
+ ${pkgs.lib.optionalString lt6 "template.enabled: false"}
+ '' + pkgs.lib.optionalString (!lt6) ''
journalbeat.inputs:
- paths: []
seek: cursor
@@ -99,8 +90,7 @@ let
};
elasticsearch-curator = {
- # The current version of curator (5.6) doesn't support elasticsearch >= 7.0.0.
- enable = !elasticsearchGe7;
+ enable = true;
actionYAML = ''
---
actions:
@@ -130,11 +120,23 @@ let
};
testScript = ''
- startAll;
+ import json
+
+
+ def total_hits(message):
+ dictionary = {"query": {"match": {"message": message}}}
+ return (
+ "curl --silent --show-error '${esUrl}/_search' "
+ + "-H 'Content-Type: application/json' "
+ + "-d '{}' ".format(json.dumps(dictionary))
+ + "| jq .hits.total"
+ )
+
+
+ start_all()
- # Wait until elasticsearch is listening for connections.
- $one->waitForUnit("elasticsearch.service");
- $one->waitForOpenPort(9200);
+ one.wait_for_unit("elasticsearch.service")
+ one.wait_for_open_port(9200)
# Continue as long as the status is not "red". The status is probably
# "yellow" instead of "green" because we are using a single elasticsearch
@@ -142,42 +144,43 @@ let
#
# TODO: extend this test with multiple elasticsearch nodes
# and see if the status turns "green".
- $one->waitUntilSucceeds(
- "curl --silent --show-error '${esUrl}/_cluster/health' " .
- "| jq .status | grep -v red");
-
- # Perform some simple logstash tests.
- $one->waitForUnit("logstash.service");
- $one->waitUntilSucceeds("cat /tmp/logstash.out | grep flowers");
- $one->waitUntilSucceeds("cat /tmp/logstash.out | grep -v dragons");
-
- # See if kibana is healthy.
- $one->waitForUnit("kibana.service");
- $one->waitUntilSucceeds(
- "curl --silent --show-error 'http://localhost:5601/api/status' " .
- "| jq .status.overall.state | grep green");
-
- # See if logstash messages arive in elasticsearch.
- $one->waitUntilSucceeds("${totalHits "flowers"} | grep -v 0");
- $one->waitUntilSucceeds("${totalHits "dragons"} | grep 0");
-
- # Test if a message logged to the journal
- # is ingested by elasticsearch via journalbeat.
- $one->waitForUnit("journalbeat.service");
- $one->execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat");
- $one->waitUntilSucceeds(
- "${totalHits "Supercalifragilisticexpialidocious"} | grep -v 0");
-
- '' + optionalString (!elasticsearchGe7) ''
- # Test elasticsearch-curator.
- $one->systemctl("stop logstash");
- $one->systemctl("start elasticsearch-curator");
- $one->waitUntilSucceeds(
- "! curl --silent --show-error '${esUrl}/_cat/indices' " .
- "| grep logstash | grep -q ^$1");
+ one.wait_until_succeeds(
+ "curl --silent --show-error '${esUrl}/_cluster/health' | jq .status | grep -v red"
+ )
+
+ with subtest("Perform some simple logstash tests"):
+ one.wait_for_unit("logstash.service")
+ one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
+ one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
+
+ with subtest("Kibana is healthy"):
+ one.wait_for_unit("kibana.service")
+ one.wait_until_succeeds(
+ "curl --silent --show-error 'http://localhost:5601/api/status' | jq .status.overall.state | grep green"
+ )
+
+ with subtest("Logstash messages arive in elasticsearch"):
+ one.wait_until_succeeds(total_hits("flowers") + " | grep -v 0")
+ one.wait_until_succeeds(total_hits("dragons") + " | grep 0")
+
+ with subtest(
+ "A message logged to the journal is ingested by elasticsearch via journalbeat"
+ ):
+ one.wait_for_unit("journalbeat.service")
+ one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
+ one.wait_until_succeeds(
+ total_hits("Supercalifragilisticexpialidocious") + " | grep -v 0"
+ )
+
+ with subtest("Elasticsearch-curator works"):
+ one.systemctl("stop logstash")
+ one.systemctl("start elasticsearch-curator")
+ one.wait_until_succeeds(
+ '! curl --silent --show-error "${esUrl}/_cat/indices" | grep logstash | grep -q ^'
+ )
'';
- };
-in mapAttrs mkElkTest {
+ }) {};
+in pkgs.lib.mapAttrs mkElkTest {
ELK-6 =
if enableUnfree
then {
diff --git a/nixpkgs/nixos/tests/freeswitch.nix b/nixpkgs/nixos/tests/freeswitch.nix
new file mode 100644
index 00000000000..349d0e7bc6f
--- /dev/null
+++ b/nixpkgs/nixos/tests/freeswitch.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+ name = "freeswitch";
+ meta = with pkgs.stdenv.lib.maintainers; {
+ maintainers = [ misuzu ];
+ };
+ nodes = {
+ node0 = { config, lib, ... }: {
+ networking.useDHCP = false;
+ networking.interfaces.eth1 = {
+ ipv4.addresses = [
+ {
+ address = "192.168.0.1";
+ prefixLength = 24;
+ }
+ ];
+ };
+ services.freeswitch = {
+ enable = true;
+ enableReload = true;
+ configTemplate = "${config.services.freeswitch.package}/share/freeswitch/conf/minimal";
+ };
+ };
+ };
+ testScript = ''
+ node0.wait_for_unit("freeswitch.service")
+ # Wait for SIP port to be open
+ node0.wait_for_open_port("5060")
+ '';
+})
diff --git a/nixpkgs/nixos/tests/gnome3.nix b/nixpkgs/nixos/tests/gnome3.nix
index ab363efb6a1..486c146d8dc 100644
--- a/nixpkgs/nixos/tests/gnome3.nix
+++ b/nixpkgs/nixos/tests/gnome3.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ...} : {
name = "gnome3";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = pkgs.gnome3.maintainers;
@@ -24,41 +24,53 @@ import ./make-test.nix ({ pkgs, ...} : {
virtualisation.memorySize = 1024;
};
- testScript = let
+ testScript = { nodes, ... }: let
# Keep line widths somewhat managable
- bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus";
+ user = nodes.machine.config.users.users.alice;
+ uid = toString user.uid;
+ bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus";
gdbus = "${bus} gdbus";
+ su = command: "su - ${user.name} -c '${command}'";
+
# Call javascript in gnome shell, returns a tuple (success, output), where
# `success` is true if the dbus call was successful and output is what the
# javascript evaluates to.
eval = "call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval";
- # False when startup is done
- startingUp = "${gdbus} ${eval} Main.layoutManager._startingUp";
- # Hopefully gnome-terminal's wm class
- wmClass = "${gdbus} ${eval} global.display.focus_window.wm_class";
- in ''
- # wait for gdm to start
- $machine->waitForUnit("display-manager.service");
- # wait for alice to be logged in
- $machine->waitForUnit("default.target","alice");
-
- # Check that logging in has given the user ownership of devices.
- $machine->succeed("getfacl -p /dev/snd/timer | grep -q alice");
+ # False when startup is done
+ startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
- # Wait for the wayland server
- $machine->waitForFile("/run/user/1000/wayland-0");
+ # Start gnome-terminal
+ gnomeTerminalCommand = su "${bus} gnome-terminal";
- # Wait for gnome shell, correct output should be "(true, 'false')"
- $machine->waitUntilSucceeds("su - alice -c '${startingUp} | grep -q true,..false'");
+ # Hopefully gnome-terminal's wm class
+ wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
+ in ''
+ with subtest("Login to GNOME with GDM"):
+ # wait for gdm to start
+ machine.wait_for_unit("display-manager.service")
+ # wait for the wayland server
+ machine.wait_for_file("/run/user/${uid}/wayland-0")
+ # wait for alice to be logged in
+ machine.wait_for_unit("default.target", "${user.name}")
+ # check that logging in has given the user ownership of devices
+ assert "alice" in machine.succeed("getfacl -p /dev/snd/timer")
- # open a terminal
- $machine->succeed("su - alice -c '${bus} gnome-terminal'");
- # and check it's there
- $machine->waitUntilSucceeds("su - alice -c '${wmClass} | grep -q gnome-terminal-server'");
+ with subtest("Wait for GNOME Shell"):
+ # correct output should be (true, 'false')
+ machine.wait_until_succeeds(
+ "${startingUp} | grep -q 'true,..false'"
+ )
- # wait to get a nice screenshot
- $machine->sleep(20);
- $machine->screenshot("screen");
+ with subtest("Open Gnome Terminal"):
+ machine.succeed(
+ "${gnomeTerminalCommand}"
+ )
+ # correct output should be (true, '"gnome-terminal-server"')
+ machine.wait_until_succeeds(
+ "${wmClass} | grep -q 'gnome-terminal-server'"
+ )
+ machine.sleep(20)
+ machine.screenshot("screen")
'';
})
diff --git a/nixpkgs/nixos/tests/graphite.nix b/nixpkgs/nixos/tests/graphite.nix
index 27a87bdbb9f..ba3c73bb878 100644
--- a/nixpkgs/nixos/tests/graphite.nix
+++ b/nixpkgs/nixos/tests/graphite.nix
@@ -1,6 +1,11 @@
-import ./make-test.nix ({ pkgs, ... } :
+import ./make-test-python.nix ({ pkgs, ... } :
{
name = "graphite";
+ meta = {
+ # Fails on dependency `python-2.7-Twisted`'s test suite
+ # complaining `ImportError: No module named zope.interface`.
+ broken = true;
+ };
nodes = {
one =
{ ... }: {
@@ -22,20 +27,20 @@ import ./make-test.nix ({ pkgs, ... } :
};
testScript = ''
- startAll;
- $one->waitForUnit("default.target");
- $one->waitForUnit("graphiteWeb.service");
- $one->waitForUnit("graphiteApi.service");
- $one->waitForUnit("graphitePager.service");
- $one->waitForUnit("graphite-beacon.service");
- $one->waitForUnit("carbonCache.service");
- $one->waitForUnit("seyren.service");
+ start_all()
+ one.wait_for_unit("default.target")
+ one.wait_for_unit("graphiteWeb.service")
+ one.wait_for_unit("graphiteApi.service")
+ one.wait_for_unit("graphitePager.service")
+ one.wait_for_unit("graphite-beacon.service")
+ one.wait_for_unit("carbonCache.service")
+ one.wait_for_unit("seyren.service")
# The services above are of type "simple". systemd considers them active immediately
# even if they're still in preStart (which takes quite long for graphiteWeb).
# Wait for ports to open so we're sure the services are up and listening.
- $one->waitForOpenPort(8080);
- $one->waitForOpenPort(2003);
- $one->succeed("echo \"foo 1 `date +%s`\" | nc -N localhost 2003");
- $one->waitUntilSucceeds("curl 'http://localhost:8080/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2");
+ one.wait_for_open_port(8080)
+ one.wait_for_open_port(2003)
+ one.succeed('echo "foo 1 `date +%s`" | nc -N localhost 2003')
+ one.wait_until_succeeds("curl 'http://localhost:8080/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2")
'';
})
diff --git a/nixpkgs/nixos/tests/i3wm.nix b/nixpkgs/nixos/tests/i3wm.nix
index 126178d1187..b527aa706ad 100644
--- a/nixpkgs/nixos/tests/i3wm.nix
+++ b/nixpkgs/nixos/tests/i3wm.nix
@@ -6,7 +6,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
machine = { lib, ... }: {
imports = [ ./common/x11.nix ./common/user-account.nix ];
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
services.xserver.displayManager.defaultSession = lib.mkForce "none+i3";
services.xserver.windowManager.i3.enable = true;
};
diff --git a/nixpkgs/nixos/tests/initdb.nix b/nixpkgs/nixos/tests/initdb.nix
deleted file mode 100644
index 749d7857a13..00000000000
--- a/nixpkgs/nixos/tests/initdb.nix
+++ /dev/null
@@ -1,26 +0,0 @@
-let
- pkgs = import <nixpkgs> { };
-in
-with import <nixpkgs/nixos/lib/testing.nix> { inherit pkgs; system = builtins.currentSystem; };
-with pkgs.lib;
-
-makeTest {
- name = "pg-initdb";
-
- machine = {...}:
- {
- documentation.enable = false;
- services.postgresql.enable = true;
- services.postgresql.package = pkgs.postgresql_9_6;
- environment.pathsToLink = [
- "/share/postgresql"
- ];
- };
-
- testScript = ''
- $machine->start;
- $machine->succeed("sudo -u postgres initdb -D /tmp/testpostgres2");
- $machine->shutdown;
- '';
-
- } \ No newline at end of file
diff --git a/nixpkgs/nixos/tests/kafka.nix b/nixpkgs/nixos/tests/kafka.nix
index 48ca98da8fa..f3de24e873b 100644
--- a/nixpkgs/nixos/tests/kafka.nix
+++ b/nixpkgs/nixos/tests/kafka.nix
@@ -3,11 +3,10 @@
pkgs ? import ../.. { inherit system config; }
}:
-with import ../lib/testing.nix { inherit system pkgs; };
with pkgs.lib;
let
- makeKafkaTest = name: kafkaPackage: (makeTest {
+ makeKafkaTest = name: kafkaPackage: (import ./make-test-python.nix ({
inherit name;
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ nequissimus ];
@@ -45,24 +44,40 @@ let
};
testScript = ''
- startAll;
+ start_all()
- $zookeeper1->waitForUnit("default.target");
- $zookeeper1->waitForUnit("zookeeper.service");
- $zookeeper1->waitForOpenPort(2181);
+ zookeeper1.wait_for_unit("default.target")
+ zookeeper1.wait_for_unit("zookeeper.service")
+ zookeeper1.wait_for_open_port(2181)
- $kafka->waitForUnit("default.target");
- $kafka->waitForUnit("apache-kafka.service");
- $kafka->waitForOpenPort(9092);
+ kafka.wait_for_unit("default.target")
+ kafka.wait_for_unit("apache-kafka.service")
+ kafka.wait_for_open_port(9092)
- $kafka->waitUntilSucceeds("${kafkaPackage}/bin/kafka-topics.sh --create --zookeeper zookeeper1:2181 --partitions 1 --replication-factor 1 --topic testtopic");
- $kafka->mustSucceed("echo 'test 1' | ${kafkaPackage}/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic testtopic");
+ kafka.wait_until_succeeds(
+ "${kafkaPackage}/bin/kafka-topics.sh --create "
+ + "--zookeeper zookeeper1:2181 --partitions 1 "
+ + "--replication-factor 1 --topic testtopic"
+ )
+ kafka.succeed(
+ "echo 'test 1' | "
+ + "${kafkaPackage}/bin/kafka-console-producer.sh "
+ + "--broker-list localhost:9092 --topic testtopic"
+ )
'' + (if name == "kafka_0_9" then ''
- $kafka->mustSucceed("${kafkaPackage}/bin/kafka-console-consumer.sh --zookeeper zookeeper1:2181 --topic testtopic --from-beginning --max-messages 1 | grep 'test 1'");
+ assert "test 1" in kafka.succeed(
+ "${kafkaPackage}/bin/kafka-console-consumer.sh "
+ + "--zookeeper zookeeper1:2181 --topic testtopic "
+ + "--from-beginning --max-messages 1"
+ )
'' else ''
- $kafka->mustSucceed("${kafkaPackage}/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic testtopic --from-beginning --max-messages 1 | grep 'test 1'");
+ assert "test 1" in kafka.succeed(
+ "${kafkaPackage}/bin/kafka-console-consumer.sh "
+ + "--bootstrap-server localhost:9092 --topic testtopic "
+ + "--from-beginning --max-messages 1"
+ )
'');
- });
+ }) {});
in with pkgs; {
kafka_0_9 = makeKafkaTest "kafka_0_9" apacheKafka_0_9;
@@ -74,4 +89,5 @@ in with pkgs; {
kafka_2_1 = makeKafkaTest "kafka_2_1" apacheKafka_2_1;
kafka_2_2 = makeKafkaTest "kafka_2_2" apacheKafka_2_2;
kafka_2_3 = makeKafkaTest "kafka_2_3" apacheKafka_2_3;
+ kafka_2_4 = makeKafkaTest "kafka_2_4" apacheKafka_2_4;
}
diff --git a/nixpkgs/nixos/tests/limesurvey.nix b/nixpkgs/nixos/tests/limesurvey.nix
index ad66ada106b..7228fcb8331 100644
--- a/nixpkgs/nixos/tests/limesurvey.nix
+++ b/nixpkgs/nixos/tests/limesurvey.nix
@@ -1,21 +1,26 @@
-import ./make-test.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: {
name = "limesurvey";
meta.maintainers = [ pkgs.stdenv.lib.maintainers.aanderse ];
- machine =
- { ... }:
- { services.limesurvey.enable = true;
- services.limesurvey.virtualHost.hostName = "example.local";
- services.limesurvey.virtualHost.adminAddr = "root@example.local";
-
- # limesurvey won't work without a dot in the hostname
- networking.hosts."127.0.0.1" = [ "example.local" ];
+ machine = { ... }: {
+ services.limesurvey = {
+ enable = true;
+ virtualHost = {
+ hostName = "example.local";
+ adminAddr = "root@example.local";
+ };
};
+ # limesurvey won't work without a dot in the hostname
+ networking.hosts."127.0.0.1" = [ "example.local" ];
+ };
+
testScript = ''
- startAll;
+ start_all()
- $machine->waitForUnit('phpfpm-limesurvey.service');
- $machine->succeed('curl http://example.local/') =~ /The following surveys are available/ or die;
+ machine.wait_for_unit("phpfpm-limesurvey.service")
+ assert "The following surveys are available" in machine.succeed(
+ "curl http://example.local/"
+ )
'';
})
diff --git a/nixpkgs/nixos/tests/networking-proxy.nix b/nixpkgs/nixos/tests/networking-proxy.nix
index ab908c96e5e..bae9c66ed61 100644
--- a/nixpkgs/nixos/tests/networking-proxy.nix
+++ b/nixpkgs/nixos/tests/networking-proxy.nix
@@ -10,7 +10,7 @@ let default-config = {
virtualisation.memorySize = 128;
};
-in import ./make-test.nix ({ pkgs, ...} : {
+in import ./make-test-python.nix ({ pkgs, ...} : {
name = "networking-proxy";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ ];
@@ -66,46 +66,70 @@ in import ./make-test.nix ({ pkgs, ...} : {
testScript =
''
- startAll;
-
- # no proxy at all
- print $machine->execute("env | grep -i proxy");
- print $machine->execute("su - alice -c 'env | grep -i proxy'");
- $machine->mustFail("env | grep -i proxy");
- $machine->mustFail("su - alice -c 'env | grep -i proxy'");
-
- # Use a default proxy option
- print $machine2->execute("env | grep -i proxy");
- print $machine2->execute("su - alice -c 'env | grep -i proxy'");
- $machine2->mustSucceed("env | grep -i proxy");
- $machine2->mustSucceed("su - alice -c 'env | grep -i proxy'");
-
- # explicitly set each proxy option
- print $machine3->execute("env | grep -i proxy");
- print $machine3->execute("su - alice -c 'env | grep -i proxy'");
- $machine3->mustSucceed("env | grep -i http_proxy | grep 123");
- $machine3->mustSucceed("env | grep -i https_proxy | grep 456");
- $machine3->mustSucceed("env | grep -i rsync_proxy | grep 789");
- $machine3->mustSucceed("env | grep -i ftp_proxy | grep 101112");
- $machine3->mustSucceed("env | grep -i no_proxy | grep 131415");
- $machine3->mustSucceed("su - alice -c 'env | grep -i http_proxy | grep 123'");
- $machine3->mustSucceed("su - alice -c 'env | grep -i https_proxy | grep 456'");
- $machine3->mustSucceed("su - alice -c 'env | grep -i rsync_proxy | grep 789'");
- $machine3->mustSucceed("su - alice -c 'env | grep -i ftp_proxy | grep 101112'");
- $machine3->mustSucceed("su - alice -c 'env | grep -i no_proxy | grep 131415'");
-
- # set default proxy option + some other specifics
- print $machine4->execute("env | grep -i proxy");
- print $machine4->execute("su - alice -c 'env | grep -i proxy'");
- $machine4->mustSucceed("env | grep -i http_proxy | grep 000");
- $machine4->mustSucceed("env | grep -i https_proxy | grep 000");
- $machine4->mustSucceed("env | grep -i rsync_proxy | grep 123");
- $machine4->mustSucceed("env | grep -i ftp_proxy | grep 000");
- $machine4->mustSucceed("env | grep -i no_proxy | grep 131415");
- $machine4->mustSucceed("su - alice -c 'env | grep -i http_proxy | grep 000'");
- $machine4->mustSucceed("su - alice -c 'env | grep -i https_proxy | grep 000'");
- $machine4->mustSucceed("su - alice -c 'env | grep -i rsync_proxy | grep 123'");
- $machine4->mustSucceed("su - alice -c 'env | grep -i ftp_proxy | grep 000'");
- $machine4->mustSucceed("su - alice -c 'env | grep -i no_proxy | grep 131415'");
+ from typing import Dict, Optional
+
+
+ def get_machine_env(machine: Machine, user: Optional[str] = None) -> Dict[str, str]:
+ """
+ Gets the environment from a given machine, and returns it as a
+ dictionary in the form:
+ {"lowercase_var_name": "value"}
+
+ Duplicate environment variables with the same name
+ (e.g. "foo" and "FOO") are handled in an undefined manner.
+ """
+ if user is not None:
+ env = machine.succeed("su - {} -c 'env -0'".format(user))
+ else:
+ env = machine.succeed("env -0")
+ ret = {}
+ for line in env.split("\0"):
+ if "=" not in line:
+ continue
+
+ key, val = line.split("=", 1)
+ ret[key.lower()] = val
+ return ret
+
+
+ start_all()
+
+ with subtest("no proxy"):
+ assert "proxy" not in machine.succeed("env").lower()
+ assert "proxy" not in machine.succeed("su - alice -c env").lower()
+
+ with subtest("default proxy"):
+ assert "proxy" in machine2.succeed("env").lower()
+ assert "proxy" in machine2.succeed("su - alice -c env").lower()
+
+ with subtest("explicitly-set proxy"):
+ env = get_machine_env(machine3)
+ assert "123" in env["http_proxy"]
+ assert "456" in env["https_proxy"]
+ assert "789" in env["rsync_proxy"]
+ assert "101112" in env["ftp_proxy"]
+ assert "131415" in env["no_proxy"]
+
+ env = get_machine_env(machine3, "alice")
+ assert "123" in env["http_proxy"]
+ assert "456" in env["https_proxy"]
+ assert "789" in env["rsync_proxy"]
+ assert "101112" in env["ftp_proxy"]
+ assert "131415" in env["no_proxy"]
+
+ with subtest("default proxy + some other specifics"):
+ env = get_machine_env(machine4)
+ assert "000" in env["http_proxy"]
+ assert "000" in env["https_proxy"]
+ assert "123" in env["rsync_proxy"]
+ assert "000" in env["ftp_proxy"]
+ assert "131415" in env["no_proxy"]
+
+ env = get_machine_env(machine4, "alice")
+ assert "000" in env["http_proxy"]
+ assert "000" in env["https_proxy"]
+ assert "123" in env["rsync_proxy"]
+ assert "000" in env["ftp_proxy"]
+ assert "131415" in env["no_proxy"]
'';
})
diff --git a/nixpkgs/nixos/tests/openstack-image.nix b/nixpkgs/nixos/tests/openstack-image.nix
index d0225016ab7..8a21dd1b599 100644
--- a/nixpkgs/nixos/tests/openstack-image.nix
+++ b/nixpkgs/nixos/tests/openstack-image.nix
@@ -17,7 +17,7 @@ let
../modules/testing/test-instrumentation.nix
../modules/profiles/qemu-guest.nix
];
- }).config.system.build.openstackImage;
+ }).config.system.build.openstackImage + "/nixos.qcow2";
sshKeys = import ./ssh-keys.nix pkgs;
snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
diff --git a/nixpkgs/nixos/tests/postgresql.nix b/nixpkgs/nixos/tests/postgresql.nix
index e71c3888288..3201e22555e 100644
--- a/nixpkgs/nixos/tests/postgresql.nix
+++ b/nixpkgs/nixos/tests/postgresql.nix
@@ -29,11 +29,15 @@ let
machine = {...}:
{
- services.postgresql.enable = true;
- services.postgresql.package = postgresql-package;
+ services.postgresql = {
+ enable = true;
+ package = postgresql-package;
+ };
- services.postgresqlBackup.enable = true;
- services.postgresqlBackup.databases = optional (!backup-all) "postgres";
+ services.postgresqlBackup = {
+ enable = true;
+ databases = optional (!backup-all) "postgres";
+ };
};
testScript = let
@@ -49,23 +53,32 @@ let
machine.start()
machine.wait_for_unit("postgresql")
- # postgresql should be available just after unit start
- machine.succeed(
- "cat ${test-sql} | sudo -u postgres psql"
- )
- machine.shutdown() # make sure that postgresql survive restart (bug #1735)
- time.sleep(2)
- machine.start()
- machine.wait_for_unit("postgresql")
+ with subtest("Postgresql is available just after unit start"):
+ machine.succeed(
+ "cat ${test-sql} | sudo -u postgres psql"
+ )
+
+ with subtest("Postgresql survives restart (bug #1735)"):
+ machine.shutdown()
+ time.sleep(2)
+ machine.start()
+ machine.wait_for_unit("postgresql")
+
machine.fail(check_count("SELECT * FROM sth;", 3))
machine.succeed(check_count("SELECT * FROM sth;", 5))
machine.fail(check_count("SELECT * FROM sth;", 4))
machine.succeed(check_count("SELECT xpath('/test/text()', doc) FROM xmltest;", 1))
- # Check backup service
- machine.succeed("systemctl start ${backupService}.service")
- machine.succeed("zcat /var/backup/postgresql/${backupName}.sql.gz | grep '<test>ok</test>'")
- machine.succeed("stat -c '%a' /var/backup/postgresql/${backupName}.sql.gz | grep 600")
+ with subtest("Backup service works"):
+ machine.succeed(
+ "systemctl start ${backupService}.service",
+ "zcat /var/backup/postgresql/${backupName}.sql.gz | grep '<test>ok</test>'",
+ "stat -c '%a' /var/backup/postgresql/${backupName}.sql.gz | grep 600",
+ )
+
+ with subtest("Initdb works"):
+ machine.succeed("sudo -u postgres initdb -D /tmp/testpostgres2")
+
machine.shutdown()
'';
diff --git a/nixpkgs/nixos/tests/proxy.nix b/nixpkgs/nixos/tests/proxy.nix
index 3859d429c21..6a14a9af59a 100644
--- a/nixpkgs/nixos/tests/proxy.nix
+++ b/nixpkgs/nixos/tests/proxy.nix
@@ -1,97 +1,90 @@
-import ./make-test.nix ({ pkgs, ...} :
+import ./make-test-python.nix ({ pkgs, ...} :
let
-
- backend =
- { pkgs, ... }:
-
- { services.httpd.enable = true;
- services.httpd.adminAddr = "foo@example.org";
- services.httpd.virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html";
- networking.firewall.allowedTCPPorts = [ 80 ];
+ backend = { pkgs, ... }: {
+ services.httpd = {
+ enable = true;
+ adminAddr = "foo@example.org";
+ virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html";
};
-
-in
-
-{
+ networking.firewall.allowedTCPPorts = [ 80 ];
+ };
+in {
name = "proxy";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ eelco ];
};
- nodes =
- { proxy =
- { nodes, ... }:
-
- { services.httpd.enable = true;
- services.httpd.adminAddr = "bar@example.org";
- services.httpd.extraModules = [ "proxy_balancer" "lbmethod_byrequests" ];
- services.httpd.extraConfig = ''
- ExtendedStatus on
+ nodes = {
+ proxy = { nodes, ... }: {
+ services.httpd = {
+ enable = true;
+ adminAddr = "bar@example.org";
+ extraModules = [ "proxy_balancer" "lbmethod_byrequests" ];
+ extraConfig = ''
+ ExtendedStatus on
+ '';
+ virtualHosts.localhost = {
+ extraConfig = ''
+ <Location /server-status>
+ Require all granted
+ SetHandler server-status
+ </Location>
+
+ <Proxy balancer://cluster>
+ Require all granted
+ BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0
+ BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0
+ </Proxy>
+
+ ProxyStatus full
+ ProxyPass /server-status !
+ ProxyPass / balancer://cluster/
+ ProxyPassReverse / balancer://cluster/
+
+ # For testing; don't want to wait forever for dead backend servers.
+ ProxyTimeout 5
'';
- services.httpd.virtualHosts.localhost = {
- extraConfig = ''
- <Location /server-status>
- Require all granted
- SetHandler server-status
- </Location>
-
- <Proxy balancer://cluster>
- Require all granted
- BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0
- BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0
- </Proxy>
-
- ProxyStatus full
- ProxyPass /server-status !
- ProxyPass / balancer://cluster/
- ProxyPassReverse / balancer://cluster/
-
- # For testing; don't want to wait forever for dead backend servers.
- ProxyTimeout 5
- '';
- };
-
- networking.firewall.allowedTCPPorts = [ 80 ];
};
-
- backend1 = backend;
- backend2 = backend;
-
- client = { ... }: { };
+ };
+ networking.firewall.allowedTCPPorts = [ 80 ];
};
- testScript =
- ''
- startAll;
+ backend1 = backend;
+ backend2 = backend;
+
+ client = { ... }: { };
+ };
- $proxy->waitForUnit("httpd");
- $backend1->waitForUnit("httpd");
- $backend2->waitForUnit("httpd");
- $client->waitForUnit("network.target");
+ testScript = ''
+ start_all()
- # With the back-ends up, the proxy should work.
- $client->succeed("curl --fail http://proxy/");
+ proxy.wait_for_unit("httpd")
+ backend1.wait_for_unit("httpd")
+ backend2.wait_for_unit("httpd")
+ client.wait_for_unit("network.target")
- $client->succeed("curl --fail http://proxy/server-status");
+ # With the back-ends up, the proxy should work.
+ client.succeed("curl --fail http://proxy/")
- # Block the first back-end.
- $backend1->block;
+ client.succeed("curl --fail http://proxy/server-status")
- # The proxy should still work.
- $client->succeed("curl --fail http://proxy/");
+ # Block the first back-end.
+ backend1.block()
- $client->succeed("curl --fail http://proxy/");
+ # The proxy should still work.
+ client.succeed("curl --fail http://proxy/")
+ client.succeed("curl --fail http://proxy/")
- # Block the second back-end.
- $backend2->block;
+ # Block the second back-end.
+ backend2.block()
- # Now the proxy should fail as well.
- $client->fail("curl --fail http://proxy/");
+ # Now the proxy should fail as well.
+ client.fail("curl --fail http://proxy/")
- # But if the second back-end comes back, the proxy should start
- # working again.
- $backend2->unblock;
- $client->succeed("curl --fail http://proxy/");
- '';
+ # But if the second back-end comes back, the proxy should start
+ # working again.
+ backend2.unblock()
+ client.succeed("curl --fail http://proxy/")
+ '';
})
diff --git a/nixpkgs/nixos/tests/riak.nix b/nixpkgs/nixos/tests/riak.nix
index 68a9b7315b3..6915779e7e9 100644
--- a/nixpkgs/nixos/tests/riak.nix
+++ b/nixpkgs/nixos/tests/riak.nix
@@ -1,21 +1,18 @@
-import ./make-test.nix {
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
name = "riak";
+ meta = with lib.maintainers; {
+ maintainers = [ filalex77 ];
+ };
- nodes = {
- master =
- { pkgs, ... }:
-
- {
- services.riak.enable = true;
- services.riak.package = pkgs.riak;
- };
+ machine = {
+ services.riak.enable = true;
+ services.riak.package = pkgs.riak;
};
testScript = ''
- startAll;
+ machine.start()
- $master->waitForUnit("riak");
- $master->sleep(20); # Hopefully this is long enough!!
- $master->succeed("riak ping 2>&1");
+ machine.wait_for_unit("riak")
+ machine.wait_until_succeeds("riak ping 2>&1")
'';
-}
+})
diff --git a/nixpkgs/nixos/tests/signal-desktop.nix b/nixpkgs/nixos/tests/signal-desktop.nix
index c746d46dc55..ae141fe116d 100644
--- a/nixpkgs/nixos/tests/signal-desktop.nix
+++ b/nixpkgs/nixos/tests/signal-desktop.nix
@@ -15,7 +15,7 @@ import ./make-test-python.nix ({ pkgs, ...} :
];
services.xserver.enable = true;
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
environment.systemPackages = [ pkgs.signal-desktop ];
};
diff --git a/nixpkgs/nixos/tests/solr.nix b/nixpkgs/nixos/tests/solr.nix
index 2108e851bc5..23e1a960fb3 100644
--- a/nixpkgs/nixos/tests/solr.nix
+++ b/nixpkgs/nixos/tests/solr.nix
@@ -1,65 +1,48 @@
-{ system ? builtins.currentSystem,
- config ? {},
- pkgs ? import ../.. { inherit system config; }
-}:
+import ./make-test.nix ({ pkgs, ... }:
-with import ../lib/testing.nix { inherit system pkgs; };
-with pkgs.lib;
-
-let
- solrTest = package: makeTest {
- machine =
- { config, pkgs, ... }:
- {
- # Ensure the virtual machine has enough memory for Solr to avoid the following error:
- #
- # OpenJDK 64-Bit Server VM warning:
- # INFO: os::commit_memory(0x00000000e8000000, 402653184, 0)
- # failed; error='Cannot allocate memory' (errno=12)
- #
- # There is insufficient memory for the Java Runtime Environment to continue.
- # Native memory allocation (mmap) failed to map 402653184 bytes for committing reserved memory.
- virtualisation.memorySize = 2000;
+{
+ name = "solr";
+ meta.maintainers = [ pkgs.stdenv.lib.maintainers.aanderse ];
- services.solr.enable = true;
- services.solr.package = package;
- };
+ machine =
+ { config, pkgs, ... }:
+ {
+ # Ensure the virtual machine has enough memory for Solr to avoid the following error:
+ #
+ # OpenJDK 64-Bit Server VM warning:
+ # INFO: os::commit_memory(0x00000000e8000000, 402653184, 0)
+ # failed; error='Cannot allocate memory' (errno=12)
+ #
+ # There is insufficient memory for the Java Runtime Environment to continue.
+ # Native memory allocation (mmap) failed to map 402653184 bytes for committing reserved memory.
+ virtualisation.memorySize = 2000;
- testScript = ''
- startAll;
+ services.solr.enable = true;
+ };
- $machine->waitForUnit('solr.service');
- $machine->waitForOpenPort('8983');
- $machine->succeed('curl --fail http://localhost:8983/solr/');
+ testScript = ''
+ startAll;
- # adapted from pkgs.solr/examples/films/README.txt
- $machine->succeed('sudo -u solr solr create -c films');
- $machine->succeed(q(curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
- "add-field" : {
- "name":"name",
- "type":"text_general",
- "multiValued":false,
- "stored":true
- },
- "add-field" : {
- "name":"initial_release_date",
- "type":"pdate",
- "stored":true
- }
- }')) =~ /"status":0/ or die;
- $machine->succeed('sudo -u solr post -c films ${pkgs.solr}/example/films/films.json');
- $machine->succeed('curl http://localhost:8983/solr/films/query?q=name:batman') =~ /"name":"Batman Begins"/ or die;
- '';
- };
-in
-{
- solr_7 = solrTest pkgs.solr_7 // {
- name = "solr_7";
- meta.maintainers = [ lib.maintainers.aanderse ];
- };
+ $machine->waitForUnit('solr.service');
+ $machine->waitForOpenPort('8983');
+ $machine->succeed('curl --fail http://localhost:8983/solr/');
- solr_8 = solrTest pkgs.solr_8 // {
- name = "solr_8";
- meta.maintainers = [ lib.maintainers.aanderse ];
- };
-}
+ # adapted from pkgs.solr/examples/films/README.txt
+ $machine->succeed('sudo -u solr solr create -c films');
+ $machine->succeed(q(curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
+ "add-field" : {
+ "name":"name",
+ "type":"text_general",
+ "multiValued":false,
+ "stored":true
+ },
+ "add-field" : {
+ "name":"initial_release_date",
+ "type":"pdate",
+ "stored":true
+ }
+ }')) =~ /"status":0/ or die;
+ $machine->succeed('sudo -u solr post -c films ${pkgs.solr}/example/films/films.json');
+ $machine->succeed('curl http://localhost:8983/solr/films/query?q=name:batman') =~ /"name":"Batman Begins"/ or die;
+ '';
+})
diff --git a/nixpkgs/nixos/tests/systemd.nix b/nixpkgs/nixos/tests/systemd.nix
index 4b71b4d6759..8028145939b 100644
--- a/nixpkgs/nixos/tests/systemd.nix
+++ b/nixpkgs/nixos/tests/systemd.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: {
name = "systemd";
machine = { lib, ... }: {
@@ -19,7 +19,7 @@ import ./make-test.nix ({ pkgs, ... }: {
systemd.extraConfig = "DefaultEnvironment=\"XXX_SYSTEM=foo\"";
systemd.user.extraConfig = "DefaultEnvironment=\"XXX_USER=bar\"";
services.journald.extraConfig = "Storage=volatile";
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
systemd.shutdown.test = pkgs.writeScript "test.shutdown" ''
#!${pkgs.stdenv.shell}
@@ -53,50 +53,69 @@ import ./make-test.nix ({ pkgs, ... }: {
};
testScript = ''
- $machine->waitForX;
+ import re
+ import subprocess
+
+ machine.wait_for_x()
# wait for user services
- $machine->waitForUnit("default.target","alice");
+ machine.wait_for_unit("default.target", "alice")
# Regression test for https://github.com/NixOS/nixpkgs/issues/35415
- subtest "configuration files are recognized by systemd", sub {
- $machine->succeed('test -e /system_conf_read');
- $machine->succeed('test -e /home/alice/user_conf_read');
- $machine->succeed('test -z $(ls -1 /var/log/journal)');
- };
+ with subtest("configuration files are recognized by systemd"):
+ machine.succeed("test -e /system_conf_read")
+ machine.succeed("test -e /home/alice/user_conf_read")
+ machine.succeed("test -z $(ls -1 /var/log/journal)")
# Regression test for https://github.com/NixOS/nixpkgs/issues/50273
- subtest "DynamicUser actually allocates a user", sub {
- $machine->succeed('systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami | grep iamatest');
- };
+ with subtest("DynamicUser actually allocates a user"):
+ assert "iamatest" in machine.succeed(
+ "systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami"
+ )
# Regression test for https://github.com/NixOS/nixpkgs/issues/35268
- subtest "file system with x-initrd.mount is not unmounted", sub {
- $machine->succeed('mountpoint -q /test-x-initrd-mount');
- $machine->shutdown;
- system('qemu-img', 'convert', '-O', 'raw',
- 'vm-state-machine/empty2.qcow2', 'x-initrd-mount.raw');
- my $extinfo = `${pkgs.e2fsprogs}/bin/dumpe2fs x-initrd-mount.raw`;
- die "File system was not cleanly unmounted: $extinfo"
- unless $extinfo =~ /^Filesystem state: *clean$/m;
- };
+ with subtest("file system with x-initrd.mount is not unmounted"):
+ machine.succeed("mountpoint -q /test-x-initrd-mount")
+ machine.shutdown()
- subtest "systemd-shutdown works", sub {
- $machine->shutdown;
- $machine->waitForUnit('multi-user.target');
- $machine->succeed('test -e /tmp/shared/shutdown-test');
- };
+ subprocess.check_call(
+ [
+ "qemu-img",
+ "convert",
+ "-O",
+ "raw",
+ "vm-state-machine/empty0.qcow2",
+ "x-initrd-mount.raw",
+ ]
+ )
+ extinfo = subprocess.check_output(
+ [
+ "${pkgs.e2fsprogs}/bin/dumpe2fs",
+ "x-initrd-mount.raw",
+ ]
+ ).decode("utf-8")
+ assert (
+ re.search(r"^Filesystem state: *clean$", extinfo, re.MULTILINE) is not None
+ ), ("File system was not cleanly unmounted: " + extinfo)
+
+ with subtest("systemd-shutdown works"):
+ machine.shutdown()
+ machine.wait_for_unit("multi-user.target")
+ machine.succeed("test -e /tmp/shared/shutdown-test")
+
+ # Test settings from /etc/sysctl.d/50-default.conf are applied
+ with subtest("systemd sysctl settings are applied"):
+ machine.wait_for_unit("multi-user.target")
+ assert "fq_codel" in machine.succeed("sysctl net.core.default_qdisc")
+
+ # Test cgroup accounting is enabled
+ with subtest("systemd cgroup accounting is enabled"):
+ machine.wait_for_unit("multi-user.target")
+ assert "yes" in machine.succeed(
+ "systemctl show testservice1.service -p IOAccounting"
+ )
- # Test settings from /etc/sysctl.d/50-default.conf are applied
- subtest "systemd sysctl settings are applied", sub {
- $machine->waitForUnit('multi-user.target');
- $machine->succeed('sysctl net.core.default_qdisc | grep -q "fq_codel"');
- };
-
- # Test cgroup accounting is enabled
- subtest "systemd cgroup accounting is enabled", sub {
- $machine->waitForUnit('multi-user.target');
- $machine->succeed('systemctl show testservice1.service -p IOAccounting | grep -q "yes"');
- $machine->succeed('systemctl status testservice1.service | grep -q "CPU:"');
- };
+ retcode, output = machine.execute("systemctl status testservice1.service")
+ assert retcode in [0, 3] # https://bugs.freedesktop.org/show_bug.cgi?id=77507
+ assert "CPU:" in output
'';
})
diff --git a/nixpkgs/nixos/tests/victoriametrics.nix b/nixpkgs/nixos/tests/victoriametrics.nix
new file mode 100644
index 00000000000..73ef8b72861
--- /dev/null
+++ b/nixpkgs/nixos/tests/victoriametrics.nix
@@ -0,0 +1,31 @@
+# This test runs victoriametrics and checks that the service is up and running
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+ name = "victoriametrics";
+ meta = with pkgs.stdenv.lib.maintainers; {
+ maintainers = [ yorickvp ];
+ };
+
+ nodes = {
+ one = { ... }: {
+ services.victoriametrics.enable = true;
+ };
+ };
+
+ testScript = ''
+ start_all()
+
+ one.wait_for_unit("victoriametrics.service")
+
+    # write some points and run a simple query
+ out = one.succeed(
+ "curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'"
+ )
+ cmd = """curl -s -G 'http://localhost:8428/api/v1/export' -d 'match={__name__!=""}'"""
+ # data takes a while to appear
+ one.wait_until_succeeds(f"[[ $({cmd} | wc -l) -ne 0 ]]")
+ out = one.succeed(cmd)
+ assert '"values":[123]' in out
+ assert '"values":[1.23]' in out
+ '';
+})
diff --git a/nixpkgs/nixos/tests/virtualbox.nix b/nixpkgs/nixos/tests/virtualbox.nix
index 32637d2c1ef..f03dc1cc413 100644
--- a/nixpkgs/nixos/tests/virtualbox.nix
+++ b/nixpkgs/nixos/tests/virtualbox.nix
@@ -356,7 +356,7 @@ let
virtualisation.qemu.options =
if useKvmNestedVirt then ["-cpu" "kvm64,vmx=on"] else [];
virtualisation.virtualbox.host.enable = true;
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
users.users.alice.extraGroups = let
inherit (config.virtualisation.virtualbox.host) enableHardening;
in lib.mkIf enableHardening (lib.singleton "vboxusers");
diff --git a/nixpkgs/nixos/tests/xandikos.nix b/nixpkgs/nixos/tests/xandikos.nix
new file mode 100644
index 00000000000..0fded20ff1a
--- /dev/null
+++ b/nixpkgs/nixos/tests/xandikos.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix (
+ { pkgs, lib, ... }:
+
+ {
+ name = "xandikos";
+
+ meta.maintainers = [ lib.maintainers."0x4A6F" ];
+
+ nodes = {
+ xandikos_client = {};
+ xandikos_default = {
+ networking.firewall.allowedTCPPorts = [ 8080 ];
+ services.xandikos.enable = true;
+ };
+ xandikos_proxy = {
+ networking.firewall.allowedTCPPorts = [ 80 8080 ];
+ services.xandikos.enable = true;
+ services.xandikos.address = "localhost";
+ services.xandikos.port = 8080;
+ services.xandikos.routePrefix = "/xandikos/";
+ services.xandikos.extraOptions = [
+ "--defaults"
+ ];
+ services.nginx = {
+ enable = true;
+ recommendedProxySettings = true;
+ virtualHosts."xandikos" = {
+ serverName = "xandikos.local";
+ basicAuth.xandikos = "snakeOilPassword";
+ locations."/xandikos/" = {
+ proxyPass = "http://localhost:8080/";
+ };
+ };
+ };
+ };
+ };
+
+ testScript = ''
+ start_all()
+
+ with subtest("Xandikos default"):
+ xandikos_default.wait_for_unit("multi-user.target")
+ xandikos_default.wait_for_unit("xandikos.service")
+ xandikos_default.wait_for_open_port(8080)
+ xandikos_default.succeed("curl --fail http://localhost:8080/")
+ xandikos_default.succeed(
+ "curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
+ )
+ xandikos_client.wait_for_unit("network.target")
+ xandikos_client.fail("curl --fail http://xandikos_default:8080/")
+
+ with subtest("Xandikos proxy"):
+ xandikos_proxy.wait_for_unit("multi-user.target")
+ xandikos_proxy.wait_for_unit("xandikos.service")
+ xandikos_proxy.wait_for_open_port(8080)
+ xandikos_proxy.succeed("curl --fail http://localhost:8080/")
+ xandikos_proxy.succeed(
+ "curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
+ )
+ xandikos_client.wait_for_unit("network.target")
+ xandikos_client.fail("curl --fail http://xandikos_proxy:8080/")
+ xandikos_client.succeed(
+ "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/ | grep -qi Xandikos"
+ )
+ xandikos_client.succeed(
+ "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/user/ | grep -qi Xandikos"
+ )
+ '';
+ }
+)
diff --git a/nixpkgs/nixos/tests/xautolock.nix b/nixpkgs/nixos/tests/xautolock.nix
index 10e92b40e95..4a8d3f4cebf 100644
--- a/nixpkgs/nixos/tests/xautolock.nix
+++ b/nixpkgs/nixos/tests/xautolock.nix
@@ -9,7 +9,7 @@ with lib;
nodes.machine = {
imports = [ ./common/x11.nix ./common/user-account.nix ];
- services.xserver.displayManager.auto.user = "bob";
+ test-support.displayManager.auto.user = "bob";
services.xserver.xautolock.enable = true;
services.xserver.xautolock.time = 1;
};
diff --git a/nixpkgs/nixos/tests/xfce.nix b/nixpkgs/nixos/tests/xfce.nix
index 3ea96b38363..99065669661 100644
--- a/nixpkgs/nixos/tests/xfce.nix
+++ b/nixpkgs/nixos/tests/xfce.nix
@@ -4,12 +4,20 @@ import ./make-test-python.nix ({ pkgs, ...} : {
machine =
{ pkgs, ... }:
- { imports = [ ./common/user-account.nix ];
+ {
+ imports = [
+ ./common/user-account.nix
+ ];
services.xserver.enable = true;
- services.xserver.displayManager.auto.enable = true;
- services.xserver.displayManager.auto.user = "alice";
+ services.xserver.displayManager.lightdm = {
+ enable = true;
+ autoLogin = {
+ enable = true;
+ user = "alice";
+ };
+ };
services.xserver.desktopManager.xfce.enable = true;
diff --git a/nixpkgs/nixos/tests/xmonad.nix b/nixpkgs/nixos/tests/xmonad.nix
index ef711f8dcf6..56baae8b9d3 100644
--- a/nixpkgs/nixos/tests/xmonad.nix
+++ b/nixpkgs/nixos/tests/xmonad.nix
@@ -6,7 +6,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
machine = { pkgs, ... }: {
imports = [ ./common/x11.nix ./common/user-account.nix ];
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
services.xserver.displayManager.defaultSession = "none+xmonad";
services.xserver.windowManager.xmonad = {
enable = true;
diff --git a/nixpkgs/nixos/tests/xrdp.nix b/nixpkgs/nixos/tests/xrdp.nix
index 1aceeffb955..6d7f2b9249f 100644
--- a/nixpkgs/nixos/tests/xrdp.nix
+++ b/nixpkgs/nixos/tests/xrdp.nix
@@ -14,7 +14,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
client = { pkgs, ... }: {
imports = [ ./common/x11.nix ./common/user-account.nix ];
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
environment.systemPackages = [ pkgs.freerdp ];
services.xrdp.enable = true;
services.xrdp.defaultWindowManager = "${pkgs.icewm}/bin/icewm";
diff --git a/nixpkgs/nixos/tests/xss-lock.nix b/nixpkgs/nixos/tests/xss-lock.nix
index 3a7dea07d53..b77bbbbb3c4 100644
--- a/nixpkgs/nixos/tests/xss-lock.nix
+++ b/nixpkgs/nixos/tests/xss-lock.nix
@@ -10,12 +10,12 @@ with lib;
simple = {
imports = [ ./common/x11.nix ./common/user-account.nix ];
programs.xss-lock.enable = true;
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
};
custom_lockcmd = { pkgs, ... }: {
imports = [ ./common/x11.nix ./common/user-account.nix ];
- services.xserver.displayManager.auto.user = "alice";
+ test-support.displayManager.auto.user = "alice";
programs.xss-lock = {
enable = true;
diff --git a/nixpkgs/nixos/tests/yabar.nix b/nixpkgs/nixos/tests/yabar.nix
index 9108004d4df..b374ef29680 100644
--- a/nixpkgs/nixos/tests/yabar.nix
+++ b/nixpkgs/nixos/tests/yabar.nix
@@ -11,7 +11,7 @@ with lib;
machine = {
imports = [ ./common/x11.nix ./common/user-account.nix ];
- services.xserver.displayManager.auto.user = "bob";
+ test-support.displayManager.auto.user = "bob";
programs.yabar.enable = true;
programs.yabar.bars = {