path: root/nixpkgs/nixos/lib/test-driver
Diffstat (limited to 'nixpkgs/nixos/lib/test-driver')
-rw-r--r--  nixpkgs/nixos/lib/test-driver/Logger.pm       |  75
-rw-r--r--  nixpkgs/nixos/lib/test-driver/Machine.pm      | 734
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test-driver.pl  | 191
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test-driver.py  | 963
4 files changed, 1963 insertions, 0 deletions
diff --git a/nixpkgs/nixos/lib/test-driver/Logger.pm b/nixpkgs/nixos/lib/test-driver/Logger.pm
new file mode 100644
index 00000000000..a3384084a0e
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/Logger.pm
@@ -0,0 +1,75 @@
+package Logger;
+
+use strict;
+use Thread::Queue;
+use XML::Writer;
+use Encode qw(decode encode);
+use Time::HiRes qw(clock_gettime CLOCK_MONOTONIC);
+
+sub new {
+ my ($class) = @_;
+
+ my $logFile = defined $ENV{LOGFILE} ? "$ENV{LOGFILE}" : "/dev/null";
+ my $log = new XML::Writer(OUTPUT => new IO::File(">$logFile"));
+
+ my $self = {
+ log => $log,
+ logQueue => Thread::Queue->new()
+ };
+
+ $self->{log}->startTag("logfile");
+
+ bless $self, $class;
+ return $self;
+}
+
+sub close {
+ my ($self) = @_;
+ $self->{log}->endTag("logfile");
+ $self->{log}->end;
+}
+
+sub drainLogQueue {
+ my ($self) = @_;
+ while (defined (my $item = $self->{logQueue}->dequeue_nb())) {
+ $self->{log}->dataElement("line", sanitise($item->{msg}), 'machine' => $item->{machine}, 'type' => 'serial');
+ }
+}
+
+sub maybePrefix {
+ my ($msg, $attrs) = @_;
+ $msg = $attrs->{machine} . ": " . $msg if defined $attrs->{machine};
+ return $msg;
+}
+
+sub nest {
+ my ($self, $msg, $coderef, $attrs) = @_;
+ print STDERR maybePrefix("$msg\n", $attrs);
+ $self->{log}->startTag("nest");
+ $self->{log}->dataElement("head", $msg, %{$attrs});
+ my $now = clock_gettime(CLOCK_MONOTONIC);
+ $self->drainLogQueue();
+ eval { &$coderef };
+ my $res = $@;
+ $self->drainLogQueue();
+ $self->log(sprintf("(%.2f seconds)", clock_gettime(CLOCK_MONOTONIC) - $now));
+ $self->{log}->endTag("nest");
+ die $@ if $@;
+}
+
+sub sanitise {
+ my ($s) = @_;
+ $s =~ s/[[:cntrl:]\xff]//g;
+ $s = decode('UTF-8', $s, Encode::FB_DEFAULT);
+ return encode('UTF-8', $s, Encode::FB_CROAK);
+}
+
+sub log {
+ my ($self, $msg, $attrs) = @_;
+ chomp $msg;
+ print STDERR maybePrefix("$msg\n", $attrs);
+ $self->drainLogQueue();
+ $self->{log}->dataElement("line", $msg, %{$attrs});
+}
+
+1;
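
Both this Perl Logger and the Python Logger added later in this commit serialize the run log as XML: a <logfile> root element, <line> elements for individual messages (serial output is tagged type="serial"), and <nest> elements whose <head> carries the step description. A rough standalone sketch of that structure, built with the same xml.sax.saxutils.XMLGenerator the Python driver uses (illustrative only; the element names come from the code above and below):

import sys
from xml.sax.saxutils import XMLGenerator

xml = XMLGenerator(sys.stdout, encoding="utf-8")
xml.startDocument()
xml.startElement("logfile", attrs={})

# One nested step, roughly as Logger::nest / Logger.nested would emit it.
xml.startElement("nest", attrs={})
xml.startElement("head", {"machine": "machine"})
xml.characters("must succeed: true")
xml.endElement("head")
# A regular log line produced inside the step.
xml.startElement("line", {"machine": "machine"})
xml.characters("exit status 0")
xml.endElement("line")
xml.endElement("nest")

xml.endElement("logfile")
xml.endDocument()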
diff --git a/nixpkgs/nixos/lib/test-driver/Machine.pm b/nixpkgs/nixos/lib/test-driver/Machine.pm
new file mode 100644
index 00000000000..4d3d63cd2db
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/Machine.pm
@@ -0,0 +1,734 @@
+package Machine;
+
+use strict;
+use threads;
+use Socket;
+use IO::Handle;
+use POSIX qw(dup2);
+use FileHandle;
+use Cwd;
+use File::Basename;
+use File::Path qw(make_path);
+use File::Slurp;
+use Time::HiRes qw(clock_gettime CLOCK_MONOTONIC);
+
+
+my $showGraphics = defined $ENV{'DISPLAY'};
+
+my $sharedDir;
+
+
+sub new {
+ my ($class, $args) = @_;
+
+ my $startCommand = $args->{startCommand};
+
+ my $name = $args->{name};
+ if (!$name) {
+ $startCommand =~ /run-(.*)-vm$/ if defined $startCommand;
+ $name = $1 || "machine";
+ }
+
+ if (!$startCommand) {
+ # !!! merge with qemu-vm.nix.
+ my $netBackend = "-netdev user,id=net0";
+ my $netFrontend = "-device virtio-net-pci,netdev=net0";
+
+ $netBackend .= "," . $args->{netBackendArgs}
+ if defined $args->{netBackendArgs};
+
+ $netFrontend .= "," . $args->{netFrontendArgs}
+ if defined $args->{netFrontendArgs};
+
+ $startCommand =
+ "qemu-kvm -m 384 $netBackend $netFrontend \$QEMU_OPTS ";
+
+ if (defined $args->{hda}) {
+ if ($args->{hdaInterface} eq "scsi") {
+ $startCommand .= "-drive id=hda,file="
+ . Cwd::abs_path($args->{hda})
+ . ",werror=report,if=none "
+ . "-device scsi-hd,drive=hda ";
+ } else {
+ $startCommand .= "-drive file=" . Cwd::abs_path($args->{hda})
+ . ",if=" . $args->{hdaInterface}
+ . ",werror=report ";
+ }
+ }
+
+ $startCommand .= "-cdrom $args->{cdrom} "
+ if defined $args->{cdrom};
+ $startCommand .= "-device piix3-usb-uhci -drive id=usbdisk,file=$args->{usb},if=none,readonly -device usb-storage,drive=usbdisk "
+ if defined $args->{usb};
+ $startCommand .= "-bios $args->{bios} "
+ if defined $args->{bios};
+ $startCommand .= $args->{qemuFlags} || "";
+ }
+
+ my $tmpDir = $ENV{'TMPDIR'} || "/tmp";
+ unless (defined $sharedDir) {
+ $sharedDir = $tmpDir . "/xchg-shared";
+ make_path($sharedDir, { mode => 0700, owner => $< });
+ }
+
+ my $allowReboot = 0;
+ $allowReboot = $args->{allowReboot} if defined $args->{allowReboot};
+
+ my $self = {
+ startCommand => $startCommand,
+ name => $name,
+ allowReboot => $allowReboot,
+ booted => 0,
+ pid => 0,
+ connected => 0,
+ socket => undef,
+ stateDir => "$tmpDir/vm-state-$name",
+ monitor => undef,
+ log => $args->{log},
+ redirectSerial => $args->{redirectSerial} // 1,
+ };
+
+ mkdir $self->{stateDir}, 0700;
+
+ bless $self, $class;
+ return $self;
+}
+
+
+sub log {
+ my ($self, $msg) = @_;
+ $self->{log}->log($msg, { machine => $self->{name} });
+}
+
+
+sub nest {
+ my ($self, $msg, $coderef, $attrs) = @_;
+ $self->{log}->nest($msg, $coderef, { %{$attrs || {}}, machine => $self->{name} });
+}
+
+
+sub name {
+ my ($self) = @_;
+ return $self->{name};
+}
+
+
+sub stateDir {
+ my ($self) = @_;
+ return $self->{stateDir};
+}
+
+
+sub start {
+ my ($self) = @_;
+ return if $self->{booted};
+
+ $self->log("starting vm");
+
+ # Create a socket pair for the serial line input/output of the VM.
+ my ($serialP, $serialC);
+ socketpair($serialP, $serialC, PF_UNIX, SOCK_STREAM, 0) or die;
+
+ # Create a Unix domain socket to which QEMU's monitor will connect.
+ my $monitorPath = $self->{stateDir} . "/monitor";
+ unlink $monitorPath;
+ my $monitorS;
+ socket($monitorS, PF_UNIX, SOCK_STREAM, 0) or die;
+ bind($monitorS, sockaddr_un($monitorPath)) or die "cannot bind monitor socket: $!";
+ listen($monitorS, 1) or die;
+
+ # Create a Unix domain socket to which the root shell in the guest will connect.
+ my $shellPath = $self->{stateDir} . "/shell";
+ unlink $shellPath;
+ my $shellS;
+ socket($shellS, PF_UNIX, SOCK_STREAM, 0) or die;
+ bind($shellS, sockaddr_un($shellPath)) or die "cannot bind shell socket: $!";
+ listen($shellS, 1) or die;
+
+ # Start the VM.
+ my $pid = fork();
+ die if $pid == -1;
+
+ if ($pid == 0) {
+ close $serialP;
+ close $monitorS;
+ close $shellS;
+ if ($self->{redirectSerial}) {
+ open NUL, "</dev/null" or die;
+ dup2(fileno(NUL), fileno(STDIN));
+ dup2(fileno($serialC), fileno(STDOUT));
+ dup2(fileno($serialC), fileno(STDERR));
+ }
+ $ENV{TMPDIR} = $self->{stateDir};
+ $ENV{SHARED_DIR} = $sharedDir;
+ $ENV{USE_TMPDIR} = 1;
+ $ENV{QEMU_OPTS} =
+ ($self->{allowReboot} ? "" : "-no-reboot ") .
+ "-monitor unix:./monitor -chardev socket,id=shell,path=./shell " .
+ "-device virtio-serial -device virtconsole,chardev=shell " .
+ "-device virtio-rng-pci " .
+ ($showGraphics ? "-serial stdio" : "-nographic") . " " . ($ENV{QEMU_OPTS} || "");
+ chdir $self->{stateDir} or die;
+ exec $self->{startCommand};
+ die "running VM script: $!";
+ }
+
+ # Process serial line output.
+ close $serialC;
+
+ threads->create(\&processSerialOutput, $self, $serialP)->detach;
+
+ sub processSerialOutput {
+ my ($self, $serialP) = @_;
+ while (<$serialP>) {
+ chomp;
+ s/\r$//;
+ print STDERR $self->{name}, "# $_\n";
+ $self->{log}->{logQueue}->enqueue({msg => $_, machine => $self->{name}}); # !!!
+ }
+ }
+
+ eval {
+ local $SIG{CHLD} = sub { die "QEMU died prematurely\n"; };
+
+ # Wait until QEMU connects to the monitor.
+ accept($self->{monitor}, $monitorS) or die;
+
+ # Wait until QEMU connects to the root shell socket. QEMU
+ # does so immediately; this doesn't mean that the root shell
+ # has connected yet inside the guest.
+ accept($self->{socket}, $shellS) or die;
+ $self->{socket}->autoflush(1);
+ };
+ die "$@" if $@;
+
+ $self->waitForMonitorPrompt;
+
+ $self->log("QEMU running (pid $pid)");
+
+ $self->{pid} = $pid;
+ $self->{booted} = 1;
+}
+
+
+# Send a command to the monitor and wait for it to finish. TODO: QEMU
+# also has a JSON-based monitor interface now, but it doesn't support
+# all commands yet. We should use it once it does.
+sub sendMonitorCommand {
+ my ($self, $command) = @_;
+ $self->log("sending monitor command: $command");
+ syswrite $self->{monitor}, "$command\n";
+ return $self->waitForMonitorPrompt;
+}
+
+
+# Wait until the monitor sends "(qemu) ".
+sub waitForMonitorPrompt {
+ my ($self) = @_;
+ my $res = "";
+ my $s;
+ while (sysread($self->{monitor}, $s, 1024)) {
+ $res .= $s;
+ last if $res =~ s/\(qemu\) $//;
+ }
+ return $res;
+}
+
+
+# Call the given code reference repeatedly, with 1 second intervals,
+# until it returns 1 or a timeout is reached.
+sub retry {
+ my ($coderef) = @_;
+ my $n;
+ for ($n = 899; $n >=0; $n--) {
+ return if &$coderef($n);
+ sleep 1;
+ }
+ die "action timed out after $n seconds";
+}
+
+
+sub connect {
+ my ($self) = @_;
+ return if $self->{connected};
+
+ $self->nest("waiting for the VM to finish booting", sub {
+
+ $self->start;
+
+ my $now = clock_gettime(CLOCK_MONOTONIC);
+ local $SIG{ALRM} = sub { die "timed out waiting for the VM to connect\n"; };
+ alarm 600;
+ readline $self->{socket} or die "the VM quit before connecting\n";
+ alarm 0;
+
+ $self->log("connected to guest root shell");
+ # We're interested in tracking how close we are to `alarm`.
+ $self->log(sprintf("(connecting took %.2f seconds)", clock_gettime(CLOCK_MONOTONIC) - $now));
+ $self->{connected} = 1;
+
+ });
+}
+
+
+sub waitForShutdown {
+ my ($self) = @_;
+ return unless $self->{booted};
+
+ $self->nest("waiting for the VM to power off", sub {
+ waitpid $self->{pid}, 0;
+ $self->{pid} = 0;
+ $self->{booted} = 0;
+ $self->{connected} = 0;
+ });
+}
+
+
+sub isUp {
+ my ($self) = @_;
+ return $self->{booted} && $self->{connected};
+}
+
+
+sub execute_ {
+ my ($self, $command) = @_;
+
+ $self->connect;
+
+ print { $self->{socket} } ("( $command ); echo '|!=EOF' \$?\n");
+
+ my $out = "";
+
+ while (1) {
+ my $line = readline($self->{socket});
+ die "connection to VM lost unexpectedly" unless defined $line;
+ #$self->log("got line: $line");
+ if ($line =~ /^(.*)\|\!\=EOF\s+(\d+)$/) {
+ $out .= $1;
+ $self->log("exit status $2");
+ return ($2, $out);
+ }
+ $out .= $line;
+ }
+}
+
+
+sub execute {
+ my ($self, $command) = @_;
+ my @res;
+ $self->nest("running command: $command", sub {
+ @res = $self->execute_($command);
+ });
+ return @res;
+}
+
+
+sub succeed {
+ my ($self, @commands) = @_;
+
+ my $res;
+ foreach my $command (@commands) {
+ $self->nest("must succeed: $command", sub {
+ my ($status, $out) = $self->execute_($command);
+ if ($status != 0) {
+ $self->log("output: $out");
+ die "command `$command' did not succeed (exit code $status)\n";
+ }
+ $res .= $out;
+ });
+ }
+
+ return $res;
+}
+
+
+sub mustSucceed {
+ succeed @_;
+}
+
+
+sub waitUntilSucceeds {
+ my ($self, $command) = @_;
+ $self->nest("waiting for success: $command", sub {
+ retry sub {
+ my ($status, $out) = $self->execute($command);
+ return 1 if $status == 0;
+ };
+ });
+}
+
+
+sub waitUntilFails {
+ my ($self, $command) = @_;
+ $self->nest("waiting for failure: $command", sub {
+ retry sub {
+ my ($status, $out) = $self->execute($command);
+ return 1 if $status != 0;
+ };
+ });
+}
+
+
+sub fail {
+ my ($self, $command) = @_;
+ $self->nest("must fail: $command", sub {
+ my ($status, $out) = $self->execute_($command);
+ die "command `$command' unexpectedly succeeded"
+ if $status == 0;
+ });
+}
+
+
+sub mustFail {
+ fail @_;
+}
+
+
+sub getUnitInfo {
+ my ($self, $unit, $user) = @_;
+ my ($status, $lines) = $self->systemctl("--no-pager show \"$unit\"", $user);
+ return undef if $status != 0;
+ my $info = {};
+ foreach my $line (split '\n', $lines) {
+ $line =~ /^([^=]+)=(.*)$/ or next;
+ $info->{$1} = $2;
+ }
+ return $info;
+}
+
+sub systemctl {
+ my ($self, $q, $user) = @_;
+ if ($user) {
+ $q =~ s/'/\\'/g;
+ return $self->execute("su -l $user -c \$'XDG_RUNTIME_DIR=/run/user/`id -u` systemctl --user $q'");
+ }
+
+ return $self->execute("systemctl $q");
+}
+
+# Fail if the given systemd unit is not in the "active" state.
+sub requireActiveUnit {
+ my ($self, $unit) = @_;
+ $self->nest("checking if unit ‘$unit’ has reached state 'active'", sub {
+ my $info = $self->getUnitInfo($unit);
+ my $state = $info->{ActiveState};
+ if ($state ne "active") {
+ die "Expected unit ‘$unit’ to to be in state 'active' but it is in state ‘$state’\n";
+ };
+ });
+}
+
+# Wait for a systemd unit to reach the "active" state.
+sub waitForUnit {
+ my ($self, $unit, $user) = @_;
+ $self->nest("waiting for unit ‘$unit’", sub {
+ retry sub {
+ my $info = $self->getUnitInfo($unit, $user);
+ my $state = $info->{ActiveState};
+ die "unit ‘$unit’ reached state ‘$state’\n" if $state eq "failed";
+ if ($state eq "inactive") {
+ # If there are no pending jobs, then assume this unit
+ # will never reach active state.
+ my ($status, $jobs) = $self->systemctl("list-jobs --full 2>&1", $user);
+ if ($jobs =~ /No jobs/) { # FIXME: fragile
+ # Handle the case where the unit may have started
+ # between the previous getUnitInfo() and
+ # list-jobs.
+ my $info2 = $self->getUnitInfo($unit);
+ die "unit ‘$unit’ is inactive and there are no pending jobs\n"
+ if $info2->{ActiveState} eq $state;
+ }
+ }
+ return 1 if $state eq "active";
+ };
+ });
+}
+
+
+sub waitForJob {
+ my ($self, $jobName) = @_;
+ return $self->waitForUnit($jobName);
+}
+
+
+# Wait until the specified file exists.
+sub waitForFile {
+ my ($self, $fileName) = @_;
+ $self->nest("waiting for file ‘$fileName’", sub {
+ retry sub {
+ my ($status, $out) = $self->execute("test -e $fileName");
+ return 1 if $status == 0;
+ }
+ });
+}
+
+sub startJob {
+ my ($self, $jobName, $user) = @_;
+ $self->systemctl("start $jobName", $user);
+ # FIXME: check result
+}
+
+sub stopJob {
+ my ($self, $jobName, $user) = @_;
+ $self->systemctl("stop $jobName", $user);
+}
+
+
+# Wait until the machine is listening on the given TCP port.
+sub waitForOpenPort {
+ my ($self, $port) = @_;
+ $self->nest("waiting for TCP port $port", sub {
+ retry sub {
+ my ($status, $out) = $self->execute("nc -z localhost $port");
+ return 1 if $status == 0;
+ }
+ });
+}
+
+
+# Wait until the machine is not listening on the given TCP port.
+sub waitForClosedPort {
+ my ($self, $port) = @_;
+ retry sub {
+ my ($status, $out) = $self->execute("nc -z localhost $port");
+ return 1 if $status != 0;
+ }
+}
+
+
+sub shutdown {
+ my ($self) = @_;
+ return unless $self->{booted};
+
+ print { $self->{socket} } ("poweroff\n");
+
+ $self->waitForShutdown;
+}
+
+
+sub crash {
+ my ($self) = @_;
+ return unless $self->{booted};
+
+ $self->log("forced crash");
+
+ $self->sendMonitorCommand("quit");
+
+ $self->waitForShutdown;
+}
+
+
+# Make the machine unreachable by shutting down eth1 (the multicast
+# interface used to talk to the other VMs). We keep eth0 up so that
+# the test driver can continue to talk to the machine.
+sub block {
+ my ($self) = @_;
+ $self->sendMonitorCommand("set_link virtio-net-pci.1 off");
+}
+
+
+# Make the machine reachable.
+sub unblock {
+ my ($self) = @_;
+ $self->sendMonitorCommand("set_link virtio-net-pci.1 on");
+}
+
+
+# Take a screenshot of the X server on :0.0.
+sub screenshot {
+ my ($self, $filename) = @_;
+ my $dir = $ENV{'out'} || Cwd::abs_path(".");
+ $filename = "$dir/${filename}.png" if $filename =~ /^\w+$/;
+ my $tmp = "${filename}.ppm";
+ my $name = basename($filename);
+ $self->nest("making screenshot ‘$name’", sub {
+ $self->sendMonitorCommand("screendump $tmp");
+ system("pnmtopng $tmp > ${filename}") == 0
+ or die "cannot convert screenshot";
+ unlink $tmp;
+ }, { image => $name } );
+}
+
+# Get the text of TTY<n>
+sub getTTYText {
+ my ($self, $tty) = @_;
+
+ my ($status, $out) = $self->execute("fold -w\$(stty -F /dev/tty${tty} size | awk '{print \$2}') /dev/vcs${tty}");
+ return $out;
+}
+
+# Wait until TTY<n>'s text matches a particular regular expression
+sub waitUntilTTYMatches {
+ my ($self, $tty, $regexp) = @_;
+
+ $self->nest("waiting for $regexp to appear on tty $tty", sub {
+ retry sub {
+ my ($retries_remaining) = @_;
+ if ($retries_remaining == 0) {
+ $self->log("Last chance to match /$regexp/ on TTY$tty, which currently contains:");
+ $self->log($self->getTTYText($tty));
+ }
+
+ return 1 if $self->getTTYText($tty) =~ /$regexp/;
+ }
+ });
+}
+
+# Debugging: Dump the contents of the TTY<n>
+sub dumpTTYContents {
+ my ($self, $tty) = @_;
+
+ $self->execute("fold -w 80 /dev/vcs${tty} | systemd-cat");
+}
+
+# Take a screenshot and return the result as text using optical character
+# recognition.
+sub getScreenText {
+ my ($self) = @_;
+
+ system("command -v tesseract &> /dev/null") == 0
+ or die "getScreenText used but enableOCR is false";
+
+ my $text;
+ $self->nest("performing optical character recognition", sub {
+ my $tmpbase = Cwd::abs_path(".")."/ocr";
+ my $tmpin = $tmpbase."in.ppm";
+
+ $self->sendMonitorCommand("screendump $tmpin");
+
+ my $magickArgs = "-filter Catrom -density 72 -resample 300 "
+ . "-contrast -normalize -despeckle -type grayscale "
+ . "-sharpen 1 -posterize 3 -negate -gamma 100 "
+ . "-blur 1x65535";
+ my $tessArgs = "-c debug_file=/dev/null --psm 11 --oem 2";
+
+ $text = `convert $magickArgs $tmpin tiff:- | tesseract - - $tessArgs`;
+ my $status = $? >> 8;
+ unlink $tmpin;
+
+ die "OCR failed with exit code $status" if $status != 0;
+ });
+ return $text;
+}
+
+
+# Wait until a specific regexp matches the textual contents of the screen.
+sub waitForText {
+ my ($self, $regexp) = @_;
+ $self->nest("waiting for $regexp to appear on the screen", sub {
+ retry sub {
+ my ($retries_remaining) = @_;
+ if ($retries_remaining == 0) {
+ $self->log("Last chance to match /$regexp/ on the screen, which currently contains:");
+ $self->log($self->getScreenText);
+ }
+
+ return 1 if $self->getScreenText =~ /$regexp/;
+ }
+ });
+}
+
+
+# Wait until it is possible to connect to the X server. Note that
+# testing the existence of /tmp/.X11-unix/X0 is insufficient.
+sub waitForX {
+ my ($self, $regexp) = @_;
+ $self->nest("waiting for the X11 server", sub {
+ retry sub {
+ my ($status, $out) = $self->execute("journalctl -b SYSLOG_IDENTIFIER=systemd | grep 'Reached target Current graphical'");
+ return 0 if $status != 0;
+ ($status, $out) = $self->execute("[ -e /tmp/.X11-unix/X0 ]");
+ return 1 if $status == 0;
+ }
+ });
+}
+
+
+sub getWindowNames {
+ my ($self) = @_;
+ my $res = $self->mustSucceed(
+ q{xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'});
+ return split /\n/, $res;
+}
+
+
+sub waitForWindow {
+ my ($self, $regexp) = @_;
+ $self->nest("waiting for a window to appear", sub {
+ retry sub {
+ my @names = $self->getWindowNames;
+
+ my ($retries_remaining) = @_;
+ if ($retries_remaining == 0) {
+ $self->log("Last chance to match /$regexp/ on the the window list, which currently contains:");
+ $self->log(join(", ", @names));
+ }
+
+ foreach my $n (@names) {
+ return 1 if $n =~ /$regexp/;
+ }
+ }
+ });
+}
+
+
+sub copyFileFromHost {
+ my ($self, $from, $to) = @_;
+ my $s = `cat $from` or die;
+ $s =~ s/'/'\\''/g;
+ $self->mustSucceed("echo '$s' > $to");
+}
+
+
+my %charToKey = (
+ 'A' => "shift-a", 'N' => "shift-n", '-' => "0x0C", '_' => "shift-0x0C", '!' => "shift-0x02",
+ 'B' => "shift-b", 'O' => "shift-o", '=' => "0x0D", '+' => "shift-0x0D", '@' => "shift-0x03",
+ 'C' => "shift-c", 'P' => "shift-p", '[' => "0x1A", '{' => "shift-0x1A", '#' => "shift-0x04",
+ 'D' => "shift-d", 'Q' => "shift-q", ']' => "0x1B", '}' => "shift-0x1B", '$' => "shift-0x05",
+ 'E' => "shift-e", 'R' => "shift-r", ';' => "0x27", ':' => "shift-0x27", '%' => "shift-0x06",
+ 'F' => "shift-f", 'S' => "shift-s", '\'' => "0x28", '"' => "shift-0x28", '^' => "shift-0x07",
+ 'G' => "shift-g", 'T' => "shift-t", '`' => "0x29", '~' => "shift-0x29", '&' => "shift-0x08",
+ 'H' => "shift-h", 'U' => "shift-u", '\\' => "0x2B", '|' => "shift-0x2B", '*' => "shift-0x09",
+ 'I' => "shift-i", 'V' => "shift-v", ',' => "0x33", '<' => "shift-0x33", '(' => "shift-0x0A",
+ 'J' => "shift-j", 'W' => "shift-w", '.' => "0x34", '>' => "shift-0x34", ')' => "shift-0x0B",
+ 'K' => "shift-k", 'X' => "shift-x", '/' => "0x35", '?' => "shift-0x35",
+ 'L' => "shift-l", 'Y' => "shift-y", ' ' => "spc",
+ 'M' => "shift-m", 'Z' => "shift-z", "\n" => "ret",
+);
+
+
+sub sendKeys {
+ my ($self, @keys) = @_;
+ foreach my $key (@keys) {
+ $key = $charToKey{$key} if exists $charToKey{$key};
+ $self->sendMonitorCommand("sendkey $key");
+ }
+}
+
+
+sub sendChars {
+ my ($self, $chars) = @_;
+ $self->nest("sending keys ‘$chars’", sub {
+ $self->sendKeys(split //, $chars);
+ });
+}
+
+
+# Sleep N seconds (in virtual guest time, not real time).
+sub sleep {
+ my ($self, $time) = @_;
+ $self->succeed("sleep $time");
+}
+
+
+# Forward a TCP port on the host to a TCP port on the guest. Useful
+# during interactive testing.
+sub forwardPort {
+ my ($self, $hostPort, $guestPort) = @_;
+ $hostPort = 8080 unless defined $hostPort;
+ $guestPort = 80 unless defined $guestPort;
+ $self->sendMonitorCommand("hostfwd_add tcp::$hostPort-:$guestPort");
+}
+
+
+1;
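
Both execute_ above and Machine.execute in the Python driver below recover a command's exit status over the serial root shell the same way: the command is wrapped as "( cmd ); echo '|!=EOF' $?" and the output is scanned for the '|!=EOF <status>' sentinel. A small self-contained sketch of that parsing step (plain Python, illustrative only; the buffered/DOTALL variant here is an assumption, while the drivers themselves scan line- or chunk-wise):

import re

# Sentinel emitted by the guest shell after every wrapped command.
STATUS_PATTERN = re.compile(r"(.*)\|!=EOF\s+(\d+)", re.DOTALL)

def split_output_and_status(buffered: str) -> tuple:
    """Split captured shell output from the trailing '|!=EOF <status>' marker."""
    match = STATUS_PATTERN.match(buffered)
    if match is None:
        raise ValueError("sentinel not seen yet; keep reading from the shell")
    return int(match.group(2)), match.group(1)

# A command that printed "hello" and exited with status 0:
status, out = split_output_and_status("hello\n|!=EOF 0\n")
assert (status, out) == (0, "hello\n")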
diff --git a/nixpkgs/nixos/lib/test-driver/test-driver.pl b/nixpkgs/nixos/lib/test-driver/test-driver.pl
new file mode 100644
index 00000000000..a3354fb0e1e
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test-driver.pl
@@ -0,0 +1,191 @@
+#! /somewhere/perl -w
+
+use strict;
+use Machine;
+use Term::ReadLine;
+use IO::File;
+use IO::Pty;
+use Logger;
+use Cwd;
+use POSIX qw(_exit dup2);
+use Time::HiRes qw(clock_gettime CLOCK_MONOTONIC);
+
+$SIG{PIPE} = 'IGNORE'; # because Unix domain sockets may die unexpectedly
+
+STDERR->autoflush(1);
+
+my $log = new Logger;
+
+
+# Start vde_switch for each network required by the test.
+my %vlans;
+foreach my $vlan (split / /, $ENV{VLANS} || "") {
+ next if defined $vlans{$vlan};
+ # Start vde_switch as a child process. We don't run it in daemon
+ # mode because we want the child process to be cleaned up when we
+ # die. Since we have to make sure that the control socket is
+ # ready, we send a dummy command to vde_switch (via stdin) and
+ # wait for a reply. Note that vde_switch requires stdin to be a
+ # TTY, so we create one.
+ $log->log("starting VDE switch for network $vlan");
+ my $socket = Cwd::abs_path "./vde$vlan.ctl";
+ my $pty = new IO::Pty;
+ my ($stdoutR, $stdoutW); pipe $stdoutR, $stdoutW;
+ my $pid = fork(); die "cannot fork" unless defined $pid;
+ if ($pid == 0) {
+ dup2(fileno($pty->slave), 0);
+ dup2(fileno($stdoutW), 1);
+ exec "vde_switch -s $socket --dirmode 0700" or _exit(1);
+ }
+ close $stdoutW;
+ print $pty "version\n";
+ readline $stdoutR or die "cannot start vde_switch";
+ $ENV{"QEMU_VDE_SOCKET_$vlan"} = $socket;
+ $vlans{$vlan} = $pty;
+ die unless -e "$socket/ctl";
+}
+
+
+my %vms;
+my $context = "";
+
+sub createMachine {
+ my ($args) = @_;
+ my $vm = Machine->new({%{$args}, log => $log, redirectSerial => ($ENV{USE_SERIAL} // "0") ne "1"});
+ $vms{$vm->name} = $vm;
+ $context .= "my \$" . $vm->name . " = \$vms{'" . $vm->name . "'}; ";
+ return $vm;
+}
+
+foreach my $vmScript (@ARGV) {
+ my $vm = createMachine({startCommand => $vmScript});
+}
+
+
+sub startAll {
+ $log->nest("starting all VMs", sub {
+ $_->start foreach values %vms;
+ });
+}
+
+
+# Wait until all VMs have terminated.
+sub joinAll {
+ $log->nest("waiting for all VMs to finish", sub {
+ $_->waitForShutdown foreach values %vms;
+ });
+}
+
+
+# In interactive tests, this allows the non-interactive test script to
+# be executed conveniently.
+sub testScript {
+ eval "$context $ENV{testScript};\n";
+ warn $@ if $@;
+}
+
+
+my $nrTests = 0;
+my $nrSucceeded = 0;
+
+
+sub subtest {
+ my ($name, $coderef) = @_;
+ $log->nest("subtest: $name", sub {
+ $nrTests++;
+ eval { &$coderef };
+ if ($@) {
+ $log->log("error: $@", { error => 1 });
+ } else {
+ $nrSucceeded++;
+ }
+ });
+}
+
+
+sub runTests {
+ if (defined $ENV{tests}) {
+ $log->nest("running the VM test script", sub {
+ eval "$context $ENV{tests}";
+ if ($@) {
+ $log->log("error: $@", { error => 1 });
+ die $@;
+ }
+ }, { expanded => 1 });
+ } else {
+ my $term = Term::ReadLine->new('nixos-vm-test');
+ $term->ReadHistory;
+ while (defined ($_ = $term->readline("> "))) {
+ eval "$context $_\n";
+ warn $@ if $@;
+ }
+ $term->WriteHistory;
+ }
+
+ # Copy the kernel coverage data for each machine, if the kernel
+ # has been compiled with coverage instrumentation.
+ $log->nest("collecting coverage data", sub {
+ foreach my $vm (values %vms) {
+ my $gcovDir = "/sys/kernel/debug/gcov";
+
+ next unless $vm->isUp();
+
+ my ($status, $out) = $vm->execute("test -e $gcovDir");
+ next if $status != 0;
+
+ # Figure out where to put the *.gcda files so that the
+ # report generator can find the corresponding kernel
+ # sources.
+ my $kernelDir = $vm->mustSucceed("echo \$(dirname \$(readlink -f /run/current-system/kernel))/.build/linux-*");
+ chomp $kernelDir;
+ my $coverageDir = "/tmp/xchg/coverage-data/$kernelDir";
+
+ # Copy all the *.gcda files.
+ $vm->execute("for d in $gcovDir/nix/store/*/.build/linux-*; do for i in \$(cd \$d && find -name '*.gcda'); do echo \$i; mkdir -p $coverageDir/\$(dirname \$i); cp -v \$d/\$i $coverageDir/\$i; done; done");
+ }
+ });
+
+ $log->nest("syncing", sub {
+ foreach my $vm (values %vms) {
+ next unless $vm->isUp();
+ $vm->execute("sync");
+ }
+ });
+
+ if ($nrTests != 0) {
+ $log->log("$nrSucceeded out of $nrTests tests succeeded",
+ ($nrSucceeded < $nrTests ? { error => 1 } : { }));
+ }
+}
+
+
+# Create an empty raw virtual disk with the given name and size (in
+# MiB).
+sub createDisk {
+ my ($name, $size) = @_;
+ system("qemu-img create -f raw $name ${size}M") == 0
+ or die "cannot create image of size $size";
+}
+
+
+END {
+ $log->nest("cleaning up", sub {
+ foreach my $vm (values %vms) {
+ if ($vm->{pid}) {
+ $log->log("killing " . $vm->{name} . " (pid " . $vm->{pid} . ")");
+ kill 9, $vm->{pid};
+ }
+ }
+ });
+ $log->close();
+}
+
+my $now1 = clock_gettime(CLOCK_MONOTONIC);
+
+runTests;
+
+my $now2 = clock_gettime(CLOCK_MONOTONIC);
+
+printf STDERR "test script finished in %.2fs\n", $now2 - $now1;
+
+exit ($nrSucceeded < $nrTests ? 1 : 0);
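
Both drivers start one vde_switch per VLAN with its stdin on a pseudo-terminal, send the dummy command "version", and treat the first line of output as the readiness signal (see the comment above; the Python create_vlan below notes in a TODO that its version of this check can block forever). One possible way to bound that wait, sketched with select (hypothetical, not part of the commit):

import os
import pty
import select
import subprocess

def start_vde_switch(ctl_dir: str, timeout: float = 10.0) -> "subprocess.Popen[bytes]":
    """Start vde_switch and wait at most `timeout` seconds until it answers."""
    master, slave = pty.openpty()          # vde_switch wants a TTY on stdin
    proc = subprocess.Popen(
        ["vde_switch", "-s", ctl_dir, "--dirmode", "0700"],
        stdin=slave,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    os.write(master, b"version\n")         # dummy command, as in both drivers
    assert proc.stdout is not None
    ready, _, _ = select.select([proc.stdout], [], [], timeout)
    if not ready or not proc.stdout.readline():
        proc.terminate()
        raise RuntimeError("vde_switch did not become ready in time")
    return proc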
diff --git a/nixpkgs/nixos/lib/test-driver/test-driver.py b/nixpkgs/nixos/lib/test-driver/test-driver.py
new file mode 100644
index 00000000000..7b8d5803aa5
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test-driver.py
@@ -0,0 +1,963 @@
+#! /somewhere/python3
+from contextlib import contextmanager, _GeneratorContextManager
+from queue import Queue, Empty
+from typing import Tuple, Any, Callable, Dict, Iterator, Optional, List
+from xml.sax.saxutils import XMLGenerator
+import queue
+import io
+import _thread
+import argparse
+import atexit
+import base64
+import codecs
+import os
+import pathlib
+import ptpython.repl
+import pty
+import re
+import shlex
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import unicodedata
+
+CHAR_TO_KEY = {
+ "A": "shift-a",
+ "N": "shift-n",
+ "-": "0x0C",
+ "_": "shift-0x0C",
+ "B": "shift-b",
+ "O": "shift-o",
+ "=": "0x0D",
+ "+": "shift-0x0D",
+ "C": "shift-c",
+ "P": "shift-p",
+ "[": "0x1A",
+ "{": "shift-0x1A",
+ "D": "shift-d",
+ "Q": "shift-q",
+ "]": "0x1B",
+ "}": "shift-0x1B",
+ "E": "shift-e",
+ "R": "shift-r",
+ ";": "0x27",
+ ":": "shift-0x27",
+ "F": "shift-f",
+ "S": "shift-s",
+ "'": "0x28",
+ '"': "shift-0x28",
+ "G": "shift-g",
+ "T": "shift-t",
+ "`": "0x29",
+ "~": "shift-0x29",
+ "H": "shift-h",
+ "U": "shift-u",
+ "\\": "0x2B",
+ "|": "shift-0x2B",
+ "I": "shift-i",
+ "V": "shift-v",
+ ",": "0x33",
+ "<": "shift-0x33",
+ "J": "shift-j",
+ "W": "shift-w",
+ ".": "0x34",
+ ">": "shift-0x34",
+ "K": "shift-k",
+ "X": "shift-x",
+ "/": "0x35",
+ "?": "shift-0x35",
+ "L": "shift-l",
+ "Y": "shift-y",
+ " ": "spc",
+ "M": "shift-m",
+ "Z": "shift-z",
+ "\n": "ret",
+ "!": "shift-0x02",
+ "@": "shift-0x03",
+ "#": "shift-0x04",
+ "$": "shift-0x05",
+ "%": "shift-0x06",
+ "^": "shift-0x07",
+ "&": "shift-0x08",
+ "*": "shift-0x09",
+ "(": "shift-0x0A",
+ ")": "shift-0x0B",
+}
+
+# Forward references
+log: "Logger"
+machines: "List[Machine]"
+
+
+def eprint(*args: object, **kwargs: Any) -> None:
+ print(*args, file=sys.stderr, **kwargs)
+
+
+def make_command(args: list) -> str:
+ return " ".join(map(shlex.quote, (map(str, args))))
+
+
+def create_vlan(vlan_nr: str) -> Tuple[str, str, "subprocess.Popen[bytes]", Any]:
+ global log
+ log.log("starting VDE switch for network {}".format(vlan_nr))
+ vde_socket = tempfile.mkdtemp(
+ prefix="nixos-test-vde-", suffix="-vde{}.ctl".format(vlan_nr)
+ )
+ pty_master, pty_slave = pty.openpty()
+ vde_process = subprocess.Popen(
+ ["vde_switch", "-s", vde_socket, "--dirmode", "0700"],
+ bufsize=1,
+ stdin=pty_slave,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
+ fd = os.fdopen(pty_master, "w")
+ fd.write("version\n")
+    # TODO: the Perl version checks whether this can be read from,
+    # and if not, dies. We could hang here forever. Fix it.
+ assert vde_process.stdout is not None
+ vde_process.stdout.readline()
+ if not os.path.exists(os.path.join(vde_socket, "ctl")):
+ raise Exception("cannot start vde_switch")
+
+ return (vlan_nr, vde_socket, vde_process, fd)
+
+
+def retry(fn: Callable) -> None:
+ """Call the given function repeatedly, with 1 second intervals,
+ until it returns True or a timeout is reached.
+ """
+
+ for _ in range(900):
+ if fn(False):
+ return
+ time.sleep(1)
+
+ if not fn(True):
+ raise Exception("action timed out")
+
+
+class Logger:
+ def __init__(self) -> None:
+ self.logfile = os.environ.get("LOGFILE", "/dev/null")
+ self.logfile_handle = codecs.open(self.logfile, "wb")
+ self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
+ self.queue: "Queue[Dict[str, str]]" = Queue()
+
+ self.xml.startDocument()
+ self.xml.startElement("logfile", attrs={})
+
+ def close(self) -> None:
+ self.xml.endElement("logfile")
+ self.xml.endDocument()
+ self.logfile_handle.close()
+
+ def sanitise(self, message: str) -> str:
+ return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
+
+ def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
+ if "machine" in attributes:
+ return "{}: {}".format(attributes["machine"], message)
+ return message
+
+ def log_line(self, message: str, attributes: Dict[str, str]) -> None:
+ self.xml.startElement("line", attributes)
+ self.xml.characters(message)
+ self.xml.endElement("line")
+
+ def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
+ eprint(self.maybe_prefix(message, attributes))
+ self.drain_log_queue()
+ self.log_line(message, attributes)
+
+ def enqueue(self, message: Dict[str, str]) -> None:
+ self.queue.put(message)
+
+ def drain_log_queue(self) -> None:
+ try:
+ while True:
+ item = self.queue.get_nowait()
+ attributes = {"machine": item["machine"], "type": "serial"}
+ self.log_line(self.sanitise(item["msg"]), attributes)
+ except Empty:
+ pass
+
+ @contextmanager
+ def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
+ eprint(self.maybe_prefix(message, attributes))
+
+ self.xml.startElement("nest", attrs={})
+ self.xml.startElement("head", attributes)
+ self.xml.characters(message)
+ self.xml.endElement("head")
+
+ tic = time.time()
+ self.drain_log_queue()
+ yield
+ self.drain_log_queue()
+ toc = time.time()
+ self.log("({:.2f} seconds)".format(toc - tic))
+
+ self.xml.endElement("nest")
+
+
+class Machine:
+ def __init__(self, args: Dict[str, Any]) -> None:
+ if "name" in args:
+ self.name = args["name"]
+ else:
+ self.name = "machine"
+ cmd = args.get("startCommand", None)
+ if cmd:
+ match = re.search("run-(.+)-vm$", cmd)
+ if match:
+ self.name = match.group(1)
+
+ self.script = args.get("startCommand", self.create_startcommand(args))
+
+ tmp_dir = os.environ.get("TMPDIR", tempfile.gettempdir())
+
+ def create_dir(name: str) -> str:
+ path = os.path.join(tmp_dir, name)
+ os.makedirs(path, mode=0o700, exist_ok=True)
+ return path
+
+ self.state_dir = create_dir("vm-state-{}".format(self.name))
+ self.shared_dir = create_dir("shared-xchg")
+
+ self.booted = False
+ self.connected = False
+ self.pid: Optional[int] = None
+ self.socket = None
+ self.monitor: Optional[socket.socket] = None
+ self.logger: Logger = args["log"]
+ self.allow_reboot = args.get("allowReboot", False)
+
+ @staticmethod
+ def create_startcommand(args: Dict[str, str]) -> str:
+ net_backend = "-netdev user,id=net0"
+ net_frontend = "-device virtio-net-pci,netdev=net0"
+
+ if "netBackendArgs" in args:
+ net_backend += "," + args["netBackendArgs"]
+
+ if "netFrontendArgs" in args:
+ net_frontend += "," + args["netFrontendArgs"]
+
+ start_command = (
+ "qemu-kvm -m 384 " + net_backend + " " + net_frontend + " $QEMU_OPTS "
+ )
+
+ if "hda" in args:
+ hda_path = os.path.abspath(args["hda"])
+ if args.get("hdaInterface", "") == "scsi":
+ start_command += (
+ "-drive id=hda,file="
+ + hda_path
+ + ",werror=report,if=none "
+ + "-device scsi-hd,drive=hda "
+ )
+ else:
+ start_command += (
+ "-drive file="
+ + hda_path
+ + ",if="
+ + args["hdaInterface"]
+ + ",werror=report "
+ )
+
+ if "cdrom" in args:
+ start_command += "-cdrom " + args["cdrom"] + " "
+
+ if "usb" in args:
+ start_command += (
+ "-device piix3-usb-uhci -drive "
+ + "id=usbdisk,file="
+ + args["usb"]
+ + ",if=none,readonly "
+ + "-device usb-storage,drive=usbdisk "
+ )
+ if "bios" in args:
+ start_command += "-bios " + args["bios"] + " "
+
+ start_command += args.get("qemuFlags", "")
+
+ return start_command
+
+ def is_up(self) -> bool:
+ return self.booted and self.connected
+
+ def log(self, msg: str) -> None:
+ self.logger.log(msg, {"machine": self.name})
+
+ def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
+ my_attrs = {"machine": self.name}
+ my_attrs.update(attrs)
+ return self.logger.nested(msg, my_attrs)
+
+ def wait_for_monitor_prompt(self) -> str:
+ assert self.monitor is not None
+ answer = ""
+ while True:
+ undecoded_answer = self.monitor.recv(1024)
+ if not undecoded_answer:
+ break
+ answer += undecoded_answer.decode()
+ if answer.endswith("(qemu) "):
+ break
+ return answer
+
+ def send_monitor_command(self, command: str) -> str:
+ message = ("{}\n".format(command)).encode()
+ self.log("sending monitor command: {}".format(command))
+ assert self.monitor is not None
+ self.monitor.send(message)
+ return self.wait_for_monitor_prompt()
+
+ def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
+ """Wait for a systemd unit to get into "active" state.
+ Throws exceptions on "failed" and "inactive" states as well as
+ after timing out.
+ """
+
+ def check_active(_: Any) -> bool:
+ info = self.get_unit_info(unit, user)
+ state = info["ActiveState"]
+ if state == "failed":
+ raise Exception('unit "{}" reached state "{}"'.format(unit, state))
+
+ if state == "inactive":
+ status, jobs = self.systemctl("list-jobs --full 2>&1", user)
+ if "No jobs" in jobs:
+ info = self.get_unit_info(unit, user)
+ if info["ActiveState"] == state:
+ raise Exception(
+ (
+ 'unit "{}" is inactive and there ' "are no pending jobs"
+ ).format(unit)
+ )
+
+ return state == "active"
+
+ retry(check_active)
+
+ def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
+ status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
+ if status != 0:
+ raise Exception(
+ 'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
+ unit, "" if user is None else 'under user "{}"'.format(user), status
+ )
+ )
+
+ line_pattern = re.compile(r"^([^=]+)=(.*)$")
+
+ def tuple_from_line(line: str) -> Tuple[str, str]:
+ match = line_pattern.match(line)
+ assert match is not None
+ return match[1], match[2]
+
+ return dict(
+ tuple_from_line(line)
+ for line in lines.split("\n")
+ if line_pattern.match(line)
+ )
+
+ def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
+ if user is not None:
+ q = q.replace("'", "\\'")
+ return self.execute(
+ (
+ "su -l {} --shell /bin/sh -c "
+ "$'XDG_RUNTIME_DIR=/run/user/`id -u` "
+ "systemctl --user {}'"
+ ).format(user, q)
+ )
+ return self.execute("systemctl {}".format(q))
+
+ def require_unit_state(self, unit: str, require_state: str = "active") -> None:
+ with self.nested(
+ "checking if unit ‘{}’ has reached state '{}'".format(unit, require_state)
+ ):
+ info = self.get_unit_info(unit)
+ state = info["ActiveState"]
+ if state != require_state:
+ raise Exception(
+ "Expected unit ‘{}’ to to be in state ".format(unit)
+ + "'{}' but it is in state ‘{}’".format(require_state, state)
+ )
+
+ def execute(self, command: str) -> Tuple[int, str]:
+ self.connect()
+
+ out_command = "( {} ); echo '|!=EOF' $?\n".format(command)
+ self.shell.send(out_command.encode())
+
+ output = ""
+ status_code_pattern = re.compile(r"(.*)\|\!=EOF\s+(\d+)")
+
+ while True:
+ chunk = self.shell.recv(4096).decode(errors="ignore")
+ match = status_code_pattern.match(chunk)
+ if match:
+ output += match[1]
+ status_code = int(match[2])
+ return (status_code, output)
+ output += chunk
+
+ def succeed(self, *commands: str) -> str:
+ """Execute each command and check that it succeeds."""
+ output = ""
+ for command in commands:
+ with self.nested("must succeed: {}".format(command)):
+ (status, out) = self.execute(command)
+ if status != 0:
+ self.log("output: {}".format(out))
+ raise Exception(
+ "command `{}` failed (exit code {})".format(command, status)
+ )
+ output += out
+ return output
+
+ def fail(self, *commands: str) -> None:
+ """Execute each command and check that it fails."""
+ for command in commands:
+ with self.nested("must fail: {}".format(command)):
+ status, output = self.execute(command)
+ if status == 0:
+ raise Exception(
+ "command `{}` unexpectedly succeeded".format(command)
+ )
+
+ def wait_until_succeeds(self, command: str) -> str:
+ """Wait until a command returns success and return its output.
+ Throws an exception on timeout.
+ """
+ output = ""
+
+ def check_success(_: Any) -> bool:
+ nonlocal output
+ status, output = self.execute(command)
+ return status == 0
+
+ with self.nested("waiting for success: {}".format(command)):
+ retry(check_success)
+ return output
+
+ def wait_until_fails(self, command: str) -> str:
+ """Wait until a command returns failure.
+ Throws an exception on timeout.
+ """
+ output = ""
+
+ def check_failure(_: Any) -> bool:
+ nonlocal output
+ status, output = self.execute(command)
+ return status != 0
+
+ with self.nested("waiting for failure: {}".format(command)):
+ retry(check_failure)
+ return output
+
+ def wait_for_shutdown(self) -> None:
+ if not self.booted:
+ return
+
+ with self.nested("waiting for the VM to power off"):
+ sys.stdout.flush()
+ self.process.wait()
+
+ self.pid = None
+ self.booted = False
+ self.connected = False
+
+ def get_tty_text(self, tty: str) -> str:
+ status, output = self.execute(
+ "fold -w$(stty -F /dev/tty{0} size | "
+ "awk '{{print $2}}') /dev/vcs{0}".format(tty)
+ )
+ return output
+
+ def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
+ """Wait until the visible output on the chosen TTY matches regular
+ expression. Throws an exception on timeout.
+ """
+ matcher = re.compile(regexp)
+
+ def tty_matches(last: bool) -> bool:
+ text = self.get_tty_text(tty)
+ if last:
+ self.log(
+ f"Last chance to match /{regexp}/ on TTY{tty}, "
+ f"which currently contains: {text}"
+ )
+ return len(matcher.findall(text)) > 0
+
+ with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
+ retry(tty_matches)
+
+ def send_chars(self, chars: List[str]) -> None:
+ with self.nested("sending keys ‘{}‘".format(chars)):
+ for char in chars:
+ self.send_key(char)
+
+ def wait_for_file(self, filename: str) -> None:
+ """Waits until the file exists in machine's file system."""
+
+ def check_file(_: Any) -> bool:
+ status, _ = self.execute("test -e {}".format(filename))
+ return status == 0
+
+ with self.nested("waiting for file ‘{}‘".format(filename)):
+ retry(check_file)
+
+ def wait_for_open_port(self, port: int) -> None:
+ def port_is_open(_: Any) -> bool:
+ status, _ = self.execute("nc -z localhost {}".format(port))
+ return status == 0
+
+ with self.nested("waiting for TCP port {}".format(port)):
+ retry(port_is_open)
+
+ def wait_for_closed_port(self, port: int) -> None:
+ def port_is_closed(_: Any) -> bool:
+ status, _ = self.execute("nc -z localhost {}".format(port))
+ return status != 0
+
+ retry(port_is_closed)
+
+ def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+ return self.systemctl("start {}".format(jobname), user)
+
+ def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+ return self.systemctl("stop {}".format(jobname), user)
+
+ def wait_for_job(self, jobname: str) -> None:
+ self.wait_for_unit(jobname)
+
+ def connect(self) -> None:
+ if self.connected:
+ return
+
+ with self.nested("waiting for the VM to finish booting"):
+ self.start()
+
+ tic = time.time()
+ self.shell.recv(1024)
+ # TODO: Timeout
+ toc = time.time()
+
+ self.log("connected to guest root shell")
+ self.log("(connecting took {:.2f} seconds)".format(toc - tic))
+ self.connected = True
+
+ def screenshot(self, filename: str) -> None:
+ out_dir = os.environ.get("out", os.getcwd())
+ word_pattern = re.compile(r"^\w+$")
+ if word_pattern.match(filename):
+ filename = os.path.join(out_dir, "{}.png".format(filename))
+ tmp = "{}.ppm".format(filename)
+
+ with self.nested(
+ "making screenshot {}".format(filename),
+ {"image": os.path.basename(filename)},
+ ):
+ self.send_monitor_command("screendump {}".format(tmp))
+ ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
+ os.unlink(tmp)
+ if ret.returncode != 0:
+ raise Exception("Cannot convert screenshot")
+
+ def copy_from_host_via_shell(self, source: str, target: str) -> None:
+ """Copy a file from the host into the guest by piping it over the
+ shell into the destination file. Works without host-guest shared folder.
+        Prefer copy_from_host whenever possible.
+ """
+ with open(source, "rb") as fh:
+ content_b64 = base64.b64encode(fh.read()).decode()
+ self.succeed(
+ f"mkdir -p $(dirname {target})",
+ f"echo -n {content_b64} | base64 -d > {target}",
+ )
+
+ def copy_from_host(self, source: str, target: str) -> None:
+ """Copy a file from the host into the guest via the `shared_dir` shared
+ among all the VMs (using a temporary directory).
+ """
+ host_src = pathlib.Path(source)
+ vm_target = pathlib.Path(target)
+ with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+ shared_temp = pathlib.Path(shared_td)
+ host_intermediate = shared_temp / host_src.name
+ vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
+ vm_intermediate = vm_shared_temp / host_src.name
+
+ self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+ if host_src.is_dir():
+ shutil.copytree(host_src, host_intermediate)
+ else:
+ shutil.copy(host_src, host_intermediate)
+ self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
+ self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))
+
+ def copy_from_vm(self, source: str, target_dir: str = "") -> None:
+ """Copy a file from the VM (specified by an in-VM source path) to a path
+ relative to `$out`. The file is copied via the `shared_dir` shared among
+ all the VMs (using a temporary directory).
+ """
+ # Compute the source, target, and intermediate shared file names
+ out_dir = pathlib.Path(os.environ.get("out", os.getcwd()))
+ vm_src = pathlib.Path(source)
+ with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+ shared_temp = pathlib.Path(shared_td)
+ vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
+ vm_intermediate = vm_shared_temp / vm_src.name
+ intermediate = shared_temp / vm_src.name
+ # Copy the file to the shared directory inside VM
+ self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+ self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
+ abs_target = out_dir / target_dir / vm_src.name
+ abs_target.parent.mkdir(exist_ok=True, parents=True)
+ # Copy the file from the shared directory outside VM
+ if intermediate.is_dir():
+ shutil.copytree(intermediate, abs_target)
+ else:
+ shutil.copy(intermediate, abs_target)
+
+ def dump_tty_contents(self, tty: str) -> None:
+ """Debugging: Dump the contents of the TTY<n>
+ """
+ self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))
+
+ def get_screen_text(self) -> str:
+ if shutil.which("tesseract") is None:
+ raise Exception("get_screen_text used but enableOCR is false")
+
+ magick_args = (
+ "-filter Catrom -density 72 -resample 300 "
+ + "-contrast -normalize -despeckle -type grayscale "
+ + "-sharpen 1 -posterize 3 -negate -gamma 100 "
+ + "-blur 1x65535"
+ )
+
+ tess_args = "-c debug_file=/dev/null --psm 11 --oem 2"
+
+ with self.nested("performing optical character recognition"):
+ with tempfile.NamedTemporaryFile() as tmpin:
+ self.send_monitor_command("screendump {}".format(tmpin.name))
+
+ cmd = "convert {} {} tiff:- | tesseract - - {}".format(
+ magick_args, tmpin.name, tess_args
+ )
+ ret = subprocess.run(cmd, shell=True, capture_output=True)
+ if ret.returncode != 0:
+ raise Exception(
+ "OCR failed with exit code {}".format(ret.returncode)
+ )
+
+ return ret.stdout.decode("utf-8")
+
+ def wait_for_text(self, regex: str) -> None:
+ def screen_matches(last: bool) -> bool:
+ text = self.get_screen_text()
+ matches = re.search(regex, text) is not None
+
+ if last and not matches:
+ self.log("Last OCR attempt failed. Text was: {}".format(text))
+
+ return matches
+
+ with self.nested("waiting for {} to appear on screen".format(regex)):
+ retry(screen_matches)
+
+ def wait_for_console_text(self, regex: str) -> None:
+ self.log("waiting for {} to appear on console".format(regex))
+ # Buffer the console output, this is needed
+ # to match multiline regexes.
+ console = io.StringIO()
+ while True:
+ try:
+ console.write(self.last_lines.get())
+ except queue.Empty:
+ self.sleep(1)
+ continue
+ console.seek(0)
+ matches = re.search(regex, console.read())
+ if matches is not None:
+ return
+
+ def send_key(self, key: str) -> None:
+ key = CHAR_TO_KEY.get(key, key)
+ self.send_monitor_command("sendkey {}".format(key))
+
+ def start(self) -> None:
+ if self.booted:
+ return
+
+ self.log("starting vm")
+
+ def create_socket(path: str) -> socket.socket:
+ if os.path.exists(path):
+ os.unlink(path)
+ s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
+ s.bind(path)
+ s.listen(1)
+ return s
+
+ monitor_path = os.path.join(self.state_dir, "monitor")
+ self.monitor_socket = create_socket(monitor_path)
+
+ shell_path = os.path.join(self.state_dir, "shell")
+ self.shell_socket = create_socket(shell_path)
+
+ qemu_options = (
+ " ".join(
+ [
+ "" if self.allow_reboot else "-no-reboot",
+ "-monitor unix:{}".format(monitor_path),
+ "-chardev socket,id=shell,path={}".format(shell_path),
+ "-device virtio-serial",
+ "-device virtconsole,chardev=shell",
+ "-device virtio-rng-pci",
+ "-serial stdio" if "DISPLAY" in os.environ else "-nographic",
+ ]
+ )
+ + " "
+ + os.environ.get("QEMU_OPTS", "")
+ )
+
+ environment = dict(os.environ)
+ environment.update(
+ {
+ "TMPDIR": self.state_dir,
+ "SHARED_DIR": self.shared_dir,
+ "USE_TMPDIR": "1",
+ "QEMU_OPTS": qemu_options,
+ }
+ )
+
+ self.process = subprocess.Popen(
+ self.script,
+ bufsize=1,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=True,
+ cwd=self.state_dir,
+ env=environment,
+ )
+ self.monitor, _ = self.monitor_socket.accept()
+ self.shell, _ = self.shell_socket.accept()
+
+ # Store last serial console lines for use
+ # of wait_for_console_text
+ self.last_lines: Queue = Queue()
+
+ def process_serial_output() -> None:
+ assert self.process.stdout is not None
+ for _line in self.process.stdout:
+ # Ignore undecodable bytes that may occur in boot menus
+ line = _line.decode(errors="ignore").replace("\r", "").rstrip()
+ self.last_lines.put(line)
+ eprint("{} # {}".format(self.name, line))
+ self.logger.enqueue({"msg": line, "machine": self.name})
+
+ _thread.start_new_thread(process_serial_output, ())
+
+ self.wait_for_monitor_prompt()
+
+ self.pid = self.process.pid
+ self.booted = True
+
+ self.log("QEMU running (pid {})".format(self.pid))
+
+ def cleanup_statedir(self) -> None:
+ self.log("delete the VM state directory")
+        if os.path.isdir(self.state_dir):
+ shutil.rmtree(self.state_dir)
+
+ def shutdown(self) -> None:
+ if not self.booted:
+ return
+
+ self.shell.send("poweroff\n".encode())
+ self.wait_for_shutdown()
+
+ def crash(self) -> None:
+ if not self.booted:
+ return
+
+ self.log("forced crash")
+ self.send_monitor_command("quit")
+ self.wait_for_shutdown()
+
+ def wait_for_x(self) -> None:
+ """Wait until it is possible to connect to the X server. Note that
+ testing the existence of /tmp/.X11-unix/X0 is insufficient.
+ """
+
+ def check_x(_: Any) -> bool:
+ cmd = (
+ "journalctl -b SYSLOG_IDENTIFIER=systemd | "
+ + 'grep "Reached target Current graphical"'
+ )
+ status, _ = self.execute(cmd)
+ if status != 0:
+ return False
+ status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
+ return status == 0
+
+ with self.nested("waiting for the X11 server"):
+ retry(check_x)
+
+ def get_window_names(self) -> List[str]:
+ return self.succeed(
+ r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
+ ).splitlines()
+
+ def wait_for_window(self, regexp: str) -> None:
+ pattern = re.compile(regexp)
+
+ def window_is_visible(last_try: bool) -> bool:
+ names = self.get_window_names()
+ if last_try:
+ self.log(
+ "Last chance to match {} on the window list,".format(regexp)
+ + " which currently contains: "
+ + ", ".join(names)
+ )
+ return any(pattern.search(name) for name in names)
+
+ with self.nested("Waiting for a window to appear"):
+ retry(window_is_visible)
+
+ def sleep(self, secs: int) -> None:
+ time.sleep(secs)
+
+ def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
+ """Forward a TCP port on the host to a TCP port on the guest.
+ Useful during interactive testing.
+ """
+ self.send_monitor_command(
+ "hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
+ )
+
+ def block(self) -> None:
+ """Make the machine unreachable by shutting down eth1 (the multicast
+ interface used to talk to the other VMs). We keep eth0 up so that
+ the test driver can continue to talk to the machine.
+ """
+ self.send_monitor_command("set_link virtio-net-pci.1 off")
+
+ def unblock(self) -> None:
+ """Make the machine reachable.
+ """
+ self.send_monitor_command("set_link virtio-net-pci.1 on")
+
+
+def create_machine(args: Dict[str, Any]) -> Machine:
+ global log
+ args["log"] = log
+ args["redirectSerial"] = os.environ.get("USE_SERIAL", "0") == "1"
+ return Machine(args)
+
+
+def start_all() -> None:
+ global machines
+ with log.nested("starting all VMs"):
+ for machine in machines:
+ machine.start()
+
+
+def join_all() -> None:
+ global machines
+ with log.nested("waiting for all VMs to finish"):
+ for machine in machines:
+ machine.wait_for_shutdown()
+
+
+def test_script() -> None:
+ exec(os.environ["testScript"])
+
+
+def run_tests() -> None:
+ global machines
+ tests = os.environ.get("tests", None)
+ if tests is not None:
+ with log.nested("running the VM test script"):
+ try:
+ exec(tests, globals())
+ except Exception as e:
+ eprint("error: ")
+ traceback.print_exc()
+ sys.exit(1)
+ else:
+ ptpython.repl.embed(locals(), globals())
+
+ # TODO: Collect coverage data
+
+ for machine in machines:
+ if machine.is_up():
+ machine.execute("sync")
+
+
+@contextmanager
+def subtest(name: str) -> Iterator[None]:
+ with log.nested(name):
+ try:
+ yield
+ except Exception as e:
+ log.log(f'Test "{name}" failed with error: "{e}"')
+ raise e
+
+
+if __name__ == "__main__":
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument(
+ "-K",
+ "--keep-vm-state",
+ help="re-use a VM state coming from a previous run",
+ action="store_true",
+ )
+ (cli_args, vm_scripts) = arg_parser.parse_known_args()
+
+ log = Logger()
+
+ vlan_nrs = list(dict.fromkeys(os.environ.get("VLANS", "").split()))
+ vde_sockets = [create_vlan(v) for v in vlan_nrs]
+ for nr, vde_socket, _, _ in vde_sockets:
+ os.environ["QEMU_VDE_SOCKET_{}".format(nr)] = vde_socket
+
+ machines = [create_machine({"startCommand": s}) for s in vm_scripts]
+ for machine in machines:
+ if not cli_args.keep_vm_state:
+ machine.cleanup_statedir()
+ machine_eval = [
+ "{0} = machines[{1}]".format(m.name, idx) for idx, m in enumerate(machines)
+ ]
+ exec("\n".join(machine_eval))
+
+ @atexit.register
+ def clean_up() -> None:
+ with log.nested("cleaning up"):
+ for machine in machines:
+ if machine.pid is None:
+ continue
+ log.log("killing {} (pid {})".format(machine.name, machine.pid))
+ machine.process.kill()
+ for _, _, process, _ in vde_sockets:
+ process.terminate()
+ log.close()
+
+ tic = time.time()
+ run_tests()
+ toc = time.time()
+ print("test script finished in {:.2f}s".format(toc - tic))