tstest/integration/vms: build and run NixOS (#2190)

Okay, so, at a high level testing NixOS is a lot different from
other distros due to NixOS' determinism. Normally NixOS wants packages to
be defined in an overlay, a custom packageOverrides, or even yolo-inline
as part of the system configuration. This has us take a different approach
than we do for the other distributions. The overall plan here is as follows:

1. make the binaries as normal
2. template in their paths as raw strings to the nixos system module
3. run `nixos-generate -f qcow -o $CACHE_DIR/tailscale/nixos/version -c generated-config.nix`
4. pass that to the steps that make the virtual machine

It doesn't really make sense for us to use a premade virtual machine image
for this, as that would make it harder to create the image deterministically.
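
Step 3, for the nixos-unstable image, boils down to an invocation roughly
like this (the cache path is illustrative; the test derives it from the user
cache directory):

    NIX_PATH=nixpkgs=channel:nixos-unstable nixos-generate -f qcow \
      -o "$XDG_CACHE_HOME/tailscale/vm-test/nixos/nixos-unstable" \
      -c generated-config.nix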

Nix commands generate a lot of output, so their output is hidden behind the
`-verbose-nix-output` flag.
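
For example, to run the suite locally with the Nix build logs visible:

    go test ./tstest/integration/vms -run-vm-tests -verbose-nix-output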

This unfortunately gives the test suite a hard dependency on Nix/NixOS;
however, the test suite has only ever been run on NixOS (and I am not sure
if it runs on other distros at all), so this probably isn't too big of an
issue.

Signed-off-by: Christine Dodrill <xe@tailscale.com>
Christine Dodrill 3 years ago committed by GitHub
parent 72a0b5f042
commit b131a74f99

@@ -29,7 +29,7 @@ jobs:
XDG_CACHE_HOME: "/var/lib/ghrunner/cache"
- name: Run VM tests
-run: go test ./tstest/integration/vms -v -run-vm-tests
+run: go test ./tstest/integration/vms -run-vm-tests
env:
TMPDIR: "/tmp"
XDG_CACHE_HOME: "/var/lib/ghrunner/cache"

@@ -0,0 +1,206 @@
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux

package vms
import (
"flag"
"os"
"os/exec"
"path/filepath"
"testing"
"text/template"
"tailscale.com/tstest/integration"
"tailscale.com/types/logger"
)
var (
verboseNixOutput = flag.Bool("verbose-nix-output", false, "if set, use verbose nix output (lots of noise)")
)
/*
NOTE(Xe): Okay, so, at a high level testing NixOS is a lot different from
other distros due to NixOS' determinism. Normally NixOS wants packages to
be defined in an overlay, a custom packageOverrides, or even yolo-inline
as part of the system configuration. This has us take a different approach
than we do for the other distributions. The overall plan here is as follows:
1. make the binaries as normal
2. template in their paths as raw strings to the nixos system module
3. run `nixos-generate -f qcow -o $CACHE_DIR/tailscale/nixos/version -c generated-config.nix`
4. pass that to the steps that make the virtual machine
It doesn't really make sense for us to use a premade virtual machine image
for this, as that would make it harder to create the image deterministically.
*/
const nixosConfigTemplate = `
# NOTE(Xe): This template is going to be heavily commented.
# All NixOS modules are functions. Here is the function prelude for this NixOS
# module that defines the system. It is a function that takes in an attribute
# set (effectively a map[string]nix.Value) and destructures it to some variables:
{
# other NixOS settings as defined in other modules
config,
# nixpkgs, which is basically the standard library of NixOS
pkgs,
# the path to some system-scoped NixOS modules that aren't imported by default
modulesPath,
# the rest of the arguments don't matter
...
}:
# Nix's syntax was inspired by Haskell and other functional languages, so the
# let .. in pattern is used to create scoped variables:
let
# Define the package (derivation) for Tailscale based on the binaries we
# just built for this test:
testTailscale = pkgs.stdenv.mkDerivation {
# The name of the package. This usually includes a version, but it
# doesn't matter here.
name = "tailscale-test";
# The path on disk to the "source code" of the package, in this case it is
# the path to the binaries that are built. This needs to be the raw
# unquoted slash-separated path, not a string containing the path, because Nix
# has a special path type.
src = {{.BinPath}};
# We only need to worry about the install phase because we've already
# built the binaries.
phases = "installPhase";
# We need to wrap tailscaled such that it has iptables in its $PATH.
nativeBuildInputs = [ pkgs.makeWrapper ];
# The install instructions for this package ('' '' defines a multi-line string).
# The with statement lets us bring in values into scope as if they were
# defined in the current scope.
installPhase = with pkgs; ''
# This is bash.
# Make the output folders for the package (systemd unit and binary folders).
mkdir -p $out/bin
# Install tailscale{,d}
cp $src/tailscale $out/bin/tailscale
cp $src/tailscaled $out/bin/tailscaled
# Wrap tailscaled with the ip and iptables commands.
wrapProgram $out/bin/tailscaled --prefix PATH : ${
lib.makeBinPath [ iproute iptables ]
}
# Install systemd unit.
cp $src/systemd/tailscaled.service .
sed -i -e "s#/usr/sbin#$out/bin#" -e "/^EnvironmentFile/d" ./tailscaled.service
install -D -m0444 -t $out/lib/systemd/system ./tailscaled.service
'';
};
in {
# This is a QEMU VM. This module has a lot of common qemu VM settings so you
# don't have to set them manually.
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
# We need virtio support to boot.
boot.initrd.availableKernelModules =
[ "ata_piix" "uhci_hcd" "virtio_pci" "sr_mod" "virtio_blk" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
# Curl is needed for one of the steps in cloud-final
systemd.services.cloud-final.path = [ pkgs.curl ];
# yolo, this vm can sudo freely.
security.sudo.wheelNeedsPassword = false;
# Enable cloud-init so we can set VM hostnames and the like, the same as on
# other distros. This will also take care of SSH keys. It's pretty handy.
services.cloud-init = {
enable = true;
ext4.enable = true;
};
# We want sshd running.
services.openssh.enable = true;
# Tailscale settings:
services.tailscale = {
# We want Tailscale to start at boot.
enable = true;
# Use the Tailscale package we just assembled.
package = testTailscale;
};
}`
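
// copyUnit copies the tailscaled systemd unit from the repository into the
// directory containing the freshly built binaries so the NixOS derivation
// above can install it.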
func copyUnit(t *testing.T, bins *integration.Binaries) {
t.Helper()
data, err := os.ReadFile("../../../cmd/tailscaled/tailscaled.service")
if err != nil {
t.Fatal(err)
}
os.MkdirAll(filepath.Join(bins.Dir, "systemd"), 0755)
err = os.WriteFile(filepath.Join(bins.Dir, "systemd", "tailscaled.service"), data, 0666)
if err != nil {
t.Fatal(err)
}
}
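
// makeNixOSImage templates the path of the built test binaries into the NixOS
// configuration above and runs nixos-generate over it to build a qcow2 disk
// image, returning the path to that image.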
func makeNixOSImage(t *testing.T, d Distro, cdir string, bins *integration.Binaries) string {
copyUnit(t, bins)
dir := t.TempDir()
fname := filepath.Join(dir, d.name+".nix")
fout, err := os.Create(fname)
if err != nil {
t.Fatal(err)
}
tmpl := template.Must(template.New("base.nix").Parse(nixosConfigTemplate))
err = tmpl.Execute(fout, struct{ BinPath string }{BinPath: bins.Dir})
if err != nil {
t.Fatal(err)
}
err = fout.Close()
if err != nil {
t.Fatal(err)
}
outpath := filepath.Join(cdir, "nixos")
os.MkdirAll(outpath, 0755)
t.Cleanup(func() {
os.RemoveAll(filepath.Join(outpath, d.name)) // makes the disk image a candidate for GC
})
cmd := exec.Command("nixos-generate", "-f", "qcow", "-o", filepath.Join(outpath, d.name), "-c", fname)
if *verboseNixOutput {
cmd.Stdout = logger.FuncWriter(t.Logf)
cmd.Stderr = logger.FuncWriter(t.Logf)
} else {
t.Log("building nixos image...")
}
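// d.url is a channel reference (e.g. channel:nixos-unstable), so point
// nixpkgs at it for this build.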
cmd.Env = append(os.Environ(), "NIX_PATH=nixpkgs="+d.url)
cmd.Dir = outpath
if err := cmd.Run(); err != nil {
t.Fatalf("error while making NixOS image for %s: %v", d.name, err)
}
if !*verboseNixOutput {
t.Log("done")
}
return filepath.Join(outpath, d.name, "nixos.qcow2")
}

@@ -28,6 +28,18 @@
# The C compiler so cgo builds work.
gcc
# The package manager Nix, just in case.
nix
# Used to generate a NixOS image for testing.
nixos-generators
# Used to extract things.
gnutar
# Used to decompress things.
lzma
];
# Customize this to include your GitHub username so we can track

@@ -100,6 +100,8 @@ func TestDownloadImages(t *testing.T) {
t.Skip("not running integration tests (need --run-vm-tests)")
}
bins := integration.BuildTestBinaries(t)
for _, d := range distros {
distro := d
t.Run(distro.name, func(t *testing.T) {
@@ -107,9 +109,13 @@ func TestDownloadImages(t *testing.T) {
t.Skipf("distro name %q doesn't match regex: %s", distro.name, distroRex)
}
if strings.HasPrefix(distro.name, "nixos") {
t.Skip("NixOS is built on the fly, no need to download it")
}
t.Parallel()
-fetchDistro(t, distro)
+fetchDistro(t, distro, bins)
})
}
}
@@ -158,6 +164,14 @@ var distros = []Distro{
{"ubuntu-20-04", "https://cloud-images.ubuntu.com/focal/20210603/focal-server-cloudimg-amd64.img", "1c0969323b058ba8b91fec245527069c2f0502fc119b9138b213b6bfebd965cb", 512, "apt", "systemd"},
{"ubuntu-20-10", "https://cloud-images.ubuntu.com/groovy/20210604/groovy-server-cloudimg-amd64.img", "2196df5f153faf96443e5502bfdbcaa0baaefbaec614348fec344a241855b0ef", 512, "apt", "systemd"},
{"ubuntu-21-04", "https://cloud-images.ubuntu.com/hirsute/20210603/hirsute-server-cloudimg-amd64.img", "bf07f36fc99ff521d3426e7d257e28f0c81feebc9780b0c4f4e25ae594ff4d3b", 512, "apt", "systemd"},
// NOTE(Xe): We build fresh NixOS images for every test run, so the URL being
// used here is actually the URL of the NixOS channel being built from and the
// shasum is meaningless. This `channel:name` syntax is documented at [1].
//
// [1]: https://nixos.org/manual/nix/unstable/command-ref/env-common.html
{"nixos-unstable", "channel:nixos-unstable", "lolfakesha", 512, "nix", "systemd"},
{"nixos-21-05", "channel:nixos-21.05", "lolfakesha", 512, "nix", "systemd"},
}
// fetchFromS3 fetches a distribution image from Amazon S3 or reports whether
@@ -212,7 +226,7 @@ func fetchFromS3(t *testing.T, fout *os.File, d Distro) bool {
// fetchDistro fetches a distribution from the internet if it doesn't already exist locally. It
// also validates the sha256 sum from a known good hash.
-func fetchDistro(t *testing.T, resultDistro Distro) {
+func fetchDistro(t *testing.T, resultDistro Distro, bins *integration.Binaries) string {
t.Helper()
cdir, err := os.UserCacheDir()
@@ -221,6 +235,10 @@ func fetchDistro(t *testing.T, resultDistro Distro) {
}
cdir = filepath.Join(cdir, "tailscale", "vm-test")
if strings.HasPrefix(resultDistro.name, "nixos") {
return makeNixOSImage(t, resultDistro, cdir, bins)
}
qcowPath := filepath.Join(cdir, "qcow2", resultDistro.sha256sum)
_, err = os.Stat(qcowPath)
@@ -267,6 +285,8 @@ func fetchDistro(t *testing.T, resultDistro Distro) {
}
}
}
return qcowPath
}
func checkCachedImageHash(t *testing.T, d Distro, cacheDir string) (gotHash string) {
@@ -311,18 +331,12 @@ func run(t *testing.T, dir, prog string, args ...string) {
// mkLayeredQcow makes a layered qcow image that allows us to keep the upstream
// VM images pristine and only do our changes on an overlay.
-func mkLayeredQcow(t *testing.T, tdir string, d Distro) {
+func mkLayeredQcow(t *testing.T, tdir string, d Distro, qcowBase string) {
t.Helper()
cdir, err := os.UserCacheDir()
if err != nil {
t.Fatalf("can't find cache dir: %v", err)
}
cdir = filepath.Join(cdir, "tailscale", "vm-test")
run(t, tdir, "qemu-img", "create",
"-f", "qcow2",
"-o", "backing_file="+filepath.Join(cdir, "qcow2", d.sha256sum),
"-o", "backing_file="+qcowBase,
filepath.Join(tdir, d.name+".qcow2"),
)
}
@@ -413,7 +427,7 @@ func mkSeed(t *testing.T, d Distro, sshKey, hostURL, tdir string, port int) {
// mkVM makes a KVM-accelerated virtual machine and prepares it for introduction
// to the testcontrol server. The function it returns is for killing the virtual
// machine when it is time for it to die.
-func mkVM(t *testing.T, n int, d Distro, sshKey, hostURL, tdir string) {
+func mkVM(t *testing.T, n int, d Distro, sshKey, hostURL, tdir string, bins *integration.Binaries) {
t.Helper()
cdir, err := os.UserCacheDir()
@@ -428,8 +442,7 @@ func mkVM(t *testing.T, n int, d Distro, sshKey, hostURL, tdir string) {
t.Fatal(err)
}
-fetchDistro(t, d)
-mkLayeredQcow(t, tdir, d)
+mkLayeredQcow(t, tdir, d, fetchDistro(t, d, bins))
mkSeed(t, d, sshKey, hostURL, tdir, port)
driveArg := fmt.Sprintf("file=%s,if=virtio", filepath.Join(tdir, d.name+".qcow2"))
@@ -634,7 +647,7 @@ func TestVMIntegrationEndToEnd(t *testing.T) {
}
defer ramsem.Release(int64(distro.mem))
-mkVM(t, n, distro, string(pubkey), loginServer, dir)
+mkVM(t, n, distro, string(pubkey), loginServer, dir, bins)
var ipm ipMapping
t.Run("wait-for-start", func(t *testing.T) {
@@ -747,6 +760,10 @@ func testDistro(t *testing.T, loginServer string, d Distro, signer ssh.Signer, i
}
func copyBinaries(t *testing.T, d Distro, conn *ssh.Client, bins *integration.Binaries) {
if strings.HasPrefix(d.name, "nixos") {
return
}
cli, err := sftp.NewClient(conn)
if err != nil {
t.Fatalf("can't connect over sftp to copy binaries: %v", err)
