tstest/integration: build binaries only once

The existing code relied on the Go build cache to avoid
needless work when obtaining the tailscale binaries.

For non-obvious reasons, the binaries were getting re-linked
every time, which added 600ms or so on my machine to every test.

Instead, build the binaries exactly once, on demand.
This reduces the time to run 'go test -count=5' from 34s to 10s
on my machine.

Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
Branch: pull/3566/head
Author: Josh Bleecher Snyder, committed by Josh Bleecher Snyder (3 years ago)
Parent: e133bb570b
Commit: 4691e012a9
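At its core, the change replaces a per-test build (each test got its own
t.TempDir and ran its own "go install") with one lazily built, process-wide
set of binaries. Condensed from the diff below (buildTestBinaries elided),
the shape of the new API is:

	var (
		buildOnce sync.Once // guards the single build per "go test" process
		buildErr  error     // sticky build result, reported to every caller
		binDir    string    // shared output directory
	)

	// BinaryDir triggers the build on first use; later callers block on
	// the Once until the build finishes, then share its result.
	func BinaryDir(tb testing.TB) string {
		buildOnce.Do(func() {
			binDir, buildErr = buildTestBinaries()
		})
		if buildErr != nil {
			tb.Fatal(buildErr)
		}
		return binDir
	}

	// CleanupBinaries runs from TestMain, the only hook that runs after
	// every test in the package. The no-op Do marks the Once consumed
	// (and waits out any in-flight build) before removing the directory.
	func CleanupBinaries() {
		buildOnce.Do(func() {})
		if binDir != "" {
			os.RemoveAll(binDir)
		}
	}

Because the binaries are shared across tests, t.Cleanup would remove them too
early; hence the TestMain/CleanupBinaries contract that each test package
using these helpers must follow.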

@@ -43,38 +43,68 @@ import (
 	"tailscale.com/version"
 )
 
-// Binaries are the paths to a tailscaled and tailscale binary.
-// These can be shared by multiple nodes.
-type Binaries struct {
-	Dir    string // temp dir for tailscale & tailscaled
-	Daemon string // tailscaled
-	CLI    string // tailscale
-}
-
-// BuildTestBinaries builds tailscale and tailscaled, failing the test
-// if they fail to compile.
-func BuildTestBinaries(t testing.TB) *Binaries {
-	td := t.TempDir()
-	build(t, td, "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale")
-	return &Binaries{
-		Dir:    td,
-		Daemon: filepath.Join(td, "tailscaled"+exe()),
-		CLI:    filepath.Join(td, "tailscale"+exe()),
-	}
-}
-
-// buildMu limits our use of "go build" to one at a time, so we don't
-// fight Go's built-in caching trying to do the same build concurrently.
-var buildMu sync.Mutex
-
-func build(t testing.TB, outDir string, targets ...string) {
-	buildMu.Lock()
-	defer buildMu.Unlock()
-
-	t0 := time.Now()
-	defer func() { t.Logf("built %s in %v", targets, time.Since(t0).Round(time.Millisecond)) }()
-
-	goBin := findGo(t)
+// CleanupBinaries cleans up any resources created by calls to BinaryDir, TailscaleBinary, or TailscaledBinary.
+// It should be called from TestMain after all tests have completed.
+func CleanupBinaries() {
+	buildOnce.Do(func() {})
+	if binDir != "" {
+		os.RemoveAll(binDir)
+	}
+}
+
+// BinaryDir returns a directory containing test tailscale and tailscaled binaries.
+// If any test calls BinaryDir, there must be a TestMain function that calls
+// CleanupBinaries after all tests are complete.
+func BinaryDir(tb testing.TB) string {
+	buildOnce.Do(func() {
+		binDir, buildErr = buildTestBinaries()
+	})
+	if buildErr != nil {
+		tb.Fatal(buildErr)
+	}
+	return binDir
+}
+
+// TailscaleBinary returns the path to the test tailscale binary.
+// If any test calls TailscaleBinary, there must be a TestMain function that calls
+// CleanupBinaries after all tests are complete.
+func TailscaleBinary(tb testing.TB) string {
+	return filepath.Join(BinaryDir(tb), "tailscale"+exe())
+}
+
+// TailscaledBinary returns the path to the test tailscaled binary.
+// If any test calls TailscaledBinary, there must be a TestMain function that calls
+// CleanupBinaries after all tests are complete.
+func TailscaledBinary(tb testing.TB) string {
+	return filepath.Join(BinaryDir(tb), "tailscaled"+exe())
+}
+
+var (
+	buildOnce sync.Once
+	buildErr  error
+	binDir    string
+)
+
+// buildTestBinaries builds tailscale and tailscaled.
+// It returns the dir containing the binaries.
+func buildTestBinaries() (string, error) {
+	bindir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", err
+	}
+	err = build(bindir, "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale")
+	if err != nil {
+		os.RemoveAll(bindir)
+		return "", err
+	}
+	return bindir, nil
+}
+
+func build(outDir string, targets ...string) error {
+	goBin, err := findGo()
+	if err != nil {
+		return err
+	}
 	cmd := exec.Command(goBin, "install")
 	if version.IsRace() {
 		cmd.Args = append(cmd.Args, "-race")
@@ -83,7 +113,7 @@ func build(t testing.TB, outDir string, targets ...string) {
 	cmd.Env = append(os.Environ(), "GOARCH="+runtime.GOARCH, "GOBIN="+outDir)
 	errOut, err := cmd.CombinedOutput()
 	if err == nil {
-		return
+		return nil
 	}
 	if strings.Contains(string(errOut), "when GOBIN is set") {
 		// Fallback slow path for cross-compiled binaries.
@@ -92,25 +122,25 @@ func build(t testing.TB, outDir string, targets ...string) {
 			cmd := exec.Command(goBin, "build", "-o", outFile, target)
 			cmd.Env = append(os.Environ(), "GOARCH="+runtime.GOARCH)
 			if errOut, err := cmd.CombinedOutput(); err != nil {
-				t.Fatalf("failed to build %v with %v: %v, %s", target, goBin, err, errOut)
+				return fmt.Errorf("failed to build %v with %v: %v, %s", target, goBin, err, errOut)
 			}
 		}
-		return
+		return nil
 	}
-	t.Fatalf("failed to build %v with %v: %v, %s", targets, goBin, err, errOut)
+	return fmt.Errorf("failed to build %v with %v: %v, %s", targets, goBin, err, errOut)
 }
 
-func findGo(t testing.TB) string {
+func findGo() (string, error) {
 	goBin := filepath.Join(runtime.GOROOT(), "bin", "go"+exe())
 	if fi, err := os.Stat(goBin); err != nil {
 		if os.IsNotExist(err) {
-			t.Fatalf("failed to find go at %v", goBin)
+			return "", fmt.Errorf("failed to find go at %v", goBin)
 		}
-		t.Fatalf("looking for go binary: %v", err)
+		return "", fmt.Errorf("looking for go binary: %v", err)
 	} else if !fi.Mode().IsRegular() {
-		t.Fatalf("%v is unexpected %v", goBin, fi.Mode())
+		return "", fmt.Errorf("%v is unexpected %v", goBin, fi.Mode())
 	}
-	return goBin
+	return goBin, nil
 }
 
 func exe() string {

@@ -52,6 +52,7 @@ func TestMain(m *testing.M) {
 	os.Setenv("TS_DISABLE_UPNP", "true")
 	flag.Parse()
 	v := m.Run()
+	CleanupBinaries()
 	if v != 0 {
 		os.Exit(v)
 	}
@@ -64,9 +65,7 @@ func TestMain(m *testing.M) {
 func TestOneNodeUpNoAuth(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n1 := newTestNode(t, env)
 	d1 := n1.StartDaemon(t)
@@ -83,9 +82,7 @@ func TestOneNodeUpNoAuth(t *testing.T) {
 func TestOneNodeExpiredKey(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n1 := newTestNode(t, env)
 	d1 := n1.StartDaemon(t)
@@ -121,12 +118,10 @@ func TestOneNodeExpiredKey(t *testing.T) {
 func TestCollectPanic(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n := newTestNode(t, env)
 
-	cmd := exec.Command(n.env.Binaries.Daemon, "--cleanup")
+	cmd := exec.Command(env.daemon, "--cleanup")
 	cmd.Env = append(os.Environ(),
 		"TS_PLEASE_PANIC=1",
 		"TS_LOG_TARGET="+n.env.LogCatcherServer.URL,
@@ -135,7 +130,7 @@ func TestCollectPanic(t *testing.T) {
 	t.Logf("initial run: %s", got)
 
 	// Now we run it again, and on start, it will upload the logs to logcatcher.
-	cmd = exec.Command(n.env.Binaries.Daemon, "--cleanup")
+	cmd = exec.Command(env.daemon, "--cleanup")
 	cmd.Env = append(os.Environ(), "TS_LOG_TARGET="+n.env.LogCatcherServer.URL)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Fatalf("cleanup failed: %v: %q", err, out)
@@ -154,9 +149,7 @@ func TestCollectPanic(t *testing.T) {
 // test Issue 2321: Start with UpdatePrefs should save prefs to disk
 func TestStateSavedOnStart(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n1 := newTestNode(t, env)
 	d1 := n1.StartDaemon(t)
@@ -192,9 +185,7 @@ func TestStateSavedOnStart(t *testing.T) {
 func TestOneNodeUpAuth(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins, configureControl(func(control *testcontrol.Server) {
+	env := newTestEnv(t, configureControl(func(control *testcontrol.Server) {
 		control.RequireAuth = true
 	}))
@@ -237,9 +228,7 @@ func TestOneNodeUpAuth(t *testing.T) {
 func TestTwoNodes(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	// Create two nodes:
 	n1 := newTestNode(t, env)
@@ -285,9 +274,7 @@ func TestTwoNodes(t *testing.T) {
 func TestNodeAddressIPFields(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n1 := newTestNode(t, env)
 	d1 := n1.StartDaemon(t)
@@ -313,9 +300,7 @@ func TestNodeAddressIPFields(t *testing.T) {
 func TestAddPingRequest(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n1 := newTestNode(t, env)
 	n1.StartDaemon(t)
@@ -369,9 +354,7 @@ func TestAddPingRequest(t *testing.T) {
 // be connected to control.
 func TestNoControlConnWhenDown(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n1 := newTestNode(t, env)
 	d1 := n1.StartDaemon(t)
@@ -412,9 +395,7 @@ func TestNoControlConnWhenDown(t *testing.T) {
 // without the GUI to kick off a Start.
 func TestOneNodeUpWindowsStyle(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	n1 := newTestNode(t, env)
 	n1.upFlagGOOS = "windows"
@@ -432,7 +413,8 @@ func TestOneNodeUpWindowsStyle(t *testing.T) {
 // or more nodes.
 type testEnv struct {
 	t        testing.TB
-	Binaries *Binaries
+	cli      string
+	daemon   string
 
 	LogCatcher       *LogCatcher
 	LogCatcherServer *httptest.Server
@@ -456,7 +438,7 @@ func (f configureControl) modifyTestEnv(te *testEnv) {
 // newTestEnv starts a bunch of services and returns a new test environment.
 // newTestEnv arranges for the environment's resources to be cleaned up on exit.
-func newTestEnv(t testing.TB, bins *Binaries, opts ...testEnvOpt) *testEnv {
+func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv {
 	if runtime.GOOS == "windows" {
 		t.Skip("not tested/working on Windows yet")
 	}
@@ -469,7 +451,8 @@ func newTestEnv(t testing.TB, bins *Binaries, opts ...testEnvOpt) *testEnv {
 	trafficTrap := new(trafficTrap)
 	e := &testEnv{
 		t:          t,
-		Binaries:   bins,
+		cli:        TailscaleBinary(t),
+		daemon:     TailscaledBinary(t),
 		LogCatcher: logc,
 		LogCatcherServer: httptest.NewServer(logc),
 		Control:    control,
@@ -666,7 +649,7 @@ func (n *testNode) StartDaemon(t testing.TB) *Daemon {
 }
 
 func (n *testNode) StartDaemonAsIPNGOOS(t testing.TB, ipnGOOS string) *Daemon {
-	cmd := exec.Command(n.env.Binaries.Daemon,
+	cmd := exec.Command(n.env.daemon,
 		"--tun=userspace-networking",
 		"--state="+n.stateFile,
 		"--socket="+n.sockFile,
@@ -810,7 +793,7 @@ func (n *testNode) AwaitNeedsLogin(t testing.TB) {
 // Tailscale returns a command that runs the tailscale CLI with the provided arguments.
 // It does not start the process.
 func (n *testNode) Tailscale(arg ...string) *exec.Cmd {
-	cmd := exec.Command(n.env.Binaries.CLI, "--socket="+n.sockFile)
+	cmd := exec.Command(n.env.cli, "--socket="+n.sockFile)
 	cmd.Args = append(cmd.Args, arg...)
 	cmd.Dir = n.dir
 	cmd.Env = append(os.Environ(),

@@ -35,7 +35,9 @@ import (
 type Harness struct {
 	testerDialer proxy.Dialer
 	testerDir    string
-	bins         *integration.Binaries
+	binaryDir    string
+	cli          string
+	daemon       string
 	pubKey       string
 	signer       ssh.Signer
 	cs           *testcontrol.Server
@@ -134,11 +136,11 @@ func newHarness(t *testing.T) *Harness {
 	loginServer := fmt.Sprintf("http://%s", ln.Addr())
 	t.Logf("loginServer: %s", loginServer)
 
-	bins := integration.BuildTestBinaries(t)
-
 	h := &Harness{
 		pubKey:         string(pubkey),
-		bins:           bins,
+		binaryDir:      integration.BinaryDir(t),
+		cli:            integration.TailscaleBinary(t),
+		daemon:         integration.TailscaledBinary(t),
 		signer:         signer,
 		loginServerURL: loginServer,
 		cs:             cs,
@@ -146,7 +148,7 @@ func newHarness(t *testing.T) *Harness {
 		ipMap: ipMap,
 	}
 
-	h.makeTestNode(t, bins, loginServer)
+	h.makeTestNode(t, loginServer)
 
 	return h
 }
@@ -156,7 +158,7 @@ func (h *Harness) Tailscale(t *testing.T, args ...string) []byte {
 	args = append([]string{"--socket=" + filepath.Join(h.testerDir, "sock")}, args...)
-	cmd := exec.Command(h.bins.CLI, args...)
+	cmd := exec.Command(h.cli, args...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		t.Fatal(err)
@@ -169,7 +171,7 @@ func (h *Harness) Tailscale(t *testing.T, args ...string) []byte {
 // enables us to make connections to and from the tailscale network being
 // tested. This mutates the Harness to allow tests to dial into the tailscale
 // network as well as control the tester's tailscaled.
-func (h *Harness) makeTestNode(t *testing.T, bins *integration.Binaries, controlURL string) {
+func (h *Harness) makeTestNode(t *testing.T, controlURL string) {
 	dir := t.TempDir()
 	h.testerDir = dir
@@ -179,7 +181,7 @@ func (h *Harness) makeTestNode(t *testing.T, bins *integration.Binaries, controlURL string) {
 	}
 
 	cmd := exec.Command(
-		bins.Daemon,
+		h.daemon,
 		"--tun=userspace-networking",
 		"--state="+filepath.Join(dir, "state.json"),
 		"--socket="+filepath.Join(dir, "sock"),
@@ -222,7 +224,7 @@ outer:
 		}
 	}
 
-	run(t, dir, bins.CLI,
+	run(t, dir, h.cli,
 		"--socket="+filepath.Join(dir, "sock"),
 		"up",
 		"--login-server="+controlURL,

@@ -17,7 +17,6 @@ import (
 	"testing"
 	"text/template"
 
-	"tailscale.com/tstest/integration"
 	"tailscale.com/types/logger"
 )
@@ -153,15 +152,15 @@ in {
   systemd.services.tailscaled.environment."TS_LOG_TARGET" = "{{.LogTarget}}";
 }`
 
-func copyUnit(t *testing.T, bins *integration.Binaries) {
+func (h *Harness) copyUnit(t *testing.T) {
 	t.Helper()
 
 	data, err := os.ReadFile("../../../cmd/tailscaled/tailscaled.service")
 	if err != nil {
 		t.Fatal(err)
 	}
-	os.MkdirAll(filepath.Join(bins.Dir, "systemd"), 0755)
-	err = os.WriteFile(filepath.Join(bins.Dir, "systemd", "tailscaled.service"), data, 0666)
+	os.MkdirAll(filepath.Join(h.binaryDir, "systemd"), 0755)
+	err = os.WriteFile(filepath.Join(h.binaryDir, "systemd", "tailscaled.service"), data, 0666)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -172,7 +171,7 @@ func (h *Harness) makeNixOSImage(t *testing.T, d Distro, cdir string) string {
 		t.Skip("https://github.com/NixOS/nixpkgs/issues/131098")
 	}
 
-	copyUnit(t, h.bins)
+	h.copyUnit(t)
 	dir := t.TempDir()
 	fname := filepath.Join(dir, d.Name+".nix")
 	fout, err := os.Create(fname)
@@ -185,7 +184,7 @@ func (h *Harness) makeNixOSImage(t *testing.T, d Distro, cdir string) string {
 		BinPath   string
 		LogTarget string
 	}{
-		BinPath:   h.bins.Dir,
+		BinPath:   h.binaryDir,
 		LogTarget: h.loginServerURL,
 	})
 	if err != nil {

@@ -290,7 +290,6 @@ func checkCachedImageHash(t *testing.T, d Distro, cacheDir string) string {
 }
 
 func (h *Harness) copyBinaries(t *testing.T, d Distro, conn *ssh.Client) {
-	bins := h.bins
 	if strings.HasPrefix(d.Name, "nixos") {
 		return
 	}
@@ -305,8 +304,8 @@ func (h *Harness) copyBinaries(t *testing.T, d Distro, conn *ssh.Client) {
 	mkdir(t, cli, "/etc/default")
 	mkdir(t, cli, "/var/lib/tailscale")
 
-	copyFile(t, cli, bins.Daemon, "/usr/sbin/tailscaled")
-	copyFile(t, cli, bins.CLI, "/usr/bin/tailscale")
+	copyFile(t, cli, h.daemon, "/usr/sbin/tailscaled")
+	copyFile(t, cli, h.cli, "/usr/bin/tailscale")
 
 	// TODO(Xe): revisit this assumption before it breaks the test.
 	copyFile(t, cli, "../../../cmd/tailscaled/tailscaled.defaults", "/etc/default/tailscaled")

@@ -30,6 +30,7 @@ import (
 	"golang.org/x/sync/semaphore"
 	"inet.af/netaddr"
 	"tailscale.com/tstest"
+	"tailscale.com/tstest/integration"
 	"tailscale.com/types/logger"
 )
@@ -52,6 +53,13 @@ var (
 	}()
 )
 
+func TestMain(m *testing.M) {
+	flag.Parse()
+	v := m.Run()
+	integration.CleanupBinaries()
+	os.Exit(v)
+}
+
 func TestDownloadImages(t *testing.T) {
 	if !*runVMTests {
 		t.Skip("not running integration tests (need --run-vm-tests)")
