Merge branch 'main' into claude/analyze-test-coverage-01BnJsiXhLinMJyRe78R29e9

Signed-off-by: Ofer Erez <ofer43211@users.noreply.github.com>
pull/17963/head
Ofer Erez 2 weeks ago committed by GitHub
commit d6f61e54b8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -0,0 +1,17 @@
PRs welcome! But please file bugs first and explain the problem or
motivation. For new or changed functionality, strike up a discussion
and get agreement on the design/solution before spending too much time writing
code.
Commit messages should [reference
bugs](https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls).
We require [Developer Certificate of
Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin) (DCO)
`Signed-off-by` lines in commits. (`git commit -s`)
Please squash your code review edits & force push. Multiple commits in
a PR are fine, but only if they're each logically separate and all tests pass
at each stage. No fixup commits.
See [commit-messages.md](docs/commit-messages.md) (or skim `git log`) for our commit message style.

@ -10,7 +10,7 @@ on:
- '.github/workflows/checklocks.yml' - '.github/workflows/checklocks.yml'
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@ -18,7 +18,7 @@ jobs:
runs-on: [ ubuntu-latest ] runs-on: [ ubuntu-latest ]
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Build checklocks - name: Build checklocks
run: ./tool/go build -o /tmp/checklocks gvisor.dev/gvisor/tools/checklocks/cmd/checklocks run: ./tool/go build -o /tmp/checklocks gvisor.dev/gvisor/tools/checklocks/cmd/checklocks

@ -23,7 +23,7 @@ on:
- cron: '31 14 * * 5' - cron: '31 14 * * 5'
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@ -45,17 +45,17 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# Install a more recent Go that understands modern go.mod content. # Install a more recent Go that understands modern go.mod content.
- name: Install Go - name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with: with:
go-version-file: go.mod go-version-file: go.mod
# Initializes the CodeQL tools for scanning. # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - name: Initialize CodeQL
uses: github/codeql-action/init@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5
with: with:
languages: ${{ matrix.language }} languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file. # If you wish to specify custom queries, you can do so here or in a config file.
@ -66,7 +66,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below) # If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild - name: Autobuild
uses: github/codeql-action/autobuild@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5
# Command-line programs to run using the OS shell. # Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl # 📚 https://git.io/JvXDl
@ -80,4 +80,4 @@ jobs:
# make release # make release
- name: Perform CodeQL Analysis - name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5

@ -4,12 +4,10 @@ on:
branches: branches:
- main - main
pull_request: pull_request:
branches:
- "*"
jobs: jobs:
deploy: deploy:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Build Docker image" - name: "Build Docker image"
run: docker build . run: docker build .

@ -17,11 +17,11 @@ jobs:
id-token: "write" id-token: "write"
contents: "read" contents: "read"
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with: with:
ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}"
- uses: "DeterminateSystems/nix-installer-action@main" - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20
- uses: "DeterminateSystems/flakehub-push@main" - uses: DeterminateSystems/flakehub-push@71f57208810a5d299fc6545350981de98fdbc860 # v6
with: with:
visibility: "public" visibility: "public"
tag: "${{ inputs.tag }}" tag: "${{ inputs.tag }}"

@ -15,7 +15,7 @@ permissions:
pull-requests: read pull-requests: read
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@ -23,18 +23,17 @@ jobs:
name: lint name: lint
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with: with:
go-version-file: go.mod go-version-file: go.mod
cache: false cache: false
- name: golangci-lint - name: golangci-lint
# Note: this is the 'v6.1.0' tag as of 2024-08-21 uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86
with: with:
version: v1.60 version: v2.4.0
# Show only new issues if it's a pull request. # Show only new issues if it's a pull request.
only-new-issues: true only-new-issues: true

@ -14,7 +14,7 @@ jobs:
steps: steps:
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install govulncheck - name: Install govulncheck
run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest
@ -24,13 +24,13 @@ jobs:
- name: Post to slack - name: Post to slack
if: failure() && github.event_name == 'schedule' if: failure() && github.event_name == 'schedule'
uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 # v1.27.0 uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
env:
SLACK_BOT_TOKEN: ${{ secrets.GOVULNCHECK_BOT_TOKEN }}
with: with:
channel-id: 'C05PXRM304B' method: chat.postMessage
token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }}
payload: | payload: |
{ {
"channel": "C08FGKZCQTW",
"blocks": [ "blocks": [
{ {
"type": "section", "type": "section",

@ -1,16 +1,18 @@
name: test installer.sh name: test installer.sh
on: on:
schedule:
- cron: '0 15 * * *' # 10am EST (UTC-4/5)
push: push:
branches: branches:
- "main" - "main"
paths: paths:
- scripts/installer.sh - scripts/installer.sh
- .github/workflows/installer.yml
pull_request: pull_request:
branches:
- "*"
paths: paths:
- scripts/installer.sh - scripts/installer.sh
- .github/workflows/installer.yml
jobs: jobs:
test: test:
@ -29,13 +31,11 @@ jobs:
- "debian:stable-slim" - "debian:stable-slim"
- "debian:testing-slim" - "debian:testing-slim"
- "debian:sid-slim" - "debian:sid-slim"
- "ubuntu:18.04"
- "ubuntu:20.04" - "ubuntu:20.04"
- "ubuntu:22.04" - "ubuntu:22.04"
- "ubuntu:23.04" - "ubuntu:24.04"
- "elementary/docker:stable" - "elementary/docker:stable"
- "elementary/docker:unstable" - "elementary/docker:unstable"
- "parrotsec/core:lts-amd64"
- "parrotsec/core:latest" - "parrotsec/core:latest"
- "kalilinux/kali-rolling" - "kalilinux/kali-rolling"
- "kalilinux/kali-dev" - "kalilinux/kali-dev"
@ -48,7 +48,7 @@ jobs:
- "opensuse/leap:latest" - "opensuse/leap:latest"
- "opensuse/tumbleweed:latest" - "opensuse/tumbleweed:latest"
- "archlinux:latest" - "archlinux:latest"
- "alpine:3.14" - "alpine:3.21"
- "alpine:latest" - "alpine:latest"
- "alpine:edge" - "alpine:edge"
deps: deps:
@ -58,10 +58,6 @@ jobs:
# Check a few images with wget rather than curl. # Check a few images with wget rather than curl.
- { image: "debian:oldstable-slim", deps: "wget" } - { image: "debian:oldstable-slim", deps: "wget" }
- { image: "debian:sid-slim", deps: "wget" } - { image: "debian:sid-slim", deps: "wget" }
- { image: "ubuntu:23.04", deps: "wget" }
# Ubuntu 16.04 also needs apt-transport-https installed.
- { image: "ubuntu:16.04", deps: "curl apt-transport-https" }
- { image: "ubuntu:16.04", deps: "wget apt-transport-https" }
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: ${{ matrix.image }} image: ${{ matrix.image }}
@ -76,10 +72,10 @@ jobs:
# tar and gzip are needed by the actions/checkout below. # tar and gzip are needed by the actions/checkout below.
run: yum install -y --allowerasing tar gzip ${{ matrix.deps }} run: yum install -y --allowerasing tar gzip ${{ matrix.deps }}
if: | if: |
contains(matrix.image, 'centos') contains(matrix.image, 'centos') ||
|| contains(matrix.image, 'oraclelinux') contains(matrix.image, 'oraclelinux') ||
|| contains(matrix.image, 'fedora') contains(matrix.image, 'fedora') ||
|| contains(matrix.image, 'amazonlinux') contains(matrix.image, 'amazonlinux')
- name: install dependencies (zypper) - name: install dependencies (zypper)
# tar and gzip are needed by the actions/checkout below. # tar and gzip are needed by the actions/checkout below.
run: zypper --non-interactive install tar gzip ${{ matrix.deps }} run: zypper --non-interactive install tar gzip ${{ matrix.deps }}
@ -89,16 +85,13 @@ jobs:
apt-get update apt-get update
apt-get install -y ${{ matrix.deps }} apt-get install -y ${{ matrix.deps }}
if: | if: |
contains(matrix.image, 'debian') contains(matrix.image, 'debian') ||
|| contains(matrix.image, 'ubuntu') contains(matrix.image, 'ubuntu') ||
|| contains(matrix.image, 'elementary') contains(matrix.image, 'elementary') ||
|| contains(matrix.image, 'parrotsec') contains(matrix.image, 'parrotsec') ||
|| contains(matrix.image, 'kalilinux') contains(matrix.image, 'kalilinux')
- name: checkout - name: checkout
# We cannot use v4, as it requires a newer glibc version than some of the uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# tested images provide. See
# https://github.com/actions/checkout/issues/1487
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
- name: run installer - name: run installer
run: scripts/installer.sh run: scripts/installer.sh
# Package installation can fail in docker because systemd is not running # Package installation can fail in docker because systemd is not running
@ -107,3 +100,30 @@ jobs:
continue-on-error: true continue-on-error: true
- name: check tailscale version - name: check tailscale version
run: tailscale --version run: tailscale --version
notify-slack:
needs: test
runs-on: ubuntu-latest
steps:
- name: Notify Slack of failure on scheduled runs
if: failure() && github.event_name == 'schedule'
uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
with:
webhook: ${{ secrets.SLACK_WEBHOOK_URL }}
webhook-type: incoming-webhook
payload: |
{
"attachments": [{
"title": "Tailscale installer test failed",
"title_link": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
"text": "One or more OSes in the test matrix failed. See the run for details.",
"fields": [
{
"title": "Ref",
"value": "${{ github.ref_name }}",
"short": true
}
],
"footer": "${{ github.workflow }} on schedule",
"color": "danger"
}]
}

@ -9,7 +9,7 @@ on:
# Cancel workflow run if there is a newer push to the same PR for which it is # Cancel workflow run if there is a newer push to the same PR for which it is
# running # running
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@ -17,7 +17,7 @@ jobs:
runs-on: [ ubuntu-latest ] runs-on: [ ubuntu-latest ]
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Build and lint Helm chart - name: Build and lint Helm chart
run: | run: |
eval `./tool/go run ./cmd/mkversion` eval `./tool/go run ./cmd/mkversion`

@ -0,0 +1,27 @@
# Run some natlab integration tests.
# See https://github.com/tailscale/tailscale/issues/13038
name: "natlab-integrationtest"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
on:
pull_request:
paths:
- "tstest/integration/nat/nat_test.go"
jobs:
natlab-integrationtest:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install qemu
run: |
sudo rm /var/lib/man-db/auto-update
sudo apt-get -y update
sudo apt-get -y remove man-db
sudo apt-get install -y qemu-system-x86 qemu-utils
- name: Run natlab integration tests
run: |
./tool/go test -v -run=^TestEasyEasy$ -timeout=3m -count=1 ./tstest/integration/nat --run-vm-tests

@ -0,0 +1,29 @@
# Pin images used in github actions to a hash instead of a version tag.
name: pin-github-actions
on:
pull_request:
branches:
- main
paths:
- ".github/workflows/**"
workflow_dispatch:
permissions:
contents: read
pull-requests: read
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
run:
name: pin-github-actions
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: pin
run: make pin-github-actions
- name: check for changed workflow files
run: git diff --no-ext-diff --exit-code .github/workflows || (echo "Some github actions versions need pinning, run make pin-github-actions."; exit 1)

@ -0,0 +1,30 @@
name: request-dataplane-review
on:
pull_request:
paths:
- ".github/workflows/request-dataplane-review.yml"
- "**/*derp*"
- "**/derp*/**"
- "!**/depaware.txt"
jobs:
request-dataplane-review:
name: Request Dataplane Review
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Get access token
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
id: generate-token
with:
# Get token for app: https://github.com/apps/change-visibility-bot
app-id: ${{ secrets.VISIBILITY_BOT_APP_ID }}
private-key: ${{ secrets.VISIBILITY_BOT_APP_PRIVATE_KEY }}
- name: Add reviewers
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
url: ${{ github.event.pull_request.html_url }}
run: |
gh pr edit "$url" --add-reviewer tailscale/dataplane

@ -3,7 +3,7 @@
name: "ssh-integrationtest" name: "ssh-integrationtest"
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
on: on:
@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Run SSH integration tests - name: Run SSH integration tests
run: | run: |
make sshintegrationtest make sshintegrationtest

@ -15,6 +15,10 @@ env:
# - false: we expect fuzzing to be happy, and should report failure if it's not. # - false: we expect fuzzing to be happy, and should report failure if it's not.
# - true: we expect fuzzing is broken, and should report failure if it start working. # - true: we expect fuzzing is broken, and should report failure if it start working.
TS_FUZZ_CURRENTLY_BROKEN: false TS_FUZZ_CURRENTLY_BROKEN: false
# GOMODCACHE is the same definition on all OSes. Within the workspace, we use
# toplevel directories "src" (for the checked out source code), and "gomodcache"
# and other caches as siblings to follow.
GOMODCACHE: ${{ github.workspace }}/gomodcache
on: on:
push: push:
@ -38,8 +42,42 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
gomod-cache:
runs-on: ubuntu-24.04
outputs:
cache-key: ${{ steps.hash.outputs.key }}
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Compute cache key from go.{mod,sum}
id: hash
run: echo "key=gomod-cross3-${{ hashFiles('src/go.mod', 'src/go.sum') }}" >> $GITHUB_OUTPUT
# See if the cache entry already exists to avoid downloading it
# and doing the cache write again.
- id: check-cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4
with:
path: gomodcache # relative to workspace; see env note at top of file
key: ${{ steps.hash.outputs.key }}
lookup-only: true
enableCrossOsArchive: true
- name: Download modules
if: steps.check-cache.outputs.cache-hit != 'true'
working-directory: src
run: go mod download
- name: Cache Go modules
if: steps.check-cache.outputs.cache-hit != 'true'
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache # relative to workspace; see env note at top of file
key: ${{ steps.hash.outputs.key }}
enableCrossOsArchive: true
race-root-integration: race-root-integration:
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
strategy: strategy:
fail-fast: false # don't abort the entire matrix if one element fails fail-fast: false # don't abort the entire matrix if one element fails
matrix: matrix:
@ -50,10 +88,20 @@ jobs:
- shard: '4/4' - shard: '4/4'
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build test wrapper - name: build test wrapper
working-directory: src
run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper
- name: integration tests as root - name: integration tests as root
working-directory: src
run: PATH=$PWD/tool:$PATH /tmp/testwrapper -exec "sudo -E" -race ./tstest/integration/ run: PATH=$PWD/tool:$PATH /tmp/testwrapper -exec "sudo -E" -race ./tstest/integration/
env: env:
TS_TEST_SHARD: ${{ matrix.shard }} TS_TEST_SHARD: ${{ matrix.shard }}
@ -64,7 +112,6 @@ jobs:
matrix: matrix:
include: include:
- goarch: amd64 - goarch: amd64
coverflags: "-coverprofile=/tmp/coverage.out"
- goarch: amd64 - goarch: amd64
buildflags: "-race" buildflags: "-race"
shard: '1/3' shard: '1/3'
@ -75,12 +122,21 @@ jobs:
buildflags: "-race" buildflags: "-race"
shard: '3/3' shard: '3/3'
- goarch: "386" # thanks yaml - goarch: "386" # thanks yaml
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: Restore Cache - name: Restore Cache
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: unlike the other setups, this is only grabbing the mod download # Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache # cache, rather than the whole mod directory, as the download cache
@ -88,7 +144,6 @@ jobs:
# fetched and extracted by tar # fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build ~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes). # cached changes (e.g. path above changes).
@ -98,13 +153,14 @@ jobs:
${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2- ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-
- name: build all - name: build all
if: matrix.buildflags == '' # skip on race builder if: matrix.buildflags == '' # skip on race builder
working-directory: src
run: ./tool/go build ${{matrix.buildflags}} ./... run: ./tool/go build ${{matrix.buildflags}} ./...
env: env:
GOARCH: ${{ matrix.goarch }} GOARCH: ${{ matrix.goarch }}
- name: build variant CLIs - name: build variant CLIs
if: matrix.buildflags == '' # skip on race builder if: matrix.buildflags == '' # skip on race builder
working-directory: src
run: | run: |
export TS_USE_TOOLCHAIN=1
./build_dist.sh --extra-small ./cmd/tailscaled ./build_dist.sh --extra-small ./cmd/tailscaled
./build_dist.sh --box ./cmd/tailscaled ./build_dist.sh --box ./cmd/tailscaled
./build_dist.sh --extra-small --box ./cmd/tailscaled ./build_dist.sh --extra-small --box ./cmd/tailscaled
@ -117,24 +173,24 @@ jobs:
sudo apt-get -y update sudo apt-get -y update
sudo apt-get -y install qemu-user sudo apt-get -y install qemu-user
- name: build test wrapper - name: build test wrapper
working-directory: src
run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper
- name: test all - name: test all
run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ${{matrix.coverflags}} ./... ${{matrix.buildflags}} working-directory: src
run: NOBASHDEBUG=true NOPWSHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}}
env: env:
GOARCH: ${{ matrix.goarch }} GOARCH: ${{ matrix.goarch }}
TS_TEST_SHARD: ${{ matrix.shard }} TS_TEST_SHARD: ${{ matrix.shard }}
- name: Publish to coveralls.io
if: matrix.coverflags != '' # only publish results if we've tracked coverage
uses: shogo82148/actions-goveralls@v1
with:
path-to-profile: /tmp/coverage.out
- name: bench all - name: bench all
working-directory: src
run: ./tool/go test ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$ $(for x in $(git grep -l "^func Benchmark" | xargs dirname | sort | uniq); do echo "./$x"; done) run: ./tool/go test ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$ $(for x in $(git grep -l "^func Benchmark" | xargs dirname | sort | uniq); do echo "./$x"; done)
env: env:
GOARCH: ${{ matrix.goarch }} GOARCH: ${{ matrix.goarch }}
- name: check that no tracked files changed - name: check that no tracked files changed
working-directory: src
run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1) run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)
- name: check that no new files were added - name: check that no new files were added
working-directory: src
run: | run: |
# Note: The "error: pathspec..." you see below is normal! # Note: The "error: pathspec..." you see below is normal!
# In the success case in which there are no new untracked files, # In the success case in which there are no new untracked files,
@ -145,82 +201,141 @@ jobs:
echo "Build/test created untracked files in the repo (file names above)." echo "Build/test created untracked files in the repo (file names above)."
exit 1 exit 1
fi fi
- name: Tidy cache
working-directory: src
shell: bash
run: |
find $(go env GOCACHE) -type f -mmin +90 -delete
windows: windows:
runs-on: windows-2022 # windows-8vpu is a 2022 GitHub-managed runner in our
# org with 8 cores and 32 GB of RAM:
# https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/1
runs-on: windows-8vcpu
needs: gomod-cache
name: Windows (${{ matrix.name || matrix.shard}})
strategy:
fail-fast: false # don't abort the entire matrix if one element fails
matrix:
include:
- key: "win-bench"
name: "benchmarks"
- key: "win-tool-go"
name: "./tool/go"
- key: "win-shard-1-2"
shard: "1/2"
- key: "win-shard-2-2"
shard: "2/2"
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Install Go - name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 if: matrix.key != 'win-tool-go'
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with: with:
go-version-file: go.mod go-version-file: src/go.mod
cache: false cache: false
- name: Restore Go module cache
if: matrix.key != 'win-tool-go'
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: Restore Cache - name: Restore Cache
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 if: matrix.key != 'win-tool-go'
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build ~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes). # cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} key: ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: | restore-keys: |
${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-go-2- ${{ github.job }}-${{ matrix.key }}-go-2-
- name: test-tool-go
if: matrix.key == 'win-tool-go'
working-directory: src
run: ./tool/go version
- name: test - name: test
run: go run ./cmd/testwrapper ./... if: matrix.key != 'win-bench' && matrix.key != 'win-tool-go' # skip on bench builder
working-directory: src
run: go run ./cmd/testwrapper sharded:${{ matrix.shard }}
- name: bench all - name: bench all
if: matrix.key == 'win-bench'
working-directory: src
# Don't use -bench=. -benchtime=1x. # Don't use -bench=. -benchtime=1x.
# Somewhere in the layers (powershell?) # Somewhere in the layers (powershell?)
# the equals signs cause great confusion. # the equals signs cause great confusion.
run: go test ./... -bench . -benchtime 1x -run "^$" run: go test ./... -bench . -benchtime 1x -run "^$"
- name: Tidy cache
if: matrix.key != 'win-tool-go'
working-directory: src
shell: bash
run: |
find $(go env GOCACHE) -type f -mmin +90 -delete
privileged: privileged:
runs-on: ubuntu-22.04 needs: gomod-cache
runs-on: ubuntu-24.04
container: container:
image: golang:latest image: golang:latest
options: --privileged options: --privileged
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: chown - name: chown
working-directory: src
run: chown -R $(id -u):$(id -g) $PWD run: chown -R $(id -u):$(id -g) $PWD
- name: privileged tests - name: privileged tests
working-directory: src
run: ./tool/go test ./util/linuxfw ./derp/xdp run: ./tool/go test ./util/linuxfw ./derp/xdp
vm: vm:
needs: gomod-cache
runs-on: ["self-hosted", "linux", "vm"] runs-on: ["self-hosted", "linux", "vm"]
# VM tests run with some privileges, don't let them run on 3p PRs. # VM tests run with some privileges, don't let them run on 3p PRs.
if: github.repository == 'tailscale/tailscale' if: github.repository == 'tailscale/tailscale'
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: Run VM tests - name: Run VM tests
run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004 working-directory: src
run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2404
env: env:
HOME: "/var/lib/ghrunner/home" HOME: "/var/lib/ghrunner/home"
TMPDIR: "/tmp" TMPDIR: "/tmp"
XDG_CACHE_HOME: "/var/lib/ghrunner/cache" XDG_CACHE_HOME: "/var/lib/ghrunner/cache"
race-build:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: build all
run: ./tool/go install -race ./cmd/...
- name: build tests
run: ./tool/go test -race -exec=true ./...
cross: # cross-compile checks, build only. cross: # cross-compile checks, build only.
needs: gomod-cache
strategy: strategy:
fail-fast: false # don't abort the entire matrix if one element fails fail-fast: false # don't abort the entire matrix if one element fails
matrix: matrix:
@ -255,12 +370,14 @@ jobs:
- goos: openbsd - goos: openbsd
goarch: amd64 goarch: amd64
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Cache - name: Restore Cache
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: unlike the other setups, this is only grabbing the mod download # Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache # cache, rather than the whole mod directory, as the download cache
@ -268,7 +385,6 @@ jobs:
# fetched and extracted by tar # fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build ~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes). # cached changes (e.g. path above changes).
@ -276,7 +392,14 @@ jobs:
restore-keys: | restore-keys: |
${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build all - name: build all
working-directory: src
run: ./tool/go build ./cmd/... run: ./tool/go build ./cmd/...
env: env:
GOOS: ${{ matrix.goos }} GOOS: ${{ matrix.goos }}
@ -284,25 +407,42 @@ jobs:
GOARM: ${{ matrix.goarm }} GOARM: ${{ matrix.goarm }}
CGO_ENABLED: "0" CGO_ENABLED: "0"
- name: build tests - name: build tests
working-directory: src
run: ./tool/go test -exec=true ./... run: ./tool/go test -exec=true ./...
env: env:
GOOS: ${{ matrix.goos }} GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }} GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: "0" CGO_ENABLED: "0"
- name: Tidy cache
working-directory: src
shell: bash
run: |
find $(go env GOCACHE) -type f -mmin +90 -delete
ios: # similar to cross above, but iOS can't build most of the repo. So, just ios: # similar to cross above, but iOS can't build most of the repo. So, just
# make it build a few smoke packages. # make it build a few smoke packages.
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build some - name: build some
run: ./tool/go build ./ipn/... ./wgengine/ ./types/... ./control/controlclient working-directory: src
run: ./tool/go build ./ipn/... ./ssh/tailssh ./wgengine/ ./types/... ./control/controlclient
env: env:
GOOS: ios GOOS: ios
GOARCH: arm64 GOARCH: arm64
crossmin: # cross-compile for platforms where we only check cmd/tailscale{,d} crossmin: # cross-compile for platforms where we only check cmd/tailscale{,d}
needs: gomod-cache
strategy: strategy:
fail-fast: false # don't abort the entire matrix if one element fails fail-fast: false # don't abort the entire matrix if one element fails
matrix: matrix:
@ -313,13 +453,21 @@ jobs:
# AIX # AIX
- goos: aix - goos: aix
goarch: ppc64 goarch: ppc64
# Solaris
- goos: solaris
goarch: amd64
# illumos
- goos: illumos
goarch: amd64
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Cache - name: Restore Cache
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: unlike the other setups, this is only grabbing the mod download # Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache # cache, rather than the whole mod directory, as the download cache
@ -327,7 +475,6 @@ jobs:
# fetched and extracted by tar # fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build ~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes). # cached changes (e.g. path above changes).
@ -335,39 +482,64 @@ jobs:
restore-keys: | restore-keys: |
${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build core - name: build core
working-directory: src
run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled
env: env:
GOOS: ${{ matrix.goos }} GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }} GOARCH: ${{ matrix.goarch }}
GOARM: ${{ matrix.goarm }} GOARM: ${{ matrix.goarm }}
CGO_ENABLED: "0" CGO_ENABLED: "0"
- name: Tidy cache
working-directory: src
shell: bash
run: |
find $(go env GOCACHE) -type f -mmin +90 -delete
android: android:
# similar to cross above, but android fails to build a few pieces of the # similar to cross above, but android fails to build a few pieces of the
# repo. We should fix those pieces, they're small, but as a stepping stone, # repo. We should fix those pieces, they're small, but as a stepping stone,
# only test the subset of android that our past smoke test checked. # only test the subset of android that our past smoke test checked.
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
# Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
# and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch # and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
# some Android breakages early. # some Android breakages early.
# TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482 # TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build some - name: build some
run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/netmon ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version working-directory: src
run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/netmon ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version ./ssh/tailssh
env: env:
GOOS: android GOOS: android
GOARCH: arm64 GOARCH: arm64
wasm: # builds tsconnect, which is the only wasm build we support wasm: # builds tsconnect, which is the only wasm build we support
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Cache - name: Restore Cache
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: unlike the other setups, this is only grabbing the mod download # Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache # cache, rather than the whole mod directory, as the download cache
@ -375,7 +547,6 @@ jobs:
# fetched and extracted by tar # fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build ~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes). # cached changes (e.g. path above changes).
@ -383,23 +554,45 @@ jobs:
restore-keys: | restore-keys: |
${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-go-2- ${{ github.job }}-${{ runner.os }}-go-2-
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build tsconnect client - name: build tsconnect client
working-directory: src
run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli
env: env:
GOOS: js GOOS: js
GOARCH: wasm GOARCH: wasm
- name: build tsconnect server - name: build tsconnect server
working-directory: src
# Note, no GOOS/GOARCH in env on this build step, we're running a build # Note, no GOOS/GOARCH in env on this build step, we're running a build
# tool that handles the build itself. # tool that handles the build itself.
run: | run: |
./tool/go run ./cmd/tsconnect --fast-compression build ./tool/go run ./cmd/tsconnect --fast-compression build
./tool/go run ./cmd/tsconnect --fast-compression build-pkg ./tool/go run ./cmd/tsconnect --fast-compression build-pkg
- name: Tidy cache
working-directory: src
shell: bash
run: |
find $(go env GOCACHE) -type f -mmin +90 -delete
tailscale_go: # Subset of tests that depend on our custom Go toolchain. tailscale_go: # Subset of tests that depend on our custom Go toolchain.
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set GOMODCACHE env
run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: test tailscale_go - name: test tailscale_go
run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/... run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/...
@ -416,11 +609,13 @@ jobs:
# explicit 'if' condition, because the default condition for steps is # explicit 'if' condition, because the default condition for steps is
# 'success()', meaning "only run this if no previous steps failed". # 'success()', meaning "only run this if no previous steps failed".
if: github.event_name == 'pull_request' if: github.event_name == 'pull_request'
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
steps: steps:
- name: build fuzzers - name: build fuzzers
id: build id: build
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master # As of 21 October 2025, this repo doesn't tag releases, so this commit
# hash is just the tip of master.
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264
# continue-on-error makes steps.build.conclusion be 'success' even if # continue-on-error makes steps.build.conclusion be 'success' even if
# steps.build.outcome is 'failure'. This means this step does not # steps.build.outcome is 'failure'. This means this step does not
# contribute to the job's overall pass/fail evaluation. # contribute to the job's overall pass/fail evaluation.
@ -450,10 +645,12 @@ jobs:
# report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong # report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong
# value. # value.
if: steps.build.outcome == 'success' if: steps.build.outcome == 'success'
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master # As of 21 October 2025, this repo doesn't tag releases, so this commit
# hash is just the tip of master.
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264
with: with:
oss-fuzz-project-name: 'tailscale' oss-fuzz-project-name: 'tailscale'
fuzz-seconds: 300 fuzz-seconds: 150
dry-run: false dry-run: false
language: go language: go
- name: Set artifacts_path in env (workaround for actions/upload-artifact#176) - name: Set artifacts_path in env (workaround for actions/upload-artifact#176)
@ -461,78 +658,154 @@ jobs:
run: | run: |
echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV
- name: upload crash - name: upload crash
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: steps.run.outcome != 'success' && steps.build.outcome == 'success' if: steps.run.outcome != 'success' && steps.build.outcome == 'success'
with: with:
name: artifacts name: artifacts
path: ${{ env.artifacts_path }}/out/artifacts path: ${{ env.artifacts_path }}/out/artifacts
depaware: depaware:
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Set GOMODCACHE env
run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: check depaware - name: check depaware
run: | working-directory: src
export PATH=$(./tool/go env GOROOT)/bin:$PATH run: make depaware
find . -name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check
go_generate: go_generate:
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: check that 'go generate' is clean - name: check that 'go generate' is clean
working-directory: src
run: | run: |
pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp') pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp')
./tool/go generate $pkgs ./tool/go generate $pkgs
git add -N . # ensure untracked files are noticed
echo echo
echo echo
git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1) git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1)
go_mod_tidy: go_mod_tidy:
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: check that 'go mod tidy' is clean - name: check that 'go mod tidy' is clean
working-directory: src
run: | run: |
./tool/go mod tidy make tidy
echo echo
echo echo
git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1) git diff --name-only --exit-code || (echo "Please run 'make tidy'"; exit 1)
licenses: licenses:
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: check licenses - name: check licenses
run: ./scripts/check_license_headers.sh . working-directory: src
run: |
grep -q TestLicenseHeaders *.go || (echo "Expected a test named TestLicenseHeaders"; exit 1)
./tool/go test -v -run=TestLicenseHeaders
staticcheck: staticcheck:
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: gomod-cache
name: staticcheck (${{ matrix.name }})
strategy: strategy:
fail-fast: false # don't abort the entire matrix if one element fails fail-fast: false # don't abort the entire matrix if one element fails
matrix: matrix:
goos: ["linux", "windows", "darwin"]
goarch: ["amd64"]
include: include:
- goos: "windows" - name: "macOS"
goarch: "386" goos: "darwin"
goarch: "arm64"
flags: "--with-tags-all=darwin"
- name: "Windows"
goos: "windows"
goarch: "amd64"
flags: "--with-tags-all=windows"
- name: "Linux"
goos: "linux"
goarch: "amd64"
flags: "--with-tags-all=linux"
- name: "Portable (1/4)"
goos: "linux"
goarch: "amd64"
flags: "--without-tags-any=windows,darwin,linux --shard=1/4"
- name: "Portable (2/4)"
goos: "linux"
goarch: "amd64"
flags: "--without-tags-any=windows,darwin,linux --shard=2/4"
- name: "Portable (3/4)"
goos: "linux"
goarch: "amd64"
flags: "--without-tags-any=windows,darwin,linux --shard=3/4"
- name: "Portable (4/4)"
goos: "linux"
goarch: "amd64"
flags: "--without-tags-any=windows,darwin,linux --shard=4/4"
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: install staticcheck with:
run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck path: src
- name: run staticcheck - name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: run staticcheck (${{ matrix.name }})
working-directory: src
run: | run: |
export GOROOT=$(./tool/go env GOROOT) export GOROOT=$(./tool/go env GOROOT)
export PATH=$GOROOT/bin:$PATH ./tool/go run -exec \
staticcheck -- $(./tool/go list ./... | grep -v tempfork) "env GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }}" \
env: honnef.co/go/tools/cmd/staticcheck -- \
GOOS: ${{ matrix.goos }} $(./tool/go run ./tool/listpkgs --ignore-3p --goos=${{ matrix.goos }} --goarch=${{ matrix.goarch }} ${{ matrix.flags }} ./...)
GOARCH: ${{ matrix.goarch }}
notify_slack: notify_slack:
if: always() if: always()
@ -552,7 +825,7 @@ jobs:
- go_mod_tidy - go_mod_tidy
- licenses - licenses
- staticcheck - staticcheck
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
steps: steps:
- name: notify - name: notify
# Only notify slack for merged commits, not PR failures. # Only notify slack for merged commits, not PR failures.
@ -563,8 +836,10 @@ jobs:
# By having the job always run, but skipping its only step as needed, we # By having the job always run, but skipping its only step as needed, we
# let the CI output collapse nicely in PRs. # let the CI output collapse nicely in PRs.
if: failure() && github.event_name == 'push' if: failure() && github.event_name == 'push'
uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 # v1.27.0 uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
with: with:
webhook: ${{ secrets.SLACK_WEBHOOK_URL }}
webhook-type: incoming-webhook
payload: | payload: |
{ {
"attachments": [{ "attachments": [{
@ -576,13 +851,10 @@ jobs:
"color": "danger" "color": "danger"
}] }]
} }
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
check_mergeability: merge_blocker:
if: always() if: always()
runs-on: ubuntu-22.04 runs-on: ubuntu-24.04
needs: needs:
- android - android
- test - test
@ -604,3 +876,46 @@ jobs:
uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2 uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2
with: with:
jobs: ${{ toJSON(needs) }} jobs: ${{ toJSON(needs) }}
# This waits on all the jobs which must never fail. Branch protection rules
# enforce these. No flaky tests are allowed in these jobs. (We don't want flaky
# tests anywhere, really, but a flaky test here prevents merging.)
check_mergeability_strict:
if: always()
runs-on: ubuntu-24.04
needs:
- android
- cross
- crossmin
- ios
- tailscale_go
- depaware
- go_generate
- go_mod_tidy
- licenses
- staticcheck
steps:
- name: Decide if change is okay to merge
if: github.event_name != 'push'
uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2
with:
jobs: ${{ toJSON(needs) }}
check_mergeability:
if: always()
runs-on: ubuntu-24.04
needs:
- check_mergeability_strict
- test
- windows
- vm
- wasm
- fuzz
- race-root-integration
- privileged
steps:
- name: Decide if change is okay to merge
if: github.event_name != 'push'
uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2
with:
jobs: ${{ toJSON(needs) }}

@ -8,11 +8,11 @@ on:
- main - main
paths: paths:
- go.mod - go.mod
- .github/workflows/update-flakes.yml - .github/workflows/update-flake.yml
workflow_dispatch: workflow_dispatch:
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@ -21,22 +21,21 @@ jobs:
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Run update-flakes - name: Run update-flakes
run: ./update-flake.sh run: ./update-flake.sh
- name: Get access token - name: Get access token
uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
id: generate-token id: generate-token
with: with:
app_id: ${{ secrets.LICENSING_APP_ID }} # Get token for app: https://github.com/apps/tailscale-code-updater
installation_retrieval_mode: "id" app-id: ${{ secrets.CODE_UPDATER_APP_ID }}
installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }} private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }}
private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }}
- name: Send pull request - name: Send pull request
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f #v7.0.5 uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8
with: with:
token: ${{ steps.generate-token.outputs.token }} token: ${{ steps.generate-token.outputs.token }}
author: Flakes Updater <noreply+flakes-updater@tailscale.com> author: Flakes Updater <noreply+flakes-updater@tailscale.com>

@ -5,7 +5,7 @@ on:
workflow_dispatch: workflow_dispatch:
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@ -14,7 +14,7 @@ jobs:
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Run go get - name: Run go get
run: | run: |
@ -23,19 +23,16 @@ jobs:
./tool/go mod tidy ./tool/go mod tidy
- name: Get access token - name: Get access token
uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
id: generate-token id: generate-token
with: with:
# TODO(will): this should use the code updater app rather than licensing. # Get token for app: https://github.com/apps/tailscale-code-updater
# It has the same permissions, so not a big deal, but still. app-id: ${{ secrets.CODE_UPDATER_APP_ID }}
app_id: ${{ secrets.LICENSING_APP_ID }} private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }}
installation_retrieval_mode: "id"
installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }}
private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }}
- name: Send pull request - name: Send pull request
id: pull-request id: pull-request
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f #v7.0.5 uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8
with: with:
token: ${{ steps.generate-token.outputs.token }} token: ${{ steps.generate-token.outputs.token }}
author: OSS Updater <noreply+oss-updater@tailscale.com> author: OSS Updater <noreply+oss-updater@tailscale.com>

@ -0,0 +1,38 @@
name: tailscale.com/cmd/vet
env:
HOME: ${{ github.workspace }}
# GOMODCACHE is the same definition on all OSes. Within the workspace, we use
# toplevel directories "src" (for the checked out source code), and "gomodcache"
# and other caches as siblings to follow.
GOMODCACHE: ${{ github.workspace }}/gomodcache
on:
push:
branches:
- main
- "release-branch/*"
paths:
- "**.go"
pull_request:
paths:
- "**.go"
jobs:
vet:
runs-on: [ self-hosted, linux ]
timeout-minutes: 5
steps:
- name: Check out code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Build 'go vet' tool
working-directory: src
run: ./tool/go build -o /tmp/vettool tailscale.com/cmd/vet
- name: Run 'go vet'
working-directory: src
run: ./tool/go vet -vettool=/tmp/vettool tailscale.com/...

@ -3,8 +3,6 @@ on:
workflow_dispatch: workflow_dispatch:
# For now, only run on requests, not the main branches. # For now, only run on requests, not the main branches.
pull_request: pull_request:
branches:
- "*"
paths: paths:
- "client/web/**" - "client/web/**"
- ".github/workflows/webclient.yml" - ".github/workflows/webclient.yml"
@ -15,7 +13,7 @@ on:
# - main # - main
concurrency: concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@ -24,7 +22,7 @@ jobs:
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install deps - name: Install deps
run: ./tool/yarn --cwd client/web run: ./tool/yarn --cwd client/web
- name: Run lint - name: Run lint

3
.gitignore vendored

@ -49,3 +49,6 @@ client/web/build/assets
*.xcworkspacedata *.xcworkspacedata
/tstest/tailmac/bin /tstest/tailmac/bin
/tstest/tailmac/build /tstest/tailmac/build
# Ignore personal IntelliJ settings
.idea/

@ -1,43 +1,20 @@
version: "2"
# Configuration for how we run golangci-lint
# Timeout of 5m was the default in v1.
run:
timeout: 5m
linters: linters:
# Don't enable any linters by default; just the ones that we explicitly # Don't enable any linters by default; just the ones that we explicitly
# enable in the list below. # enable in the list below.
disable-all: true default: none
enable: enable:
- bidichk - bidichk
- gofmt
- goimports
- govet - govet
- misspell - misspell
- revive - revive
settings:
# Configuration for how we run golangci-lint
run:
timeout: 5m
issues:
# Excluding configuration per-path, per-linter, per-text and per-source
exclude-rules:
# These are forks of an upstream package and thus are exempt from stylistic
# changes that would make pulling in upstream changes harder.
- path: tempfork/.*\.go
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
- path: util/singleflight/.*\.go
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
# Per-linter settings are contained in this top-level key
linters-settings:
# Enable all rules by default; we don't use invisible unicode runes.
bidichk:
gofmt:
rewrite-rules:
- pattern: 'interface{}'
replacement: 'any'
goimports:
govet:
# Matches what we use in corp as of 2023-12-07 # Matches what we use in corp as of 2023-12-07
govet:
enable: enable:
- asmdecl - asmdecl
- assign - assign
@ -77,12 +54,8 @@ linters-settings:
# NOTE(andrew-d): this doesn't currently work because the printf # NOTE(andrew-d): this doesn't currently work because the printf
# analyzer doesn't support type declarations # analyzer doesn't support type declarations
#- github.com/tailscale/tailscale/types/logger.Logf #- github.com/tailscale/tailscale/types/logger.Logf
misspell:
revive: revive:
enable-all-rules: false enable-all-rules: false
ignore-generated-header: true
rules: rules:
- name: atomic - name: atomic
- name: context-keys-type - name: context-keys-type
@ -102,3 +75,36 @@ linters-settings:
- name: unconditional-recursion - name: unconditional-recursion
- name: useless-break - name: useless-break
- name: waitgroup-by-value - name: waitgroup-by-value
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
# These are forks of an upstream package and thus are exempt from stylistic
# changes that would make pulling in upstream changes harder.
- path: tempfork/.*\.go
text: File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`
- path: util/singleflight/.*\.go
text: File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
settings:
gofmt:
rewrite-rules:
- pattern: interface{}
replacement: any
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

@ -1 +1 @@
3.18 3.22

@ -1,135 +1,103 @@
# Contributor Covenant Code of Conduct # Tailscale Community Code of Conduct
## Our Pledge ## Our Pledge
We as members, contributors, and leaders pledge to make participation We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community.
in our community a harassment-free experience for everyone, regardless Unacceptable, harmful and inappropriate behavior will not be tolerated.
of age, body size, visible or invisible disability, ethnicity, sex
characteristics, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance,
race, religion, or sexual identity and orientation.
We pledge to act and interact in ways that contribute to an open,
welcoming, diverse, inclusive, and healthy community.
## Our Standards ## Our Standards
Examples of behavior that contributes to a positive environment for Examples of behavior that contributes to a positive environment for our community include:
our community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our
mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or - Demonstrating empathy and kindness toward other people.
advances of any kind - Being respectful of differing opinions, viewpoints, and experiences.
* Trolling, insulting or derogatory comments, and personal or - Giving and gracefully accepting constructive feedback.
political attacks - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience.
* Public or private harassment - Focusing on what is best not just for us as individuals, but for the overall community.
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in
a professional setting
## Enforcement Responsibilities Examples of unacceptable behavior include without limitation:
Community leaders are responsible for clarifying and enforcing our - The use of language, imagery or emojis (collectively "content") that is racist, sexist, homophobic, transphobic, or otherwise harassing or discriminatory based on any protected characteristic.
standards of acceptable behavior and will take appropriate and fair - The use of sexualized content and sexual attention or advances of any kind.
corrective action in response to any behavior that they deem - The use of violent, intimidating or bullying content.
inappropriate, threatening, offensive, or harmful. - Trolling, concern trolling, insulting or derogatory comments, and personal or political attacks.
- Public or private harassment.
- Publishing others' personal information, such as a photo, physical address, email address, online profile information, or other personal information, without their explicit permission or with the intent to bully or harass the other person.
- Posting deep fake or other AI generated content about or involving another person without the explicit permission.
- Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages.
- Phishing or any similar activity.
- Distributing or promoting malware.
- The use of any coded or suggestive content to hide or provoke otherwise unacceptable behavior.
- Other conduct which could reasonably be considered harmful, illegal, or inappropriate in a professional setting.
Community leaders have the right and responsibility to remove, edit, Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
or reject comments, commits, code, wiki edits, issues, and other
contributions that are not aligned to this Code of Conduct, and will
communicate reasons for moderation decisions when appropriate.
## Scope ## Reporting Incidents
This Code of Conduct applies within all community spaces, and also Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to Tailscale directly via <info@tailscale.com>, or to the community leaders or moderators via DM or similar.
applies when an individual is officially representing the community in All complaints will be reviewed and investigated promptly and fairly.
public spaces. Examples of representing our community include using an We will respect the privacy and safety of the reporter of any issues.
official e-mail address, posting via an official social media account,
or acting as an appointed representative at an online or offline
event.
## Enforcement Please note that this community is not moderated by staff 24/7, and we do not have, and do not undertake, any obligation to prescreen, monitor, edit, or remove any content or data, or to actively seek facts or circumstances indicating illegal activity.
While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours.
If you encounter any issues, report them using the appropriate channels.
Instances of abusive, harassing, or otherwise unacceptable behavior ## Enforcement Guidelines
may be reported to the community leaders responsible for enforcement
at [info@tailscale.com](mailto:info@tailscale.com). All complaints
will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
security of the reporter of any incident.
## Enforcement Guidelines Community leaders and moderators have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Community Code of Conduct.
Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you.
We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole.
Community leaders will follow these Community Impact Guidelines in Community leaders will follow these community enforcement guidelines in determining the consequences for any action they deem in violation of this Code of Conduct,
determining the consequences for any action they deem in violation of and retain full discretion to apply the enforcement guidelines as necessary depending on the circumstances:
this Code of Conduct:
### 1. Correction ### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
deemed unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate.
providing clarity around the nature of the violation and an A public apology may be requested.
explanation of why the behavior was inappropriate. A public apology
may be requested.
### 2. Warning ### 2. Warning
**Community Impact**: A violation through a single incident or series Community Impact: A violation through a single incident or series of actions.
of actions.
**Consequence**: A warning with consequences for continued Consequence: A warning with consequences for continued behavior.
behavior. No interaction with the people involved, including No interaction with the people involved, including unsolicited interaction with those enforcing this Community Code of Conduct, for a specified period of time.
unsolicited interaction with those enforcing the Code of Conduct, for This includes avoiding interactions in community spaces as well as external channels like social media.
a specified period of time. This includes avoiding interactions in Violating these terms may lead to a temporary or permanent ban.
community spaces as well as external channels like social
media. Violating these terms may lead to a temporary or permanent ban.
### 3. Temporary Ban ### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, Community Impact: A serious violation of community standards, including sustained inappropriate behavior.
including sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time.
public communication with the community for a specified period of No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
time. No public or private interaction with the people involved,
including unsolicited interaction with those enforcing the Code of
Conduct, is allowed during this period. Violating these terms may lead
to a permanent ban.
### 4. Permanent Ban ### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
community standards, including sustained inappropriate behavior,
harassment of an individual, or aggression toward or disparagement of
classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction Consequence: A permanent ban from any sort of public interaction within the community.
within the community.
## Acceptable Use Policy
Violation of this Community Code of Conduct may also violate the Tailscale Acceptable Use Policy, which may result in suspension or termination of your Tailscale account.
For more information, please see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
## Privacy
Please see the Tailscale [Privacy Policy](https://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information.
## Attribution ## Attribution
This Code of Conduct is adapted from the [Contributor This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at <https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>.
Covenant][homepage], version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
conduct enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org [homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the For answers to common questions about this code of conduct, see the FAQ at <https://www.contributor-covenant.org/faq>.
FAQ at https://www.contributor-covenant.org/faq. Translations are Translations are available at <https://www.contributor-covenant.org/translations>.
available at https://www.contributor-covenant.org/translations.

@ -7,6 +7,15 @@
# Tailscale images are currently built using https://github.com/tailscale/mkctr, # Tailscale images are currently built using https://github.com/tailscale/mkctr,
# and the build script can be found in ./build_docker.sh. # and the build script can be found in ./build_docker.sh.
# #
# If you want to build local images for testing, you can use make.
#
# To build a Tailscale image and push to the local docker registry:
#
# $ REPO=local/tailscale TAGS=v0.0.1 PLATFORM=local make publishdevimage
#
# To build a Tailscale image and push to a remote docker registry:
#
# $ REPO=<your-registry>/<your-repo>/tailscale TAGS=v0.0.1 make publishdevimage
# #
# This Dockerfile includes all the tailscale binaries. # This Dockerfile includes all the tailscale binaries.
# #
@ -27,7 +36,7 @@
# $ docker exec tailscaled tailscale status # $ docker exec tailscaled tailscale status
FROM golang:1.23-alpine AS build-env FROM golang:1.25-alpine AS build-env
WORKDIR /go/src/tailscale WORKDIR /go/src/tailscale
@ -62,8 +71,10 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\
-X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
-v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot
FROM alpine:3.18 FROM alpine:3.22
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables
RUN ln -s /sbin/iptables-legacy /sbin/iptables
RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables
COPY --from=build-env /go/bin/* /usr/local/bin/ COPY --from=build-env /go/bin/* /usr/local/bin/
# For compat with the previous run.sh, although ideally you should be # For compat with the previous run.sh, although ideally you should be

@ -1,5 +1,12 @@
# Copyright (c) Tailscale Inc & AUTHORS # Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
FROM alpine:3.18 FROM alpine:3.22
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils
# Alpine 3.19 replaced legacy iptables with nftables based implementation. We
# can't be certain that all hosts that run Tailscale containers currently
# suppport nftables, so link back to legacy for backwards compatibility reasons.
# TODO(irbekrm): add some way how to determine if we still run on nodes that
# don't support nftables, so that we can eventually remove these symlinks.
RUN ln -s /sbin/iptables-legacy /sbin/iptables
RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables

@ -8,8 +8,9 @@ PLATFORM ?= "flyio" ## flyio==linux/amd64. Set to "" to build all platforms.
vet: ## Run go vet vet: ## Run go vet
./tool/go vet ./... ./tool/go vet ./...
tidy: ## Run go mod tidy tidy: ## Run go mod tidy and update nix flake hashes
./tool/go mod tidy ./tool/go mod tidy
./update-flake.sh
lint: ## Run golangci-lint lint: ## Run golangci-lint
./tool/go run github.com/golangci/golangci-lint/cmd/golangci-lint run ./tool/go run github.com/golangci/golangci-lint/cmd/golangci-lint run
@ -17,22 +18,36 @@ lint: ## Run golangci-lint
updatedeps: ## Update depaware deps updatedeps: ## Update depaware deps
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
# it finds in its $$PATH is the right one. # it finds in its $$PATH is the right one.
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update \ PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --vendor --internal \
tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscaled \
tailscale.com/cmd/tailscale \ tailscale.com/cmd/tailscale \
tailscale.com/cmd/derper \ tailscale.com/cmd/derper \
tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/k8s-operator \
tailscale.com/cmd/stund tailscale.com/cmd/stund \
tailscale.com/cmd/tsidp
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --vendor --internal \
tailscale.com/tsnet
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \
tailscale.com/cmd/tailscaled
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \
tailscale.com/cmd/tailscaled
depaware: ## Run depaware checks depaware: ## Run depaware checks
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
# it finds in its $$PATH is the right one. # it finds in its $$PATH is the right one.
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check \ PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --vendor --internal \
tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscaled \
tailscale.com/cmd/tailscale \ tailscale.com/cmd/tailscale \
tailscale.com/cmd/derper \ tailscale.com/cmd/derper \
tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/k8s-operator \
tailscale.com/cmd/stund tailscale.com/cmd/stund \
tailscale.com/cmd/tsidp
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --vendor --internal \
tailscale.com/tsnet
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \
tailscale.com/cmd/tailscaled
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \
tailscale.com/cmd/tailscaled
buildwindows: ## Build tailscale CLI for windows/amd64 buildwindows: ## Build tailscale CLI for windows/amd64
GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
@ -58,7 +73,7 @@ buildmultiarchimage: ## Build (and optionally push) multiarch docker image
check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm ## Perform basic checks and compilation tests check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm ## Perform basic checks and compilation tests
staticcheck: ## Run staticcheck.io checks staticcheck: ## Run staticcheck.io checks
./tool/go run honnef.co/go/tools/cmd/staticcheck -- $$(./tool/go list ./... | grep -v tempfork) ./tool/go run honnef.co/go/tools/cmd/staticcheck -- $$(./tool/go run ./tool/listpkgs --ignore-3p ./...)
kube-generate-all: kube-generate-deepcopy ## Refresh generated files for Tailscale Kubernetes Operator kube-generate-all: kube-generate-deepcopy ## Refresh generated files for Tailscale Kubernetes Operator
./tool/go generate ./cmd/k8s-operator ./tool/go generate ./cmd/k8s-operator
@ -86,43 +101,60 @@ pushspk: spk ## Push and install synology package on ${SYNO_HOST} host
scp tailscale.spk root@${SYNO_HOST}: scp tailscale.spk root@${SYNO_HOST}:
ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk
publishdevimage: ## Build and publish tailscale image to location specified by ${REPO} .PHONY: check-image-repo
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) check-image-repo:
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) @if [ -z "$(REPO)" ]; then \
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) echo "REPO=... required; e.g. REPO=ghcr.io/$$USER/tailscale" >&2; \
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) exit 1; \
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) fi
@for repo in tailscale/tailscale ghcr.io/tailscale/tailscale \
tailscale/k8s-operator ghcr.io/tailscale/k8s-operator \
tailscale/k8s-nameserver ghcr.io/tailscale/k8s-nameserver \
tailscale/tsidp ghcr.io/tailscale/tsidp \
tailscale/k8s-proxy ghcr.io/tailscale/k8s-proxy; do \
if [ "$(REPO)" = "$$repo" ]; then \
echo "REPO=... must not be $$repo" >&2; \
exit 1; \
fi; \
done
publishdevimage: check-image-repo ## Build and publish tailscale image to location specified by ${REPO}
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=client ./build_docker.sh TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=client ./build_docker.sh
publishdevoperator: ## Build and publish k8s-operator image to location specified by ${REPO} publishdevoperator: check-image-repo ## Build and publish k8s-operator image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-operator ./build_docker.sh
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) publishdevnameserver: check-image-repo ## Build and publish k8s-nameserver image to location specified by ${REPO}
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=operator ./build_docker.sh
publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-nameserver" || (echo "REPO=... must not be tailscale/k8s-nameserver" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1)
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh
publishdevtsidp: check-image-repo ## Build and publish tsidp image to location specified by ${REPO}
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=tsidp ./build_docker.sh
publishdevproxy: check-image-repo ## Build and publish k8s-proxy image to location specified by ${REPO}
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-proxy ./build_docker.sh
.PHONY: sshintegrationtest .PHONY: sshintegrationtest
sshintegrationtest: ## Run the SSH integration tests in various Docker containers sshintegrationtest: ## Run the SSH integration tests in various Docker containers
@GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ @GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \
GOOS=linux GOARCH=amd64 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \
echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:mantic" && docker build --build-arg="BASE=ubuntu:mantic" -t ssh-ubuntu-mantic ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \
echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers
.PHONY: generate
generate: ## Generate code
./tool/go generate ./...
.PHONY: pin-github-actions
pin-github-actions:
./tool/go tool github.com/stacklok/frizbee actions .github/workflows
help: ## Show this help help: ## Show this help
@echo "\nSpecify a command. The choices are:\n" @echo ""
@grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' @echo "Specify a command. The choices are:"
@echo ""
@grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}'
@echo "" @echo ""
.PHONY: help .PHONY: help

@ -37,7 +37,7 @@ not open source.
## Building ## Building
We always require the latest Go release, currently Go 1.23. (While we build We always require the latest Go release, currently Go 1.25. (While we build
releases with our [Go fork](https://github.com/tailscale/go/), its use is not releases with our [Go fork](https://github.com/tailscale/go/), its use is not
required.) required.)
@ -71,8 +71,7 @@ We require [Developer Certificate of
Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin) Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin)
`Signed-off-by` lines in commits. `Signed-off-by` lines in commits.
See `git log` for our commit message style. It's basically the same as See [commit-messages.md](docs/commit-messages.md) (or skim `git log`) for our commit message style.
[Go's style](https://github.com/golang/go/wiki/CommitMessage).
## About Us ## About Us

@ -1 +1 @@
1.75.0 1.91.0

@ -12,20 +12,20 @@ package appc
import ( import (
"context" "context"
"fmt" "fmt"
"maps"
"net/netip" "net/netip"
"slices" "slices"
"strings" "strings"
"sync"
"time" "time"
xmaps "golang.org/x/exp/maps" "tailscale.com/syncs"
"golang.org/x/net/dns/dnsmessage" "tailscale.com/types/appctype"
"tailscale.com/types/logger" "tailscale.com/types/logger"
"tailscale.com/types/views" "tailscale.com/types/views"
"tailscale.com/util/clientmetric" "tailscale.com/util/clientmetric"
"tailscale.com/util/dnsname" "tailscale.com/util/dnsname"
"tailscale.com/util/eventbus"
"tailscale.com/util/execqueue" "tailscale.com/util/execqueue"
"tailscale.com/util/mak"
"tailscale.com/util/slicesx" "tailscale.com/util/slicesx"
) )
@ -116,19 +116,6 @@ func metricStoreRoutes(rate, nRoutes int64) {
recordMetric(nRoutes, metricStoreRoutesNBuckets, metricStoreRoutesN) recordMetric(nRoutes, metricStoreRoutesNBuckets, metricStoreRoutesN)
} }
// RouteInfo is a data structure used to persist the in memory state of an AppConnector
// so that we can know, even after a restart, which routes came from ACLs and which were
// learned from domains.
type RouteInfo struct {
// Control is the routes from the 'routes' section of an app connector acl.
Control []netip.Prefix `json:",omitempty"`
// Domains are the routes discovered by observing DNS lookups for configured domains.
Domains map[string][]netip.Addr `json:",omitempty"`
// Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards,
// its result is added to Domains.
Wildcards []string `json:",omitempty"`
}
// AppConnector is an implementation of an AppConnector that performs // AppConnector is an implementation of an AppConnector that performs
// its function as a subsystem inside of a tailscale node. At the control plane // its function as a subsystem inside of a tailscale node. At the control plane
// side App Connector routing is configured in terms of domains rather than IP // side App Connector routing is configured in terms of domains rather than IP
@ -139,14 +126,20 @@ type RouteInfo struct {
// routes not yet served by the AppConnector the local node configuration is // routes not yet served by the AppConnector the local node configuration is
// updated to advertise the new route. // updated to advertise the new route.
type AppConnector struct { type AppConnector struct {
// These fields are immutable after initialization.
logf logger.Logf logf logger.Logf
eventBus *eventbus.Bus
routeAdvertiser RouteAdvertiser routeAdvertiser RouteAdvertiser
pubClient *eventbus.Client
updatePub *eventbus.Publisher[appctype.RouteUpdate]
storePub *eventbus.Publisher[appctype.RouteInfo]
// storeRoutesFunc will be called to persist routes if it is not nil. // hasStoredRoutes records whether the connector was initialized with
storeRoutesFunc func(*RouteInfo) error // persisted route information.
hasStoredRoutes bool
// mu guards the fields that follow // mu guards the fields that follow
mu sync.Mutex mu syncs.Mutex
// domains is a map of lower case domain names with no trailing dot, to an // domains is a map of lower case domain names with no trailing dot, to an
// ordered list of resolved IP addresses. // ordered list of resolved IP addresses.
@ -165,40 +158,68 @@ type AppConnector struct {
writeRateDay *rateLogger writeRateDay *rateLogger
} }
// Config carries the settings for an [AppConnector].
type Config struct {
// Logf is the logger to which debug logs from the connector will be sent.
// It must be non-nil.
Logf logger.Logf
// EventBus receives events when the collection of routes maintained by the
// connector is updated. It must be non-nil.
EventBus *eventbus.Bus
// RouteAdvertiser allows the connector to update the set of advertised routes.
RouteAdvertiser RouteAdvertiser
// RouteInfo, if non-nil, use used as the initial set of routes for the
// connector. If nil, the connector starts empty.
RouteInfo *appctype.RouteInfo
// HasStoredRoutes indicates that the connector should assume stored routes.
HasStoredRoutes bool
}
// NewAppConnector creates a new AppConnector. // NewAppConnector creates a new AppConnector.
func NewAppConnector(logf logger.Logf, routeAdvertiser RouteAdvertiser, routeInfo *RouteInfo, storeRoutesFunc func(*RouteInfo) error) *AppConnector { func NewAppConnector(c Config) *AppConnector {
switch {
case c.Logf == nil:
panic("missing logger")
case c.EventBus == nil:
panic("missing event bus")
}
ec := c.EventBus.Client("appc.AppConnector")
ac := &AppConnector{ ac := &AppConnector{
logf: logger.WithPrefix(logf, "appc: "), logf: logger.WithPrefix(c.Logf, "appc: "),
routeAdvertiser: routeAdvertiser, eventBus: c.EventBus,
storeRoutesFunc: storeRoutesFunc, pubClient: ec,
} updatePub: eventbus.Publish[appctype.RouteUpdate](ec),
if routeInfo != nil { storePub: eventbus.Publish[appctype.RouteInfo](ec),
ac.domains = routeInfo.Domains routeAdvertiser: c.RouteAdvertiser,
ac.wildcards = routeInfo.Wildcards hasStoredRoutes: c.HasStoredRoutes,
ac.controlRoutes = routeInfo.Control }
} if c.RouteInfo != nil {
ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) { ac.domains = c.RouteInfo.Domains
ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l) ac.wildcards = c.RouteInfo.Wildcards
metricStoreRoutes(c, l) ac.controlRoutes = c.RouteInfo.Control
}
ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, ln int64) {
ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, ln)
metricStoreRoutes(c, ln)
}) })
ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) { ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, ln int64) {
ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l) ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, ln)
}) })
return ac return ac
} }
// ShouldStoreRoutes returns true if the appconnector was created with the controlknob on // ShouldStoreRoutes returns true if the appconnector was created with the controlknob on
// and is storing its discovered routes persistently. // and is storing its discovered routes persistently.
func (e *AppConnector) ShouldStoreRoutes() bool { func (e *AppConnector) ShouldStoreRoutes() bool { return e.hasStoredRoutes }
return e.storeRoutesFunc != nil
}
// storeRoutesLocked takes the current state of the AppConnector and persists it // storeRoutesLocked takes the current state of the AppConnector and persists it
func (e *AppConnector) storeRoutesLocked() error { func (e *AppConnector) storeRoutesLocked() {
if !e.ShouldStoreRoutes() { if e.storePub.ShouldPublish() {
return nil
}
// log write rate and write size // log write rate and write size
numRoutes := int64(len(e.controlRoutes)) numRoutes := int64(len(e.controlRoutes))
for _, rs := range e.domains { for _, rs := range e.domains {
@ -207,12 +228,14 @@ func (e *AppConnector) storeRoutesLocked() error {
e.writeRateMinute.update(numRoutes) e.writeRateMinute.update(numRoutes)
e.writeRateDay.update(numRoutes) e.writeRateDay.update(numRoutes)
return e.storeRoutesFunc(&RouteInfo{ e.storePub.Publish(appctype.RouteInfo{
Control: e.controlRoutes, // Clone here, as the subscriber will handle these outside our lock.
Domains: e.domains, Control: slices.Clone(e.controlRoutes),
Wildcards: e.wildcards, Domains: maps.Clone(e.domains),
Wildcards: slices.Clone(e.wildcards),
}) })
} }
}
// ClearRoutes removes all route state from the AppConnector. // ClearRoutes removes all route state from the AppConnector.
func (e *AppConnector) ClearRoutes() error { func (e *AppConnector) ClearRoutes() error {
@ -221,7 +244,8 @@ func (e *AppConnector) ClearRoutes() error {
e.controlRoutes = nil e.controlRoutes = nil
e.domains = nil e.domains = nil
e.wildcards = nil e.wildcards = nil
return e.storeRoutesLocked() e.storeRoutesLocked()
return nil
} }
// UpdateDomainsAndRoutes starts an asynchronous update of the configuration // UpdateDomainsAndRoutes starts an asynchronous update of the configuration
@ -250,6 +274,18 @@ func (e *AppConnector) Wait(ctx context.Context) {
e.queue.Wait(ctx) e.queue.Wait(ctx)
} }
// Close closes the connector and cleans up resources associated with it.
// It is safe (and a noop) to call Close on nil.
func (e *AppConnector) Close() {
if e == nil {
return
}
e.mu.Lock()
defer e.mu.Unlock()
e.queue.Shutdown() // TODO(creachadair): Should we wait for it too?
e.pubClient.Close()
}
func (e *AppConnector) updateDomains(domains []string) { func (e *AppConnector) updateDomains(domains []string) {
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
@ -281,21 +317,29 @@ func (e *AppConnector) updateDomains(domains []string) {
} }
} }
// Everything left in oldDomains is a domain we're no longer tracking // Everything left in oldDomains is a domain we're no longer tracking and we
// and if we are storing route info we can unadvertise the routes // can unadvertise the routes.
if e.ShouldStoreRoutes() { if e.hasStoredRoutes {
toRemove := []netip.Prefix{} toRemove := []netip.Prefix{}
for _, addrs := range oldDomains { for _, addrs := range oldDomains {
for _, a := range addrs { for _, a := range addrs {
toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen())) toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen()))
} }
} }
if len(toRemove) != 0 {
if ra := e.routeAdvertiser; ra != nil {
e.queue.Add(func() {
if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil {
e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", xmaps.Keys(oldDomains), toRemove, err) e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err)
}
})
}
e.updatePub.Publish(appctype.RouteUpdate{Unadvertise: toRemove})
} }
} }
e.logf("handling domains: %v and wildcards: %v", xmaps.Keys(e.domains), e.wildcards) e.logf("handling domains: %v and wildcards: %v", slicesx.MapKeys(e.domains), e.wildcards)
} }
// updateRoutes merges the supplied routes into the currently configured routes. The routes supplied // updateRoutes merges the supplied routes into the currently configured routes. The routes supplied
@ -311,18 +355,12 @@ func (e *AppConnector) updateRoutes(routes []netip.Prefix) {
return return
} }
if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil {
e.logf("failed to advertise routes: %v: %v", routes, err)
return
}
var toRemove []netip.Prefix var toRemove []netip.Prefix
// If we're storing routes and know e.controlRoutes is a good // If we know e.controlRoutes is a good representation of what should be in
// representation of what should be in AdvertisedRoutes we can stop // AdvertisedRoutes we can stop advertising routes that used to be in
// advertising routes that used to be in e.controlRoutes but are not // e.controlRoutes but are not in routes.
// in routes. if e.hasStoredRoutes {
if e.ShouldStoreRoutes() {
toRemove = routesWithout(e.controlRoutes, routes) toRemove = routesWithout(e.controlRoutes, routes)
} }
@ -339,14 +377,23 @@ nextRoute:
} }
} }
if e.routeAdvertiser != nil {
e.queue.Add(func() {
if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil {
e.logf("failed to advertise routes: %v: %v", routes, err)
}
if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil {
e.logf("failed to unadvertise routes: %v: %v", toRemove, err) e.logf("failed to unadvertise routes: %v: %v", toRemove, err)
} }
})
}
e.updatePub.Publish(appctype.RouteUpdate{
Advertise: routes,
Unadvertise: toRemove,
})
e.controlRoutes = routes e.controlRoutes = routes
if err := e.storeRoutesLocked(); err != nil { e.storeRoutesLocked()
e.logf("failed to store route info: %v", err)
}
} }
// Domains returns the currently configured domain list. // Domains returns the currently configured domain list.
@ -354,7 +401,7 @@ func (e *AppConnector) Domains() views.Slice[string] {
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
return views.SliceOf(xmaps.Keys(e.domains)) return views.SliceOf(slicesx.MapKeys(e.domains))
} }
// DomainRoutes returns a map of domains to resolved IP // DomainRoutes returns a map of domains to resolved IP
@ -371,123 +418,6 @@ func (e *AppConnector) DomainRoutes() map[string][]netip.Addr {
return drCopy return drCopy
} }
// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS
// response is being returned over the PeerAPI. The response is parsed and
// matched against the configured domains, if matched the routeAdvertiser is
// advised to advertise the discovered route.
func (e *AppConnector) ObserveDNSResponse(res []byte) {
var p dnsmessage.Parser
if _, err := p.Start(res); err != nil {
return
}
if err := p.SkipAllQuestions(); err != nil {
return
}
// cnameChain tracks a chain of CNAMEs for a given query in order to reverse
// a CNAME chain back to the original query for flattening. The keys are
// CNAME record targets, and the value is the name the record answers, so
// for www.example.com CNAME example.com, the map would contain
// ["example.com"] = "www.example.com".
var cnameChain map[string]string
// addressRecords is a list of address records found in the response.
var addressRecords map[string][]netip.Addr
for {
h, err := p.AnswerHeader()
if err == dnsmessage.ErrSectionDone {
break
}
if err != nil {
return
}
if h.Class != dnsmessage.ClassINET {
if err := p.SkipAnswer(); err != nil {
return
}
continue
}
switch h.Type {
case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA:
default:
if err := p.SkipAnswer(); err != nil {
return
}
continue
}
domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".")
if len(domain) == 0 {
continue
}
if h.Type == dnsmessage.TypeCNAME {
res, err := p.CNAMEResource()
if err != nil {
return
}
cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".")
if len(cname) == 0 {
continue
}
mak.Set(&cnameChain, cname, domain)
continue
}
switch h.Type {
case dnsmessage.TypeA:
r, err := p.AResource()
if err != nil {
return
}
addr := netip.AddrFrom4(r.A)
mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
case dnsmessage.TypeAAAA:
r, err := p.AAAAResource()
if err != nil {
return
}
addr := netip.AddrFrom16(r.AAAA)
mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
default:
if err := p.SkipAnswer(); err != nil {
return
}
continue
}
}
e.mu.Lock()
defer e.mu.Unlock()
for domain, addrs := range addressRecords {
domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain)
// domain and none of the CNAMEs in the chain are routed
if !isRouted {
continue
}
// advertise each address we have learned for the routed domain, that
// was not already known.
var toAdvertise []netip.Prefix
for _, addr := range addrs {
if !e.isAddrKnownLocked(domain, addr) {
toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen()))
}
}
if len(toAdvertise) > 0 {
e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise)
e.scheduleAdvertisement(domain, toAdvertise...)
}
}
}
// starting from the given domain that resolved to an address, find it, or any // starting from the given domain that resolved to an address, find it, or any
// of the domains in the CNAME chain toward resolving it, that are routed // of the domains in the CNAME chain toward resolving it, that are routed
// domains, returning the routed domain name and a bool indicating whether a // domains, returning the routed domain name and a bool indicating whether a
@ -542,10 +472,13 @@ func (e *AppConnector) isAddrKnownLocked(domain string, addr netip.Addr) bool {
// associated with the given domain. // associated with the given domain.
func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Prefix) { func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Prefix) {
e.queue.Add(func() { e.queue.Add(func() {
if e.routeAdvertiser != nil {
if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil {
e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err)
return return
} }
}
e.updatePub.Publish(appctype.RouteUpdate{Advertise: routes})
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
@ -559,9 +492,7 @@ func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Pref
e.logf("[v2] advertised route for %v: %v", domain, addr) e.logf("[v2] advertised route for %v: %v", domain, addr)
} }
} }
if err := e.storeRoutesLocked(); err != nil { e.storeRoutesLocked()
e.logf("failed to store route info: %v", err)
}
}) })
} }
@ -579,8 +510,8 @@ func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) {
slices.SortFunc(e.domains[domain], compareAddr) slices.SortFunc(e.domains[domain], compareAddr)
} }
func compareAddr(l, r netip.Addr) int { func compareAddr(a, b netip.Addr) int {
return l.Compare(r) return a.Compare(b)
} }
// routesWithout returns a without b where a and b // routesWithout returns a without b where a and b

@ -4,35 +4,40 @@
package appc package appc
import ( import (
"context" stdcmp "cmp"
"fmt"
"net/netip" "net/netip"
"reflect" "reflect"
"slices" "slices"
"sync/atomic"
"testing" "testing"
"time" "time"
xmaps "golang.org/x/exp/maps" "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"golang.org/x/net/dns/dnsmessage" "golang.org/x/net/dns/dnsmessage"
"tailscale.com/appc/appctest" "tailscale.com/appc/appctest"
"tailscale.com/tstest" "tailscale.com/tstest"
"tailscale.com/types/appctype"
"tailscale.com/util/clientmetric" "tailscale.com/util/clientmetric"
"tailscale.com/util/eventbus/eventbustest"
"tailscale.com/util/mak" "tailscale.com/util/mak"
"tailscale.com/util/must" "tailscale.com/util/must"
"tailscale.com/util/slicesx"
) )
func fakeStoreRoutes(*RouteInfo) error { return nil }
func TestUpdateDomains(t *testing.T) { func TestUpdateDomains(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
ctx := context.Background() a := NewAppConnector(Config{
var a *AppConnector Logf: t.Logf,
if shouldStore { EventBus: bus,
a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, &RouteInfo{}, fakeStoreRoutes) HasStoredRoutes: shouldStore,
} else { })
a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) t.Cleanup(a.Close)
}
a.UpdateDomains([]string{"example.com"})
a.UpdateDomains([]string{"example.com"})
a.Wait(ctx) a.Wait(ctx)
if got, want := a.Domains().AsSlice(), []string{"example.com"}; !slices.Equal(got, want) { if got, want := a.Domains().AsSlice(), []string{"example.com"}; !slices.Equal(got, want) {
t.Errorf("got %v; want %v", got, want) t.Errorf("got %v; want %v", got, want)
@ -50,26 +55,32 @@ func TestUpdateDomains(t *testing.T) {
// domains are explicitly downcased on set. // domains are explicitly downcased on set.
a.UpdateDomains([]string{"UP.EXAMPLE.COM"}) a.UpdateDomains([]string{"UP.EXAMPLE.COM"})
a.Wait(ctx) a.Wait(ctx)
if got, want := xmaps.Keys(a.domains), []string{"up.example.com"}; !slices.Equal(got, want) { if got, want := slicesx.MapKeys(a.domains), []string{"up.example.com"}; !slices.Equal(got, want) {
t.Errorf("got %v; want %v", got, want) t.Errorf("got %v; want %v", got, want)
} }
} }
} }
func TestUpdateRoutes(t *testing.T) { func TestUpdateRoutes(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
ctx := context.Background() w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
a.updateDomains([]string{"*.example.com"}) a.updateDomains([]string{"*.example.com"})
// This route should be collapsed into the range // This route should be collapsed into the range
a.ObserveDNSResponse(dnsResponse("a.example.com.", "192.0.2.1")) if err := a.ObserveDNSResponse(dnsResponse("a.example.com.", "192.0.2.1")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
if !slices.Equal(rc.Routes(), []netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) { if !slices.Equal(rc.Routes(), []netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) {
@ -77,11 +88,14 @@ func TestUpdateRoutes(t *testing.T) {
} }
// This route should not be collapsed or removed // This route should not be collapsed or removed
a.ObserveDNSResponse(dnsResponse("b.example.com.", "192.0.0.1")) if err := a.ObserveDNSResponse(dnsResponse("b.example.com.", "192.0.0.1")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24"), netip.MustParsePrefix("192.0.0.1/32")} routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24"), netip.MustParsePrefix("192.0.0.1/32")}
a.updateRoutes(routes) a.updateRoutes(routes)
a.Wait(ctx)
slices.SortFunc(rc.Routes(), prefixCompare) slices.SortFunc(rc.Routes(), prefixCompare)
rc.SetRoutes(slices.Compact(rc.Routes())) rc.SetRoutes(slices.Compact(rc.Routes()))
@ -97,41 +111,76 @@ func TestUpdateRoutes(t *testing.T) {
if !slices.EqualFunc(rc.RemovedRoutes(), wantRemoved, prefixEqual) { if !slices.EqualFunc(rc.RemovedRoutes(), wantRemoved, prefixEqual) {
t.Fatalf("unexpected removed routes: %v", rc.RemovedRoutes()) t.Fatalf("unexpected removed routes: %v", rc.RemovedRoutes())
} }
if err := eventbustest.Expect(w,
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.1/32")}),
eventbustest.Type[appctype.RouteInfo](),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.1/32")}),
eventbustest.Type[appctype.RouteInfo](),
eqUpdate(appctype.RouteUpdate{
Advertise: prefixes("192.0.0.1/32", "192.0.2.0/24"),
Unadvertise: prefixes("192.0.2.1/32"),
}),
eventbustest.Type[appctype.RouteInfo](),
); err != nil {
t.Error(err)
}
} }
} }
func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")})
rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")})
routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")} routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}
a.updateRoutes(routes) a.updateRoutes(routes)
a.Wait(ctx)
if !slices.EqualFunc(routes, rc.Routes(), prefixEqual) { if !slices.EqualFunc(routes, rc.Routes(), prefixEqual) {
t.Fatalf("got %v, want %v", rc.Routes(), routes) t.Fatalf("got %v, want %v", rc.Routes(), routes)
} }
if err := eventbustest.ExpectExactly(w,
eqUpdate(appctype.RouteUpdate{
Advertise: prefixes("192.0.2.0/24"),
Unadvertise: prefixes("192.0.2.1/32"),
}),
eventbustest.Type[appctype.RouteInfo](),
); err != nil {
t.Error(err)
}
} }
} }
func TestDomainRoutes(t *testing.T) { func TestDomainRoutes(t *testing.T) {
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
a.updateDomains([]string{"example.com"}) a.updateDomains([]string{"example.com"})
a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil {
a.Wait(context.Background()) t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(t.Context())
want := map[string][]netip.Addr{ want := map[string][]netip.Addr{
"example.com": {netip.MustParseAddr("192.0.0.8")}, "example.com": {netip.MustParseAddr("192.0.0.8")},
@ -140,22 +189,34 @@ func TestDomainRoutes(t *testing.T) {
if got := a.DomainRoutes(); !reflect.DeepEqual(got, want) { if got := a.DomainRoutes(); !reflect.DeepEqual(got, want) {
t.Fatalf("DomainRoutes: got %v, want %v", got, want) t.Fatalf("DomainRoutes: got %v, want %v", got, want)
} }
if err := eventbustest.ExpectExactly(w,
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}),
eventbustest.Type[appctype.RouteInfo](),
); err != nil {
t.Error(err)
}
} }
} }
func TestObserveDNSResponse(t *testing.T) { func TestObserveDNSResponse(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
ctx := context.Background() w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
// a has no domains configured, so it should not advertise any routes // a has no domains configured, so it should not advertise any routes
a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
if got, want := rc.Routes(), ([]netip.Prefix)(nil); !slices.Equal(got, want) { if got, want := rc.Routes(), ([]netip.Prefix)(nil); !slices.Equal(got, want) {
t.Errorf("got %v; want %v", got, want) t.Errorf("got %v; want %v", got, want)
} }
@ -163,7 +224,9 @@ func TestObserveDNSResponse(t *testing.T) {
wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")}
a.updateDomains([]string{"example.com"}) a.updateDomains([]string{"example.com"})
a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) {
t.Errorf("got %v; want %v", got, want) t.Errorf("got %v; want %v", got, want)
@ -172,7 +235,9 @@ func TestObserveDNSResponse(t *testing.T) {
// a CNAME record chain should result in a route being added if the chain // a CNAME record chain should result in a route being added if the chain
// matches a routed domain. // matches a routed domain.
a.updateDomains([]string{"www.example.com", "example.com"}) a.updateDomains([]string{"www.example.com", "example.com"})
a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.9", "www.example.com.", "chain.example.com.", "example.com.")) if err := a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.9", "www.example.com.", "chain.example.com.", "example.com.")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
wantRoutes = append(wantRoutes, netip.MustParsePrefix("192.0.0.9/32")) wantRoutes = append(wantRoutes, netip.MustParsePrefix("192.0.0.9/32"))
if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) {
@ -181,7 +246,9 @@ func TestObserveDNSResponse(t *testing.T) {
// a CNAME record chain should result in a route being added if the chain // a CNAME record chain should result in a route being added if the chain
// even if only found in the middle of the chain // even if only found in the middle of the chain
a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.10", "outside.example.org.", "www.example.com.", "example.org.")) if err := a.ObserveDNSResponse(dnsCNAMEResponse("192.0.0.10", "outside.example.org.", "www.example.com.", "example.org.")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
wantRoutes = append(wantRoutes, netip.MustParsePrefix("192.0.0.10/32")) wantRoutes = append(wantRoutes, netip.MustParsePrefix("192.0.0.10/32"))
if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) {
@ -190,14 +257,18 @@ func TestObserveDNSResponse(t *testing.T) {
wantRoutes = append(wantRoutes, netip.MustParsePrefix("2001:db8::1/128")) wantRoutes = append(wantRoutes, netip.MustParsePrefix("2001:db8::1/128"))
a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) { if got, want := rc.Routes(), wantRoutes; !slices.Equal(got, want) {
t.Errorf("got %v; want %v", got, want) t.Errorf("got %v; want %v", got, want)
} }
// don't re-advertise routes that have already been advertised // don't re-advertise routes that have already been advertised
a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "2001:db8::1")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
if !slices.Equal(rc.Routes(), wantRoutes) { if !slices.Equal(rc.Routes(), wantRoutes) {
t.Errorf("rc.Routes(): got %v; want %v", rc.Routes(), wantRoutes) t.Errorf("rc.Routes(): got %v; want %v", rc.Routes(), wantRoutes)
@ -207,7 +278,9 @@ func TestObserveDNSResponse(t *testing.T) {
pfx := netip.MustParsePrefix("192.0.2.0/24") pfx := netip.MustParsePrefix("192.0.2.0/24")
a.updateRoutes([]netip.Prefix{pfx}) a.updateRoutes([]netip.Prefix{pfx})
wantRoutes = append(wantRoutes, pfx) wantRoutes = append(wantRoutes, pfx)
a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.2.1")) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.2.1")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
if !slices.Equal(rc.Routes(), wantRoutes) { if !slices.Equal(rc.Routes(), wantRoutes) {
t.Errorf("rc.Routes(): got %v; want %v", rc.Routes(), wantRoutes) t.Errorf("rc.Routes(): got %v; want %v", rc.Routes(), wantRoutes)
@ -215,22 +288,43 @@ func TestObserveDNSResponse(t *testing.T) {
if !slices.Contains(a.domains["example.com"], netip.MustParseAddr("192.0.2.1")) { if !slices.Contains(a.domains["example.com"], netip.MustParseAddr("192.0.2.1")) {
t.Errorf("missing %v from %v", "192.0.2.1", a.domains["exmaple.com"]) t.Errorf("missing %v from %v", "192.0.2.1", a.domains["exmaple.com"])
} }
if err := eventbustest.ExpectExactly(w,
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), // from initial DNS response, via example.com
eventbustest.Type[appctype.RouteInfo](),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.9/32")}), // from CNAME response
eventbustest.Type[appctype.RouteInfo](),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.10/32")}), // from CNAME response, mid-chain
eventbustest.Type[appctype.RouteInfo](),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("2001:db8::1/128")}), // v6 DNS response
eventbustest.Type[appctype.RouteInfo](),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.0/24")}), // additional prefix
eventbustest.Type[appctype.RouteInfo](),
// N.B. no update for 192.0.2.1 as it is already covered
); err != nil {
t.Error(err)
}
} }
} }
func TestWildcardDomains(t *testing.T) { func TestWildcardDomains(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
ctx := context.Background() w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
a.updateDomains([]string{"*.example.com"}) a.updateDomains([]string{"*.example.com"})
a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")) if err := a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
a.Wait(ctx) a.Wait(ctx)
if got, want := rc.Routes(), []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")}; !slices.Equal(got, want) { if got, want := rc.Routes(), []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")}; !slices.Equal(got, want) {
t.Errorf("routes: got %v; want %v", got, want) t.Errorf("routes: got %v; want %v", got, want)
@ -252,6 +346,13 @@ func TestWildcardDomains(t *testing.T) {
if len(a.wildcards) != 1 { if len(a.wildcards) != 1 {
t.Errorf("expected only one wildcard domain, got %v", a.wildcards) t.Errorf("expected only one wildcard domain, got %v", a.wildcards)
} }
if err := eventbustest.ExpectExactly(w,
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}),
eventbustest.Type[appctype.RouteInfo](),
); err != nil {
t.Error(err)
}
} }
} }
@ -367,8 +468,10 @@ func prefixes(in ...string) []netip.Prefix {
} }
func TestUpdateRouteRouteRemoval(t *testing.T) { func TestUpdateRouteRouteRemoval(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
ctx := context.Background() w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) {
@ -380,12 +483,14 @@ func TestUpdateRouteRouteRemoval(t *testing.T) {
} }
} }
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
// nothing has yet been advertised // nothing has yet been advertised
assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{})
@ -408,12 +513,21 @@ func TestUpdateRouteRouteRemoval(t *testing.T) {
wantRemovedRoutes = prefixes("1.2.3.2/32") wantRemovedRoutes = prefixes("1.2.3.2/32")
} }
assertRoutes("removal", wantRoutes, wantRemovedRoutes) assertRoutes("removal", wantRoutes, wantRemovedRoutes)
if err := eventbustest.Expect(w,
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32", "1.2.3.2/32")}), // no duplicates here
eventbustest.Type[appctype.RouteInfo](),
); err != nil {
t.Error(err)
}
} }
} }
func TestUpdateDomainRouteRemoval(t *testing.T) { func TestUpdateDomainRouteRemoval(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
ctx := context.Background() w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) {
@ -425,12 +539,14 @@ func TestUpdateDomainRouteRemoval(t *testing.T) {
} }
} }
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{})
a.UpdateDomainsAndRoutes([]string{"a.example.com", "b.example.com"}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "b.example.com"}, []netip.Prefix{})
@ -438,10 +554,16 @@ func TestUpdateDomainRouteRemoval(t *testing.T) {
// adding domains doesn't immediately cause any routes to be advertised // adding domains doesn't immediately cause any routes to be advertised
assertRoutes("update domains", []netip.Prefix{}, []netip.Prefix{}) assertRoutes("update domains", []netip.Prefix{}, []netip.Prefix{})
a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.1")) for _, res := range [][]byte{
a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.2")) dnsResponse("a.example.com.", "1.2.3.1"),
a.ObserveDNSResponse(dnsResponse("b.example.com.", "1.2.3.3")) dnsResponse("a.example.com.", "1.2.3.2"),
a.ObserveDNSResponse(dnsResponse("b.example.com.", "1.2.3.4")) dnsResponse("b.example.com.", "1.2.3.3"),
dnsResponse("b.example.com.", "1.2.3.4"),
} {
if err := a.ObserveDNSResponse(res); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
}
a.Wait(ctx) a.Wait(ctx)
// observing dns responses causes routes to be advertised // observing dns responses causes routes to be advertised
assertRoutes("observed dns", prefixes("1.2.3.1/32", "1.2.3.2/32", "1.2.3.3/32", "1.2.3.4/32"), []netip.Prefix{}) assertRoutes("observed dns", prefixes("1.2.3.1/32", "1.2.3.2/32", "1.2.3.3/32", "1.2.3.4/32"), []netip.Prefix{})
@ -457,12 +579,30 @@ func TestUpdateDomainRouteRemoval(t *testing.T) {
wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32")
} }
assertRoutes("removal", wantRoutes, wantRemovedRoutes) assertRoutes("removal", wantRoutes, wantRemovedRoutes)
wantEvents := []any{
// Each DNS record observed triggers an update.
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}),
}
if shouldStore {
wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{
Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"),
}))
}
if err := eventbustest.Expect(w, wantEvents...); err != nil {
t.Error(err)
}
} }
} }
func TestUpdateWildcardRouteRemoval(t *testing.T) { func TestUpdateWildcardRouteRemoval(t *testing.T) {
ctx := t.Context()
bus := eventbustest.NewBus(t)
for _, shouldStore := range []bool{false, true} { for _, shouldStore := range []bool{false, true} {
ctx := context.Background() w := eventbustest.NewWatcher(t, bus)
rc := &appctest.RouteCollector{} rc := &appctest.RouteCollector{}
assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) {
@ -474,12 +614,14 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) {
} }
} }
var a *AppConnector a := NewAppConnector(Config{
if shouldStore { Logf: t.Logf,
a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) EventBus: bus,
} else { RouteAdvertiser: rc,
a = NewAppConnector(t.Logf, rc, nil, nil) HasStoredRoutes: shouldStore,
} })
t.Cleanup(a.Close)
assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{})
a.UpdateDomainsAndRoutes([]string{"a.example.com", "*.b.example.com"}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "*.b.example.com"}, []netip.Prefix{})
@ -487,10 +629,16 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) {
// adding domains doesn't immediately cause any routes to be advertised // adding domains doesn't immediately cause any routes to be advertised
assertRoutes("update domains", []netip.Prefix{}, []netip.Prefix{}) assertRoutes("update domains", []netip.Prefix{}, []netip.Prefix{})
a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.1")) for _, res := range [][]byte{
a.ObserveDNSResponse(dnsResponse("a.example.com.", "1.2.3.2")) dnsResponse("a.example.com.", "1.2.3.1"),
a.ObserveDNSResponse(dnsResponse("1.b.example.com.", "1.2.3.3")) dnsResponse("a.example.com.", "1.2.3.2"),
a.ObserveDNSResponse(dnsResponse("2.b.example.com.", "1.2.3.4")) dnsResponse("1.b.example.com.", "1.2.3.3"),
dnsResponse("2.b.example.com.", "1.2.3.4"),
} {
if err := a.ObserveDNSResponse(res); err != nil {
t.Errorf("ObserveDNSResponse: %v", err)
}
}
a.Wait(ctx) a.Wait(ctx)
// observing dns responses causes routes to be advertised // observing dns responses causes routes to be advertised
assertRoutes("observed dns", prefixes("1.2.3.1/32", "1.2.3.2/32", "1.2.3.3/32", "1.2.3.4/32"), []netip.Prefix{}) assertRoutes("observed dns", prefixes("1.2.3.1/32", "1.2.3.2/32", "1.2.3.3/32", "1.2.3.4/32"), []netip.Prefix{})
@ -506,6 +654,22 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) {
wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32")
} }
assertRoutes("removal", wantRoutes, wantRemovedRoutes) assertRoutes("removal", wantRoutes, wantRemovedRoutes)
wantEvents := []any{
// Each DNS record observed triggers an update.
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}),
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}),
}
if shouldStore {
wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{
Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"),
}))
}
if err := eventbustest.Expect(w, wantEvents...); err != nil {
t.Error(err)
}
} }
} }
@ -602,3 +766,107 @@ func TestMetricBucketsAreSorted(t *testing.T) {
t.Errorf("metricStoreRoutesNBuckets must be in order") t.Errorf("metricStoreRoutesNBuckets must be in order")
} }
} }
// TestUpdateRoutesDeadlock is a regression test for a deadlock in
// LocalBackend<->AppConnector interaction. When using real LocalBackend as the
// routeAdvertiser, calls to Advertise/UnadvertiseRoutes can end up calling
// back into AppConnector via authReconfig. If everything is called
// synchronously, this results in a deadlock on AppConnector.mu.
//
// TODO(creachadair, 2025-09-18): Remove this along with the advertiser
// interface once the LocalBackend is switched to use the event bus and the
// tests have been updated not to need it.
func TestUpdateRoutesDeadlock(t *testing.T) {
	ctx := t.Context()
	bus := eventbustest.NewBus(t)
	w := eventbustest.NewWatcher(t, bus)
	rc := &appctest.RouteCollector{}
	a := NewAppConnector(Config{
		Logf:            t.Logf,
		EventBus:        bus,
		RouteAdvertiser: rc,
		HasStoredRoutes: true,
	})
	t.Cleanup(a.Close)
	// The callbacks below re-enter the AppConnector from inside the route
	// advertiser, reproducing the re-entrancy that caused the deadlock.
	advertiseCalled := new(atomic.Bool)
	unadvertiseCalled := new(atomic.Bool)
	rc.AdvertiseCallback = func() {
		// Call something that requires a.mu to be held.
		a.DomainRoutes()
		advertiseCalled.Store(true)
	}
	rc.UnadvertiseCallback = func() {
		// Call something that requires a.mu to be held.
		a.DomainRoutes()
		unadvertiseCalled.Store(true)
	}
	a.updateDomains([]string{"example.com"})
	a.Wait(ctx)
	// Trigger rc.AdvertiseRoute.
	a.updateRoutes(
		[]netip.Prefix{
			netip.MustParsePrefix("127.0.0.1/32"),
			netip.MustParsePrefix("127.0.0.2/32"),
		},
	)
	a.Wait(ctx)
	// Trigger rc.UnadvertiseRoute.
	a.updateRoutes(
		[]netip.Prefix{
			netip.MustParsePrefix("127.0.0.1/32"),
		},
	)
	a.Wait(ctx)
	if !advertiseCalled.Load() {
		t.Error("AdvertiseRoute was not called")
	}
	if !unadvertiseCalled.Load() {
		t.Error("UnadvertiseRoute was not called")
	}
	if want := []netip.Prefix{netip.MustParsePrefix("127.0.0.1/32")}; !slices.Equal(slices.Compact(rc.Routes()), want) {
		t.Fatalf("got %v, want %v", rc.Routes(), want)
	}
	if err := eventbustest.ExpectExactly(w,
		eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32", "127.0.0.2/32")}),
		eventbustest.Type[appctype.RouteInfo](),
		eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32"), Unadvertise: prefixes("127.0.0.2/32")}),
		eventbustest.Type[appctype.RouteInfo](),
	); err != nil {
		t.Error(err)
	}
}
// textUpdate is a string-typed mirror of appctype.RouteUpdate, used so that
// test diffs render prefixes as readable strings.
type textUpdate struct {
	// Advertise holds the stringified prefixes being advertised.
	Advertise []string
	// Unadvertise holds the stringified prefixes being withdrawn.
	Unadvertise []string
}
// routeUpdateToText converts a RouteUpdate into its string form so that test
// failures print human-readable prefixes. Nil input slices stay nil.
func routeUpdateToText(u appctype.RouteUpdate) textUpdate {
	toStrings := func(ps []netip.Prefix) []string {
		var ss []string
		for _, pfx := range ps {
			ss = append(ss, pfx.String())
		}
		return ss
	}
	return textUpdate{
		Advertise:   toStrings(u.Advertise),
		Unadvertise: toStrings(u.Unadvertise),
	}
}
// eqUpdate generates an eventbus test filter that matches an
// appctype.RouteUpdate message equal to want, or reports an error giving a
// human-readable diff.
func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error {
	// Convert want once, up front; only got varies per message.
	wantText := routeUpdateToText(want)
	return func(got appctype.RouteUpdate) error {
		diff := cmp.Diff(routeUpdateToText(got), wantText,
			cmpopts.SortSlices(stdcmp.Less[string]))
		if diff == "" {
			return nil
		}
		return fmt.Errorf("wrong update (-got, +want):\n%s", diff)
	}
}

@ -11,12 +11,22 @@ import (
// RouteCollector is a test helper that collects the list of routes advertised // RouteCollector is a test helper that collects the list of routes advertised
type RouteCollector struct { type RouteCollector struct {
// AdvertiseCallback (optional) is called synchronously from
// AdvertiseRoute.
AdvertiseCallback func()
// UnadvertiseCallback (optional) is called synchronously from
// UnadvertiseRoute.
UnadvertiseCallback func()
routes []netip.Prefix routes []netip.Prefix
removedRoutes []netip.Prefix removedRoutes []netip.Prefix
} }
func (rc *RouteCollector) AdvertiseRoute(pfx ...netip.Prefix) error { func (rc *RouteCollector) AdvertiseRoute(pfx ...netip.Prefix) error {
rc.routes = append(rc.routes, pfx...) rc.routes = append(rc.routes, pfx...)
if rc.AdvertiseCallback != nil {
rc.AdvertiseCallback()
}
return nil return nil
} }
@ -30,6 +40,9 @@ func (rc *RouteCollector) UnadvertiseRoute(toRemove ...netip.Prefix) error {
rc.removedRoutes = append(rc.removedRoutes, r) rc.removedRoutes = append(rc.removedRoutes, r)
} }
} }
if rc.UnadvertiseCallback != nil {
rc.UnadvertiseCallback()
}
return nil return nil
} }

@ -0,0 +1,61 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package appc
import (
"errors"
"net/netip"
"go4.org/netipx"
)
// errPoolExhausted is returned when there are no more addresses to iterate over.
var errPoolExhausted = errors.New("ip pool exhausted")

// ippool allows for iteration over all the addresses within a netipx.IPSet.
// netipx.IPSet has a Ranges call that returns the "minimum and sorted set of IP ranges that covers [the set]".
// netipx.IPRange is "an inclusive range of IP addresses from the same address family.". So we can iterate over
// all the addresses in the set by keeping a track of the last address we returned, calling Next on the last address
// to get the new one, and if we run off the edge of the current range, starting on the next one.
type ippool struct {
	// ranges defines the addresses in the pool
	ranges []netipx.IPRange
	// last is internal tracking of the last address provided.
	// It is the zero (invalid) Addr until next has been called at least once.
	last netip.Addr
	// rangeIdx is internal tracking of which netipx.IPRange from the IPSet we are currently on.
	rangeIdx int
}
// newIPPool constructs an ippool over the addresses of ipset. A nil ipset
// yields an empty pool whose next immediately reports errPoolExhausted.
func newIPPool(ipset *netipx.IPSet) *ippool {
	pool := &ippool{}
	if ipset != nil {
		pool.ranges = ipset.Ranges()
	}
	return pool
}
// next returns the next address from the set, or errPoolExhausted if we have
// iterated over the whole set.
func (ipp *ippool) next() (netip.Addr, error) {
	switch {
	case ipp.rangeIdx >= len(ipp.ranges):
		// Empty pool, or iteration already ran past the final range.
		return netip.Addr{}, errPoolExhausted
	case !ipp.last.IsValid():
		// First call: start at the beginning of the first range.
		ipp.last = ipp.ranges[0].From()
	case ipp.last == ipp.ranges[ipp.rangeIdx].To():
		// Finished the current range; move on to the next one, if any.
		ipp.rangeIdx++
		if ipp.rangeIdx >= len(ipp.ranges) {
			return netip.Addr{}, errPoolExhausted
		}
		ipp.last = ipp.ranges[ipp.rangeIdx].From()
	default:
		// Still inside the current range: advance one address.
		ipp.last = ipp.last.Next()
	}
	return ipp.last, nil
}

@ -0,0 +1,60 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package appc
import (
"errors"
"net/netip"
"testing"
"go4.org/netipx"
"tailscale.com/util/must"
)
// TestNext exercises ippool iteration: empty pools, a multi-range set, and
// repeated exhaustion.
func TestNext(t *testing.T) {
	// A zero-value pool is immediately exhausted.
	zero := ippool{}
	if _, err := zero.next(); !errors.Is(err, errPoolExhausted) {
		t.Fatalf("expected errPoolExhausted, got %v", err)
	}
	// A pool built from an empty IPSet is likewise exhausted.
	var isb netipx.IPSetBuilder
	fromEmpty := newIPPool(must.Get(isb.IPSet()))
	if _, err := fromEmpty.next(); !errors.Is(err, errPoolExhausted) {
		t.Fatalf("expected errPoolExhausted, got %v", err)
	}
	// Build a set with a multi-address range, a single-address range, and a
	// two-address range; every address must come out in order.
	isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("192.168.0.0"), netip.MustParseAddr("192.168.0.2")))
	isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("200.0.0.0"), netip.MustParseAddr("200.0.0.0")))
	isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("201.0.0.0"), netip.MustParseAddr("201.0.0.1")))
	pool := newIPPool(must.Get(isb.IPSet()))
	for i, want := range []string{
		"192.168.0.0",
		"192.168.0.1",
		"192.168.0.2",
		"200.0.0.0",
		"201.0.0.0",
		"201.0.0.1",
	} {
		got, err := pool.next()
		if err != nil {
			t.Fatal(err)
		}
		if got != netip.MustParseAddr(want) {
			t.Fatalf("next call %d want: %s, got: %v", i, want, got)
		}
	}
	// Exhaustion is sticky: every further call keeps failing.
	for i := 0; i < 2; i++ {
		if _, err := pool.next(); !errors.Is(err, errPoolExhausted) {
			t.Fatalf("expected errPoolExhausted, got %v", err)
		}
	}
}

@ -0,0 +1,132 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_appconnectors
package appc
import (
"net/netip"
"strings"
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/util/mak"
)
// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS
// response is being returned over the PeerAPI. The response is parsed and
// matched against the configured domains, if matched the routeAdvertiser is
// advised to advertise the discovered route.
//
// It returns an error only for malformed DNS messages; answers for
// non-routed domains are silently ignored.
func (e *AppConnector) ObserveDNSResponse(res []byte) error {
	var p dnsmessage.Parser
	if _, err := p.Start(res); err != nil {
		return err
	}
	if err := p.SkipAllQuestions(); err != nil {
		return err
	}

	// cnameChain tracks a chain of CNAMEs for a given query in order to reverse
	// a CNAME chain back to the original query for flattening. The keys are
	// CNAME record targets, and the value is the name the record answers, so
	// for www.example.com CNAME example.com, the map would contain
	// ["example.com"] = "www.example.com".
	var cnameChain map[string]string

	// addressRecords is a list of address records found in the response.
	var addressRecords map[string][]netip.Addr

	for {
		h, err := p.AnswerHeader()
		if err == dnsmessage.ErrSectionDone {
			break
		}
		if err != nil {
			return err
		}

		if h.Class != dnsmessage.ClassINET {
			if err := p.SkipAnswer(); err != nil {
				return err
			}
			continue
		}

		switch h.Type {
		case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA:
		default:
			if err := p.SkipAnswer(); err != nil {
				return err
			}
			continue
		}

		domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".")
		if len(domain) == 0 {
			// Only the DNS root yields an empty name after trimming. Skip
			// the answer body before continuing: the parser caches the
			// pending header, so looping back to AnswerHeader without
			// consuming this record would return the same header again
			// and spin forever.
			if err := p.SkipAnswer(); err != nil {
				return err
			}
			continue
		}

		if h.Type == dnsmessage.TypeCNAME {
			res, err := p.CNAMEResource()
			if err != nil {
				return err
			}
			cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".")
			if len(cname) == 0 {
				continue
			}
			mak.Set(&cnameChain, cname, domain)
			continue
		}

		switch h.Type {
		case dnsmessage.TypeA:
			r, err := p.AResource()
			if err != nil {
				return err
			}
			addr := netip.AddrFrom4(r.A)
			mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
		case dnsmessage.TypeAAAA:
			r, err := p.AAAAResource()
			if err != nil {
				return err
			}
			addr := netip.AddrFrom16(r.AAAA)
			mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
		default:
			// Unreachable: h.Type is restricted to CNAME/A/AAAA above and
			// CNAME answers were fully handled before this switch; kept as
			// a defensive skip.
			if err := p.SkipAnswer(); err != nil {
				return err
			}
			continue
		}
	}

	e.mu.Lock()
	defer e.mu.Unlock()
	for domain, addrs := range addressRecords {
		domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain)

		// domain and none of the CNAMEs in the chain are routed
		if !isRouted {
			continue
		}

		// advertise each address we have learned for the routed domain, that
		// was not already known.
		var toAdvertise []netip.Prefix
		for _, addr := range addrs {
			if !e.isAddrKnownLocked(domain, addr) {
				toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen()))
			}
		}

		if len(toAdvertise) > 0 {
			e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise)
			e.scheduleAdvertisement(domain, toAdvertise...)
		}
	}
	return nil
}

@ -0,0 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build ts_omit_appconnectors
package appc
// ObserveDNSResponse is a no-op when the binary is built with
// ts_omit_appconnectors; it accepts and discards the raw DNS response res so
// callers don't need build-tag awareness.
func (e *AppConnector) ObserveDNSResponse(res []byte) error { return nil }

@ -0,0 +1,27 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build tailscale_go
package tailscaleroot
import (
"fmt"
"os"
"strings"
)
// init enforces, at process start, that a binary built with the tailscale_go
// build tag was actually compiled by the pinned Tailscale Go toolchain
// revision (GoToolchainRev). The TS_PERMIT_TOOLCHAIN_MISMATCH=1 environment
// variable downgrades a mismatch from a panic to a stderr warning.
func init() {
	rev, ok := tailscaleToolchainRev()
	if !ok {
		panic("binary built with tailscale_go build tag but failed to read build info or find tailscale.toolchain.rev in build info")
	}
	expected := strings.TrimSpace(GoToolchainRev)
	if rev == expected {
		return
	}
	if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" {
		fmt.Fprintf(os.Stderr, "tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1\n", rev, expected)
		return
	}
	panic(fmt.Sprintf("binary built with tailscale_go build tag but Go toolchain %q doesn't match github.com/tailscale/tailscale expected value %q; override this failure with TS_PERMIT_TOOLCHAIN_MISMATCH=1", rev, expected))
}

@ -15,8 +15,9 @@ import (
) )
// WriteFile writes data to filename+some suffix, then renames it into filename. // WriteFile writes data to filename+some suffix, then renames it into filename.
// The perm argument is ignored on Windows. If the target filename already // The perm argument is ignored on Windows, but if the target filename already
// exists but is not a regular file, WriteFile returns an error. // exists then the target file's attributes and ACLs are preserved. If the target
// filename already exists but is not a regular file, WriteFile returns an error.
func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
fi, err := os.Stat(filename) fi, err := os.Stat(filename)
if err == nil && !fi.Mode().IsRegular() { if err == nil && !fi.Mode().IsRegular() {
@ -47,5 +48,9 @@ func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
if err := f.Close(); err != nil { if err := f.Close(); err != nil {
return err return err
} }
return os.Rename(tmpName, filename) return Rename(tmpName, filename)
} }
// Rename srcFile to dstFile, similar to [os.Rename] but preserving file
// attributes and ACLs on Windows.
func Rename(srcFile, dstFile string) error { return rename(srcFile, dstFile) }

@ -0,0 +1,14 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package atomicfile
import (
"os"
)
// rename implements Rename for non-Windows platforms by deferring to
// os.Rename; the Windows-specific attribute/ACL preservation does not apply
// here.
func rename(srcFile, destFile string) error {
	return os.Rename(srcFile, destFile)
}

@ -31,11 +31,11 @@ func TestDoesNotOverwriteIrregularFiles(t *testing.T) {
// The least troublesome thing to make that is not a file is a unix socket. // The least troublesome thing to make that is not a file is a unix socket.
// Making a null device sadly requires root. // Making a null device sadly requires root.
l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer l.Close() defer ln.Close()
err = WriteFile(path, []byte("hello"), 0644) err = WriteFile(path, []byte("hello"), 0644)
if err == nil { if err == nil {

@ -0,0 +1,33 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package atomicfile
import (
"os"
"golang.org/x/sys/windows"
)
// rename moves srcFile over destFile, preferring the Win32 ReplaceFile path
// so an existing destination keeps its attributes and ACLs.
func rename(srcFile, destFile string) error {
	switch err := replaceFile(destFile, srcFile); err {
	case windows.ERROR_FILE_NOT_FOUND:
		// destFile doesn't exist yet; a plain rename suffices.
		return os.Rename(srcFile, destFile)
	default:
		// Success (nil) or any other failure is returned as-is.
		return err
	}
}
// replaceFile swaps destFile's contents with srcFile's via the Win32
// ReplaceFileW API, which preserves the destination's metadata. It fails
// with ERROR_FILE_NOT_FOUND when destFile does not exist.
func replaceFile(destFile, srcFile string) error {
	dest16, err := windows.UTF16PtrFromString(destFile)
	if err != nil {
		return err
	}
	src16, err := windows.UTF16PtrFromString(srcFile)
	if err != nil {
		return err
	}
	return replaceFileW(dest16, src16, nil, 0, nil, nil)
}

@ -0,0 +1,146 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package atomicfile
import (
"os"
"testing"
"unsafe"
"golang.org/x/sys/windows"
)
var _SECURITY_RESOURCE_MANAGER_AUTHORITY = windows.SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 9}}
// makeRandomSID generates a SID derived from a v4 GUID.
// This is basically the same algorithm used by browser sandboxes for generating
// random SIDs.
func makeRandomSID() (*windows.SID, error) {
	guid, err := windows.GenerateGUID()
	if err != nil {
		return nil, err
	}
	// Reinterpret the GUID's 16 bytes as four 32-bit subauthorities (RIDs).
	rids := *((*[4]uint32)(unsafe.Pointer(&guid)))
	var pSID *windows.SID
	if err := windows.AllocateAndInitializeSid(&_SECURITY_RESOURCE_MANAGER_AUTHORITY, 4, rids[0], rids[1], rids[2], rids[3], 0, 0, 0, 0, &pSID); err != nil {
		return nil, err
	}
	// pSID was allocated by the OS; it must be freed once copied below.
	defer windows.FreeSid(pSID)
	// Make a copy that lives on the Go heap
	return pSID.Copy()
}
// getExistingFileSD returns the security descriptor (DACL information only)
// of the named existing file.
func getExistingFileSD(name string) (*windows.SECURITY_DESCRIPTOR, error) {
	const infoFlags = windows.DACL_SECURITY_INFORMATION
	return windows.GetNamedSecurityInfo(name, windows.SE_FILE_OBJECT, infoFlags)
}
// getExistingFileDACL returns the DACL of the named existing file.
func getExistingFileDACL(name string) (*windows.ACL, error) {
	sd, err := getExistingFileSD(name)
	if err != nil {
		return nil, err
	}
	dacl, _, err := sd.DACL()
	if err != nil {
		return nil, err
	}
	return dacl, nil
}
// addDenyACEForRandomSID returns a copy of dacl with one additional deny-only
// ACE for a freshly generated random SID. The ACE is harmless (the SID is
// never used) but makes the resulting DACL recognizable later.
func addDenyACEForRandomSID(dacl *windows.ACL) (*windows.ACL, error) {
	randomSID, err := makeRandomSID()
	if err != nil {
		return nil, err
	}
	trustee := windows.TRUSTEE{
		MultipleTrustee:          nil,
		MultipleTrusteeOperation: windows.NO_MULTIPLE_TRUSTEE,
		TrusteeForm:              windows.TRUSTEE_IS_SID,
		TrusteeType:              windows.TRUSTEE_IS_UNKNOWN,
		TrusteeValue:             windows.TrusteeValueFromSID(randomSID),
	}
	entries := []windows.EXPLICIT_ACCESS{{
		AccessPermissions: windows.GENERIC_ALL,
		AccessMode:        windows.DENY_ACCESS,
		Inheritance:       windows.NO_INHERITANCE,
		Trustee:           trustee,
	}}
	return windows.ACLFromEntries(entries, dacl)
}
// setExistingFileDACL replaces the DACL on the named existing file.
func setExistingFileDACL(name string, dacl *windows.ACL) error {
	return windows.SetNamedSecurityInfo(name, windows.SE_FILE_OBJECT,
		windows.DACL_SECURITY_INFORMATION, nil, nil, dacl, nil)
}
// makeOrigFileWithCustomDACL creates a new, temporary file with a custom
// DACL that we can check for later. It returns the name of the temporary
// file and the security descriptor for the file in SDDL format.
func makeOrigFileWithCustomDACL() (name, sddl string, err error) {
	f, err := os.CreateTemp("", "foo*.tmp")
	if err != nil {
		return "", "", err
	}
	name = f.Name()
	if err := f.Close(); err != nil {
		return "", "", err
	}
	// The handle is closed; drop the reference so nothing below can use it.
	f = nil
	// Best-effort cleanup: remove the temp file if any later step fails
	// (err is the named return, so this sees the final error value).
	defer func() {
		if err != nil {
			os.Remove(name)
		}
	}()
	dacl, err := getExistingFileDACL(name)
	if err != nil {
		return "", "", err
	}
	// Add a harmless, deny-only ACE for a random SID that isn't used for anything
	// (but that we can check for later).
	dacl, err = addDenyACEForRandomSID(dacl)
	if err != nil {
		return "", "", err
	}
	if err := setExistingFileDACL(name, dacl); err != nil {
		return "", "", err
	}
	// Re-read the descriptor so the returned SDDL reflects what's on disk.
	sd, err := getExistingFileSD(name)
	if err != nil {
		return "", "", err
	}
	return name, sd.String(), nil
}
// TestPreserveSecurityInfo verifies that WriteFile keeps the destination
// file's security descriptor (DACL) intact when overwriting it.
func TestPreserveSecurityInfo(t *testing.T) {
	// Create a file whose DACL carries a recognizable custom ACE.
	name, want, err := makeOrigFileWithCustomDACL()
	if err != nil {
		t.Fatalf("makeOrigFileWithCustomDACL returned %v", err)
	}
	t.Cleanup(func() {
		os.Remove(name)
	})
	// Overwrite it through the code under test.
	if err := WriteFile(name, []byte{}, 0); err != nil {
		t.Fatalf("WriteFile returned %v", err)
	}
	// The security descriptor should be unchanged despite the WriteFile call.
	sd, err := getExistingFileSD(name)
	if err != nil {
		t.Fatalf("getExistingFileSD(%q) returned %v", name, err)
	}
	if got := sd.String(); got != want {
		t.Errorf("security descriptor comparison failed: got %q, want %q", got, want)
	}
}

@ -0,0 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package atomicfile
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go
//sys replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) [int32(failretval)==0] = kernel32.ReplaceFileW

@ -0,0 +1,52 @@
// Code generated by 'go generate'; DO NOT EDIT.
package atomicfile
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procReplaceFileW = modkernel32.NewProc("ReplaceFileW")
)
func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) {
r1, _, e1 := syscall.SyscallN(procReplaceFileW.Addr(), uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved))
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}

@ -18,7 +18,7 @@ fi
eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion` eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion`
if [ "$1" = "shellvars" ]; then if [ "$#" -ge 1 ] && [ "$1" = "shellvars" ]; then
cat <<EOF cat <<EOF
VERSION_MINOR="$VERSION_MINOR" VERSION_MINOR="$VERSION_MINOR"
VERSION_SHORT="$VERSION_SHORT" VERSION_SHORT="$VERSION_SHORT"
@ -28,18 +28,34 @@ EOF
exit 0 exit 0
fi fi
tags="" tags="${TAGS:-}"
ldflags="-X tailscale.com/version.longStamp=${VERSION_LONG} -X tailscale.com/version.shortStamp=${VERSION_SHORT}" ldflags="-X tailscale.com/version.longStamp=${VERSION_LONG} -X tailscale.com/version.shortStamp=${VERSION_SHORT}"
# build_dist.sh arguments must precede go build arguments. # build_dist.sh arguments must precede go build arguments.
while [ "$#" -gt 1 ]; do while [ "$#" -gt 1 ]; do
case "$1" in case "$1" in
--extra-small) --extra-small)
if [ ! -z "${TAGS:-}" ]; then
echo "set either --extra-small or \$TAGS, but not both"
exit 1
fi
shift shift
ldflags="$ldflags -w -s" ldflags="$ldflags -w -s"
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion" tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min --add=osrouter)"
;;
--min)
# --min is like --extra-small but even smaller, removing all features,
# even if it results in a useless binary (e.g. removing both netstack +
# osrouter). It exists for benchmarking purposes only.
shift
ldflags="$ldflags -w -s"
tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min)"
;; ;;
--box) --box)
if [ ! -z "${TAGS:-}" ]; then
echo "set either --box or \$TAGS, but not both"
exit 1
fi
shift shift
tags="${tags:+$tags,}ts_include_cli" tags="${tags:+$tags,}ts_include_cli"
;; ;;
@ -49,4 +65,4 @@ while [ "$#" -gt 1 ]; do
esac esac
done done
exec $go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@" exec $go build ${tags:+-tags=$tags} -trimpath -ldflags "$ldflags" "$@"

@ -6,6 +6,16 @@
# hash of this repository as produced by ./cmd/mkversion. # hash of this repository as produced by ./cmd/mkversion.
# This is the image build mechanim used to build the official Tailscale # This is the image build mechanim used to build the official Tailscale
# container images. # container images.
#
# If you want to build local images for testing, you can use make, which provides few convenience wrappers around this script.
#
# To build a Tailscale image and push to the local docker registry:
# $ REPO=local/tailscale TAGS=v0.0.1 PLATFORM=local make publishdevimage
#
# To build a Tailscale image and push to a remote docker registry:
#
# $ REPO=<your-registry>/<your-repo>/tailscale TAGS=v0.0.1 make publishdevimage
set -eu set -eu
@ -16,13 +26,22 @@ eval "$(./build_dist.sh shellvars)"
DEFAULT_TARGET="client" DEFAULT_TARGET="client"
DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}" DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}"
DEFAULT_BASE="tailscale/alpine-base:3.18" DEFAULT_BASE="tailscale/alpine-base:3.22"
# Set a few pre-defined OCI annotations. The source annotation is used by tools such as Renovate that scan the linked
# Github repo to find release notes for any new image tags. Note that for official Tailscale images the default
# annotations defined here will be overriden by release scripts that call this script.
# https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys
DEFAULT_ANNOTATIONS="org.opencontainers.image.source=https://github.com/tailscale/tailscale/blob/main/build_docker.sh,org.opencontainers.image.vendor=Tailscale"
PUSH="${PUSH:-false}" PUSH="${PUSH:-false}"
TARGET="${TARGET:-${DEFAULT_TARGET}}" TARGET="${TARGET:-${DEFAULT_TARGET}}"
TAGS="${TAGS:-${DEFAULT_TAGS}}" TAGS="${TAGS:-${DEFAULT_TAGS}}"
BASE="${BASE:-${DEFAULT_BASE}}" BASE="${BASE:-${DEFAULT_BASE}}"
PLATFORM="${PLATFORM:-}" # default to all platforms PLATFORM="${PLATFORM:-}" # default to all platforms
FILES="${FILES:-}" # default to no extra files
# OCI annotations that will be added to the image.
# https://github.com/opencontainers/image-spec/blob/main/annotations.md
ANNOTATIONS="${ANNOTATIONS:-${DEFAULT_ANNOTATIONS}}"
case "$TARGET" in case "$TARGET" in
client) client)
@ -43,9 +62,11 @@ case "$TARGET" in
--repos="${REPOS}" \ --repos="${REPOS}" \
--push="${PUSH}" \ --push="${PUSH}" \
--target="${PLATFORM}" \ --target="${PLATFORM}" \
--annotations="${ANNOTATIONS}" \
--files="${FILES}" \
/usr/local/bin/containerboot /usr/local/bin/containerboot
;; ;;
operator) k8s-operator)
DEFAULT_REPOS="tailscale/k8s-operator" DEFAULT_REPOS="tailscale/k8s-operator"
REPOS="${REPOS:-${DEFAULT_REPOS}}" REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \ go run github.com/tailscale/mkctr \
@ -56,9 +77,12 @@ case "$TARGET" in
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \ --base="${BASE}" \
--tags="${TAGS}" \ --tags="${TAGS}" \
--gotags="ts_kube,ts_package_container" \
--repos="${REPOS}" \ --repos="${REPOS}" \
--push="${PUSH}" \ --push="${PUSH}" \
--target="${PLATFORM}" \ --target="${PLATFORM}" \
--annotations="${ANNOTATIONS}" \
--files="${FILES}" \
/usr/local/bin/operator /usr/local/bin/operator
;; ;;
k8s-nameserver) k8s-nameserver)
@ -72,11 +96,52 @@ case "$TARGET" in
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \ --base="${BASE}" \
--tags="${TAGS}" \ --tags="${TAGS}" \
--gotags="ts_kube,ts_package_container" \
--repos="${REPOS}" \ --repos="${REPOS}" \
--push="${PUSH}" \ --push="${PUSH}" \
--target="${PLATFORM}" \ --target="${PLATFORM}" \
--annotations="${ANNOTATIONS}" \
--files="${FILES}" \
/usr/local/bin/k8s-nameserver /usr/local/bin/k8s-nameserver
;; ;;
tsidp)
DEFAULT_REPOS="tailscale/tsidp"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="tailscale.com/cmd/tsidp:/usr/local/bin/tsidp" \
--ldflags=" \
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--gotags="ts_package_container" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
--annotations="${ANNOTATIONS}" \
--files="${FILES}" \
/usr/local/bin/tsidp
;;
k8s-proxy)
DEFAULT_REPOS="tailscale/k8s-proxy"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="tailscale.com/cmd/k8s-proxy:/usr/local/bin/k8s-proxy" \
--ldflags=" \
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--gotags="ts_kube,ts_package_container" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
--annotations="${ANNOTATIONS}" \
--files="${FILES}" \
/usr/local/bin/k8s-proxy
;;
*) *)
echo "unknown target: $TARGET" echo "unknown target: $TARGET"
exit 1 exit 1

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS // Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause // SPDX-License-Identifier: BSD-3-Clause
package chirp package chirp
import ( import (
@ -23,7 +24,7 @@ type fakeBIRD struct {
func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
sock := filepath.Join(t.TempDir(), "sock") sock := filepath.Join(t.TempDir(), "sock")
l, err := net.Listen("unix", sock) ln, err := net.Listen("unix", sock)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -32,7 +33,7 @@ func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
pe[p] = false pe[p] = false
} }
return &fakeBIRD{ return &fakeBIRD{
Listener: l, Listener: ln,
protocolsEnabled: pe, protocolsEnabled: pe,
sock: sock, sock: sock,
} }
@ -122,12 +123,12 @@ type hangingListener struct {
func newHangingListener(t *testing.T) *hangingListener { func newHangingListener(t *testing.T) *hangingListener {
sock := filepath.Join(t.TempDir(), "sock") sock := filepath.Join(t.TempDir(), "sock")
l, err := net.Listen("unix", sock) ln, err := net.Listen("unix", sock)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
return &hangingListener{ return &hangingListener{
Listener: l, Listener: ln,
t: t, t: t,
done: make(chan struct{}), done: make(chan struct{}),
sock: sock, sock: sock,

@ -0,0 +1,151 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !js && !ts_omit_acme
package local
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net/url"
"strings"
"time"
"go4.org/mem"
)
// SetDNS adds a DNS TXT record for the given domain name, containing
// the provided TXT value. The intended use case is answering
// LetsEncrypt/ACME dns-01 challenges.
//
// The control plane will only permit SetDNS requests with very
// specific names and values. The name should be
// "_acme-challenge." + your node's MagicDNS name. It's expected that
// clients cache the certs from LetsEncrypt (or whichever CA is
// providing them) and only request new ones as needed; the control plane
// rate limits SetDNS requests.
//
// This is a low-level interface; it's expected that most Tailscale
// users use a higher level interface to getting/using TLS
// certificates.
func (lc *Client) SetDNS(ctx context.Context, name, value string) error {
	// Build the query string up front; both fields are mandatory.
	q := url.Values{
		"name":  {name},
		"value": {value},
	}
	_, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+q.Encode(), 200, nil)
	return err
}
// CertPair returns a cert and private key for the provided DNS domain.
//
// It returns a cached certificate from disk if it's still valid.
//
// Deprecated: use [Client.CertPair].
func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
	// Delegates to the package-level default client.
	return defaultClient.CertPair(ctx, domain)
}
// CertPair returns a cert and private key for the provided DNS domain.
//
// It returns a cached certificate from disk if it's still valid.
//
// API maturity: this is considered a stable API.
func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
	// A zero minValidity means any remaining validity on a cached cert is acceptable.
	return lc.CertPairWithValidity(ctx, domain, 0)
}
// CertPairWithValidity returns a cert and private key for the provided DNS
// domain.
//
// It returns a cached certificate from disk if it's still valid.
// When minValidity is non-zero, the returned certificate will be valid for at
// least the given duration, if permitted by the CA. If the certificate is
// valid, but for less than minValidity, it will be synchronously renewed.
//
// API maturity: this is considered a stable API.
func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) {
	// NOTE(review): domain is interpolated into the URL path without
	// escaping — presumably callers only pass valid DNS names; confirm
	// this is never fed untrusted input.
	res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil)
	if err != nil {
		return nil, nil, err
	}
	// with ?type=pair, the response PEM is first the one private
	// key PEM block, then the cert PEM blocks.
	i := mem.Index(mem.B(res), mem.S("--\n--"))
	if i == -1 {
		return nil, nil, fmt.Errorf("unexpected output: no delimiter")
	}
	// Split just after the key block's trailing "--\n" so each half is
	// a complete PEM document.
	i += len("--\n")
	keyPEM, certPEM = res[:i], res[i:]
	// Sanity check: the cert half must not contain private key material.
	if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) {
		return nil, nil, fmt.Errorf("unexpected output: key in cert")
	}
	return certPEM, keyPEM, nil
}
// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi.
//
// It returns a cached certificate from disk if it's still valid.
//
// It's the right signature to use as the value of
// [tls.Config.GetCertificate].
//
// Deprecated: use [Client.GetCertificate].
func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
	// Delegates to the package-level default client.
	return defaultClient.GetCertificate(hi)
}
// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi.
//
// It returns a cached certificate from disk if it's still valid.
//
// It's the right signature to use as the value of
// [tls.Config.GetCertificate].
//
// API maturity: this is considered a stable API.
func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
	if hi == nil || hi.ServerName == "" {
		return nil, errors.New("no SNI ServerName")
	}
	// Bound the whole fetch to one minute.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	serverName := hi.ServerName
	// A bare label (no dot) is expanded to the most likely full cert name.
	if !strings.Contains(serverName, ".") {
		if fqdn, ok := lc.ExpandSNIName(ctx, serverName); ok {
			serverName = fqdn
		}
	}
	certPEM, keyPEM, err := lc.CertPair(ctx, serverName)
	if err != nil {
		return nil, err
	}
	pair, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, err
	}
	return &pair, nil
}
// ExpandSNIName expands bare label name into the most likely actual TLS cert name.
//
// Deprecated: use [Client.ExpandSNIName].
func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) {
	// Delegates to the package-level default client.
	return defaultClient.ExpandSNIName(ctx, name)
}
// ExpandSNIName expands bare label name into the most likely actual TLS cert name.
func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) {
	st, err := lc.StatusWithoutPeers(ctx)
	if err != nil {
		return "", false
	}
	// Look for the first cert domain of the form "<name>.<something>".
	prefix := name + "."
	for _, domain := range st.CertDomains {
		// Require at least one character after the dot so "name." alone
		// never matches.
		if strings.HasPrefix(domain, prefix) && len(domain) > len(prefix) {
			return domain, true
		}
	}
	return "", false
}

@ -0,0 +1,84 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_debugportmapper
package local
import (
"cmp"
"context"
"fmt"
"io"
"net/http"
"net/netip"
"net/url"
"strconv"
"time"
"tailscale.com/client/tailscale/apitype"
)
// DebugPortmapOpts contains options for the [Client.DebugPortmap] command.
type DebugPortmapOpts struct {
	// Duration is how long the mapping should be created for. It defaults
	// to 5 seconds if not set.
	Duration time.Duration
	// Type is the kind of portmap to debug. The empty string instructs the
	// portmap client to perform all known types. Other valid options are
	// "pmp", "pcp", and "upnp".
	Type string
	// GatewayAddr specifies the gateway address used during portmapping.
	// If set, SelfAddr must also be set. If unset, it will be
	// autodetected.
	GatewayAddr netip.Addr
	// SelfAddr specifies this node's own address used during portmapping.
	// If set, GatewayAddr must also be set. If unset, it will be
	// autodetected.
	SelfAddr netip.Addr
	// LogHTTP instructs the debug-portmap endpoint to print all HTTP
	// requests and responses made to the logs.
	LogHTTP bool
}
// DebugPortmap invokes the debug-portmap endpoint, and returns an
// io.ReadCloser that can be used to read the logs that are printed during this
// process.
//
// opts can be nil; if so, default values will be used.
func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) {
	// Treat a nil opts as all-defaults.
	if opts == nil {
		opts = &DebugPortmapOpts{}
	}
	q := url.Values{}
	q.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String())
	q.Set("type", opts.Type)
	q.Set("log_http", strconv.FormatBool(opts.LogHTTP))
	// Gateway and self addresses are all-or-nothing.
	switch {
	case opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid():
		return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is")
	case opts.GatewayAddr.IsValid():
		q.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr))
	}
	req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+q.Encode(), nil)
	if err != nil {
		return nil, err
	}
	res, err := lc.doLocalRequestNiceError(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode != 200 {
		body, _ := io.ReadAll(res.Body)
		res.Body.Close()
		return nil, fmt.Errorf("HTTP %s: %s", res.Status, body)
	}
	// Caller owns res.Body and must close it.
	return res.Body, nil
}

File diff suppressed because it is too large Load Diff

@ -3,16 +3,16 @@
//go:build go1.19 //go:build go1.19
package tailscale package local
import ( import (
"context" "context"
"net" "net"
"net/http" "net/http"
"net/http/httptest"
"testing" "testing"
"tailscale.com/tstest/deptest" "tailscale.com/tstest/deptest"
"tailscale.com/tstest/nettest"
"tailscale.com/types/key" "tailscale.com/types/key"
) )
@ -36,15 +36,15 @@ func TestGetServeConfigFromJSON(t *testing.T) {
} }
func TestWhoIsPeerNotFound(t *testing.T) { func TestWhoIsPeerNotFound(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { nw := nettest.GetNetwork(t)
ts := nettest.NewHTTPServer(nw, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(404) w.WriteHeader(404)
})) }))
defer ts.Close() defer ts.Close()
lc := &LocalClient{ lc := &Client{
Dial: func(ctx context.Context, network, addr string) (net.Conn, error) { Dial: func(ctx context.Context, network, addr string) (net.Conn, error) {
var std net.Dialer return nw.Dial(ctx, network, ts.Listener.Addr().String())
return std.DialContext(ctx, network, ts.Listener.Addr().(*net.TCPAddr).String())
}, },
} }
var k key.NodePublic var k key.NodePublic

@ -0,0 +1,55 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_serve
package local
import (
"context"
"encoding/json"
"fmt"
"net/http"
"tailscale.com/ipn"
)
// GetServeConfig returns the current serve config.
//
// If the serve config is empty, it returns (nil, nil).
func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) {
	body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil)
	if err != nil {
		return nil, fmt.Errorf("getting serve config: %w", err)
	}
	sc, err := getServeConfigFromJSON(body)
	if err != nil {
		return nil, err
	}
	// A "null" JSON body decodes to nil; normalize to an empty config so
	// the ETag can be attached below.
	if sc == nil {
		sc = new(ipn.ServeConfig)
	}
	// The ETag is carried so a later SetServeConfig can do an If-Match update.
	sc.ETag = h.Get("Etag")
	return sc, nil
}
// getServeConfigFromJSON decodes a JSON-encoded serve config;
// a JSON "null" yields a nil config with no error.
func getServeConfigFromJSON(body []byte) (*ipn.ServeConfig, error) {
	var cfg *ipn.ServeConfig
	if err := json.Unmarshal(body, &cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// SetServeConfig sets or replaces the serving settings.
// If config is nil, settings are cleared and serving is disabled.
func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error {
	headers := make(http.Header)
	if config != nil {
		// Optimistic concurrency: only apply if the server-side config
		// still matches the ETag we previously read.
		headers.Set("If-Match", config.ETag)
	}
	if _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), headers); err != nil {
		return fmt.Errorf("sending serve config: %w", err)
	}
	return nil
}

@ -0,0 +1,40 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_syspolicy
package local
import (
"context"
"net/http"
"tailscale.com/util/syspolicy/setting"
)
// GetEffectivePolicy returns the effective policy for the specified scope.
func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) {
	// The scope's textual form is used directly as the URL path element.
	scopeID, err := scope.MarshalText()
	if err != nil {
		return nil, err
	}
	body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID))
	if err != nil {
		return nil, err
	}
	return decodeJSON[*setting.Snapshot](body)
}
// ReloadEffectivePolicy reloads the effective policy for the specified scope
// by reading and merging policy settings from all applicable policy sources.
func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) {
	scopeID, err := scope.MarshalText()
	if err != nil {
		return nil, err
	}
	// POST (vs the GET in GetEffectivePolicy) triggers the reload; the
	// refreshed snapshot is returned in the response body.
	body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody)
	if err != nil {
		return nil, err
	}
	return decodeJSON[*setting.Snapshot](body)
}

@ -0,0 +1,204 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package local
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tka"
"tailscale.com/types/key"
"tailscale.com/types/tkatype"
)
// NetworkLockStatus fetches information about the tailnet key authority, if one is configured.
func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) {
	// Expects HTTP 200; the body is a JSON-encoded NetworkLockStatus.
	body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil)
	if err != nil {
		return nil, fmt.Errorf("error: %w", err)
	}
	return decodeJSON[*ipnstate.NetworkLockStatus](body)
}
// NetworkLockInit initializes the tailnet key authority.
//
// TODO(tom): Plumb through disablement secrets.
func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) {
	var b bytes.Buffer
	// initRequest mirrors the JSON payload expected by the LocalAPI handler.
	type initRequest struct {
		Keys               []tka.Key
		DisablementValues  [][]byte
		SupportDisablement []byte
	}
	if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil {
		return nil, err
	}
	body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b)
	if err != nil {
		return nil, fmt.Errorf("error: %w", err)
	}
	// On success the response body is the updated lock status.
	return decodeJSON[*ipnstate.NetworkLockStatus](body)
}
// NetworkLockWrapPreauthKey wraps a pre-auth key with information to
// enable unattended bringup in the locked tailnet.
func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) {
	// The private key travels as its marshaled-text form inside the JSON body.
	encodedPrivate, err := tkaKey.MarshalText()
	if err != nil {
		return "", err
	}
	var b bytes.Buffer
	type wrapRequest struct {
		TSKey  string
		TKAKey string // key.NLPrivate.MarshalText
	}
	if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil {
		return "", err
	}
	body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b)
	if err != nil {
		return "", fmt.Errorf("error: %w", err)
	}
	// The raw response body is the wrapped pre-auth key.
	return string(body), nil
}
// NetworkLockModify adds and/or removes key(s) to the tailnet key authority.
func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error {
	var b bytes.Buffer
	type modifyRequest struct {
		AddKeys    []tka.Key
		RemoveKeys []tka.Key
	}
	if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil {
		return err
	}
	// Note: this endpoint replies 204 No Content, unlike the other
	// tka endpoints in this file which expect 200.
	if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil {
		return fmt.Errorf("error: %w", err)
	}
	return nil
}
// NetworkLockSign signs the specified node-key and transmits that signature to the control plane.
// rotationPublic, if specified, must be an ed25519 public key.
func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error {
	var b bytes.Buffer
	type signRequest struct {
		NodeKey        key.NodePublic
		RotationPublic []byte
	}
	if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil {
		return err
	}
	// Success is signaled by the 200 status alone; the body is not used.
	if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil {
		return fmt.Errorf("error: %w", err)
	}
	return nil
}
// NetworkLockAffectedSigs returns all signatures signed by the specified keyID.
func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) {
	// The key ID bytes are sent raw as the POST body, not JSON-wrapped.
	body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID))
	if err != nil {
		return nil, fmt.Errorf("error: %w", err)
	}
	return decodeJSON[[]tkatype.MarshaledSignature](body)
}
// NetworkLockLog returns up to maxEntries number of changes to network-lock state.
func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) {
	v := url.Values{}
	v.Set("limit", fmt.Sprint(maxEntries))
	body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil)
	if err != nil {
		// NOTE(review): body is folded into the error message; presumably
		// lc.send returns any response body alongside the error — confirm.
		return nil, fmt.Errorf("error %w: %s", err, body)
	}
	return decodeJSON[[]ipnstate.NetworkLockUpdate](body)
}
// NetworkLockForceLocalDisable forcibly shuts down network lock on this node.
func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error {
	// This endpoint expects an empty JSON stanza ("{}") as the payload.
	var payload bytes.Buffer
	if err := json.NewEncoder(&payload).Encode(struct{}{}); err != nil {
		return err
	}
	_, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &payload)
	if err != nil {
		return fmt.Errorf("error: %w", err)
	}
	return nil
}
// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained
// in url and returns information extracted from it.
func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) {
	// Anonymous struct matching the endpoint's expected {"URL": ...} payload.
	vr := struct {
		URL string
	}{url}
	body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr))
	if err != nil {
		return nil, fmt.Errorf("sending verify-deeplink: %w", err)
	}
	return decodeJSON[*tka.DeeplinkValidationResult](body)
}
// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise.
func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) {
	vr := struct {
		Keys     []tkatype.KeyID
		ForkFrom string
	}{removeKeys, forkFrom.String()}
	body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr))
	if err != nil {
		return nil, fmt.Errorf("sending generate-recovery-aum: %w", err)
	}
	// The raw response body is the generated AUM bytes.
	return body, nil
}
// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key.
func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) {
	// The AUM is sent in its serialized binary form, not JSON.
	r := bytes.NewReader(aum.Serialize())
	body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r)
	if err != nil {
		return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err)
	}
	// The raw response body is the co-signed AUM bytes.
	return body, nil
}
// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane.
func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error {
	// The AUM is sent in its serialized binary form, not JSON.
	r := bytes.NewReader(aum.Serialize())
	_, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r)
	if err != nil {
		// Fix: the error previously said "cosign-recovery-aum", a
		// copy/paste from NetworkLockCosignRecoveryAUM, misreporting
		// which endpoint failed.
		return fmt.Errorf("sending submit-recovery-aum: %w", err)
	}
	return nil
}
// NetworkLockDisable shuts down network-lock across the tailnet.
func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error {
	// The disablement secret is sent raw as the POST body.
	if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil {
		return fmt.Errorf("error: %w", err)
	}
	return nil
}

@ -0,0 +1,327 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build cgo || !darwin
package systray
import (
"bytes"
"context"
"image"
"image/color"
"image/png"
"runtime"
"sync"
"time"
"fyne.io/systray"
ico "github.com/Kodeworks/golang-image-ico"
"github.com/fogleman/gg"
)
// tsLogo represents the Tailscale logo displayed as the systray icon.
type tsLogo struct {
	// dots represents the state of the 3x3 dot grid in the logo,
	// stored row-major (index = y*3 + x).
	// A 0 represents a gray dot, any other value is a white dot.
	dots [9]byte
	// dotMask returns an image mask to be used when rendering the logo dots.
	// May be nil, in which case all dots are drawn unmasked.
	dotMask func(dc *gg.Context, borderUnits int, radius int) *image.Alpha
	// overlay is called after the dots are rendered to draw an additional overlay.
	// May be nil.
	overlay func(dc *gg.Context, borderUnits int, radius int)
}
var (
	// disconnected is all gray dots
	disconnected = tsLogo{dots: [9]byte{
		0, 0, 0,
		0, 0, 0,
		0, 0, 0,
	}}
	// connected is the normal Tailscale logo
	connected = tsLogo{dots: [9]byte{
		0, 0, 0,
		1, 1, 1,
		0, 1, 0,
	}}
	// loading is a special tsLogo value that is not meant to be rendered directly,
	// but indicates that the loading animation should be shown.
	// The byte values are arbitrary sentinels compared against in setAppIcon,
	// never drawn.
	loading = tsLogo{dots: [9]byte{'l', 'o', 'a', 'd', 'i', 'n', 'g'}}
	// loadingLogos are shown in sequence as an animated loading icon.
	loadingLogos = []tsLogo{
		{dots: [9]byte{
			0, 1, 1,
			1, 0, 1,
			0, 0, 1,
		}},
		{dots: [9]byte{
			0, 1, 1,
			0, 0, 1,
			0, 1, 0,
		}},
		{dots: [9]byte{
			0, 1, 1,
			0, 0, 0,
			0, 0, 1,
		}},
		{dots: [9]byte{
			0, 0, 1,
			0, 1, 0,
			0, 0, 0,
		}},
		{dots: [9]byte{
			0, 1, 0,
			0, 0, 0,
			0, 0, 0,
		}},
		{dots: [9]byte{
			0, 0, 0,
			0, 0, 1,
			0, 0, 0,
		}},
		{dots: [9]byte{
			0, 0, 0,
			0, 0, 0,
			0, 0, 0,
		}},
		{dots: [9]byte{
			0, 0, 1,
			0, 0, 0,
			0, 0, 0,
		}},
		{dots: [9]byte{
			0, 0, 0,
			0, 0, 0,
			1, 0, 0,
		}},
		{dots: [9]byte{
			0, 0, 0,
			0, 0, 0,
			1, 1, 0,
		}},
		{dots: [9]byte{
			0, 0, 0,
			1, 0, 0,
			1, 1, 0,
		}},
		{dots: [9]byte{
			0, 0, 0,
			1, 1, 0,
			0, 1, 0,
		}},
		{dots: [9]byte{
			0, 0, 0,
			1, 1, 0,
			0, 1, 1,
		}},
		{dots: [9]byte{
			0, 0, 0,
			1, 1, 1,
			0, 0, 1,
		}},
		{dots: [9]byte{
			0, 1, 0,
			0, 1, 1,
			1, 0, 1,
		}},
	}
	// exitNodeOnline is the Tailscale logo with an additional arrow overlay in the corner.
	exitNodeOnline = tsLogo{
		dots: [9]byte{
			0, 0, 0,
			1, 1, 1,
			0, 1, 0,
		},
		// draw an arrow mask in the bottom right corner with a reasonably thick line width.
		dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha {
			bu, r := float64(borderUnits), float64(radius)
			x1 := r * (bu + 3.5)
			y := r * (bu + 7)
			x2 := x1 + (r * 5)
			mc := gg.NewContext(dc.Width(), dc.Height())
			mc.DrawLine(x1, y, x2, y)                 // arrow center line
			mc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) // top of arrow tip
			mc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) // bottom of arrow tip
			mc.SetLineWidth(r * 3)
			mc.Stroke()
			return mc.AsMask()
		},
		// draw an arrow in the bottom right corner over the masked area.
		overlay: func(dc *gg.Context, borderUnits int, radius int) {
			bu, r := float64(borderUnits), float64(radius)
			x1 := r * (bu + 3.5)
			y := r * (bu + 7)
			x2 := x1 + (r * 5)
			dc.DrawLine(x1, y, x2, y)                 // arrow center line
			dc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) // top of arrow tip
			dc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) // bottom of arrow tip
			dc.SetColor(fg)
			dc.SetLineWidth(r)
			dc.Stroke()
		},
	}
	// exitNodeOffline is the Tailscale logo with a red "x" in the corner.
	exitNodeOffline = tsLogo{
		dots: [9]byte{
			0, 0, 0,
			1, 1, 1,
			0, 1, 0,
		},
		// Draw a square that hides the four dots in the bottom right corner.
		dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha {
			bu, r := float64(borderUnits), float64(radius)
			x := r * (bu + 3)
			mc := gg.NewContext(dc.Width(), dc.Height())
			mc.DrawRectangle(x, x, r*6, r*6)
			mc.Fill()
			return mc.AsMask()
		},
		// draw a red "x" over the bottom right corner.
		overlay: func(dc *gg.Context, borderUnits int, radius int) {
			bu, r := float64(borderUnits), float64(radius)
			x1 := r * (bu + 4)
			x2 := x1 + (r * 3.5)
			dc.DrawLine(x1, x1, x2, x2) // top-left to bottom-right stroke
			dc.DrawLine(x1, x2, x2, x1) // bottom-left to top-right stroke
			dc.SetColor(red)
			dc.SetLineWidth(r)
			dc.Stroke()
		},
	}
)
var (
	bg   = color.NRGBA{0, 0, 0, 255}       // icon background: opaque black
	fg   = color.NRGBA{255, 255, 255, 255} // active dots and overlays: opaque white
	gray = color.NRGBA{255, 255, 255, 102} // inactive dots: translucent white
	red  = color.NRGBA{229, 111, 74, 255}  // exit-node-offline "x" overlay
)
// render returns an image of the logo with the default one-unit border:
// PNG on most platforms, ICO on Windows (see renderWithBorder).
func (logo tsLogo) render() *bytes.Buffer {
	const borderUnits = 1
	return logo.renderWithBorder(borderUnits)
}
// renderWithBorder returns an image of the logo (PNG, or ICO on Windows)
// with the specified border width.
// One border unit is equal to the radius of a tailscale logo dot.
func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer {
	const radius = 25
	// Three dots per row at 3r spacing (8r across the grid) plus the
	// border on both sides.
	dim := radius * (8 + borderUnits*2)
	dc := gg.NewContext(dim, dim)
	dc.DrawRectangle(0, 0, float64(dim), float64(dim))
	dc.SetColor(bg)
	dc.Fill()
	// Exclude the overlay's region from dot drawing so the overlay
	// isn't painted on top of partially-drawn dots.
	if logo.dotMask != nil {
		mask := logo.dotMask(dc, borderUnits, radius)
		dc.SetMask(mask)
		dc.InvertMask()
	}
	for y := 0; y < 3; y++ {
		for x := 0; x < 3; x++ {
			px := (borderUnits + 1 + 3*x) * radius
			py := (borderUnits + 1 + 3*y) * radius
			col := fg
			// Zero bytes in dots render as gray (inactive) dots.
			if logo.dots[y*3+x] == 0 {
				col = gray
			}
			dc.DrawCircle(float64(px), float64(py), radius)
			dc.SetColor(col)
			dc.Fill()
		}
	}
	if logo.overlay != nil {
		dc.ResetClip()
		logo.overlay(dc, borderUnits, radius)
	}
	b := bytes.NewBuffer(nil)
	// Encode as ICO format on Windows, PNG on all other platforms.
	// Encoding errors are deliberately ignored; an empty buffer is returned.
	if runtime.GOOS == "windows" {
		_ = ico.Encode(b, dc.Image())
	} else {
		_ = png.Encode(b, dc.Image())
	}
	return b
}
// setAppIcon renders logo and sets it as the systray icon.
func setAppIcon(icon tsLogo) {
	// The sentinel loading logo selects the animated icon rather than a
	// static render.
	if icon.dots == loading.dots {
		startLoadingAnimation()
		return
	}
	stopLoadingAnimation()
	systray.SetIcon(icon.render().Bytes())
}
var (
	loadingMu sync.Mutex // protects loadingCancel
	// loadingCancel stops the loading animation in the systray icon.
	// This is nil if the animation is not currently active.
	loadingCancel func()
)
// startLoadingAnimation starts the animated loading icon in the system tray.
// The animation continues until [stopLoadingAnimation] is called.
// If the loading animation is already active, this func does nothing.
func startLoadingAnimation() {
	loadingMu.Lock()
	defer loadingMu.Unlock()
	if loadingCancel != nil {
		// loading icon already displayed
		return
	}
	ctx := context.Background()
	ctx, loadingCancel = context.WithCancel(ctx)
	// Cycle through loadingLogos every 500ms until the context is canceled.
	go func() {
		// NOTE(review): the ticker is never stopped; a `defer t.Stop()`
		// here would release it on cancellation — confirm intentional.
		t := time.NewTicker(500 * time.Millisecond)
		var i int
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				systray.SetIcon(loadingLogos[i].render().Bytes())
				i++
				if i >= len(loadingLogos) {
					i = 0
				}
			}
		}
	}()
}
// stopLoadingAnimation stops the animated loading icon in the system tray.
// If the loading animation is not currently active, this func does nothing.
func stopLoadingAnimation() {
	loadingMu.Lock()
	defer loadingMu.Unlock()
	// Nothing to do when no animation is running.
	if loadingCancel == nil {
		return
	}
	loadingCancel()
	loadingCancel = nil
}

@ -0,0 +1,76 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build cgo || !darwin
// Package systray provides a minimal Tailscale systray application.
package systray
import (
"bufio"
"bytes"
_ "embed"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
//go:embed tailscale-systray.service
var embedSystemd string
// InstallStartupScript installs a login-time startup entry for the systray
// app under the given init system. Only "systemd" is supported.
func InstallStartupScript(initSystem string) error {
	if initSystem == "systemd" {
		return installSystemd()
	}
	return fmt.Errorf("unsupported init system '%s'", initSystem)
}
// installSystemd installs a user-level systemd unit that runs
// "tailscale systray" on login.
//
// It rewrites the embedded example unit's ExecStart line to point at the
// tailscale binary found on $PATH, then writes the unit to
// <user config dir>/systemd/user/tailscale-systray.service.
func installSystemd() error {
	// Find the path to tailscale, just in case it's not where the example file
	// has it placed, and replace that before writing the file.
	tailscaleBin, err := exec.LookPath("tailscale")
	if err != nil {
		// Fix: added the missing ": " separator before the wrapped error.
		return fmt.Errorf("failed to find tailscale binary: %w", err)
	}
	var output bytes.Buffer
	scanner := bufio.NewScanner(strings.NewReader(embedSystemd))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "ExecStart=") {
			line = fmt.Sprintf("ExecStart=%s systray", tailscaleBin)
		}
		output.WriteString(line + "\n")
	}
	configDir, err := os.UserConfigDir()
	if err != nil {
		// Fall back to ~/.config when the platform config dir is unset.
		homeDir, err := os.UserHomeDir()
		if err != nil {
			return fmt.Errorf("unable to locate user home: %w", err)
		}
		configDir = filepath.Join(homeDir, ".config")
	}
	systemdDir := filepath.Join(configDir, "systemd", "user")
	if err := os.MkdirAll(systemdDir, 0o755); err != nil {
		// Fix: error message previously read "systemd uuser dir" (typo).
		return fmt.Errorf("failed creating systemd user dir: %w", err)
	}
	serviceFile := filepath.Join(systemdDir, "tailscale-systray.service")
	// NOTE(review): 0o755 makes the unit file executable; systemd only
	// needs it readable (0o644) — confirm before tightening.
	if err := os.WriteFile(serviceFile, output.Bytes(), 0o755); err != nil {
		return fmt.Errorf("failed writing systemd user service: %w", err)
	}
	fmt.Printf("Successfully installed systemd service to: %s\n", serviceFile)
	fmt.Println("To enable and start the service, run:")
	fmt.Println("  systemctl --user daemon-reload")
	fmt.Println("  systemctl --user enable --now tailscale-systray")
	return nil
}

@ -0,0 +1,801 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build cgo || !darwin
// Package systray provides a minimal Tailscale systray application.
package systray
import (
"bytes"
"context"
"errors"
"fmt"
"image"
"io"
"log"
"net/http"
"os"
"os/signal"
"runtime"
"slices"
"strings"
"sync"
"syscall"
"time"
"fyne.io/systray"
ico "github.com/Kodeworks/golang-image-ico"
"github.com/atotto/clipboard"
dbus "github.com/godbus/dbus/v5"
"github.com/toqueteos/webbrowser"
"tailscale.com/client/local"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
"tailscale.com/util/slicesx"
"tailscale.com/util/stringsx"
)
var (
	// newMenuDelay is the amount of time to sleep after creating a new menu,
	// but before adding items to it. This works around a bug in some dbus
	// implementations; see the default case in init below.
	newMenuDelay time.Duration
	// if true, treat all mullvad exit node countries as single-city.
	// Instead of rendering a submenu with cities, just select the highest-priority peer.
	hideMullvadCities bool
)
// Run starts the systray menu and blocks until the menu exits.
// If client is nil, a default local.Client is used.
func (menu *Menu) Run(client *local.Client) {
	if client == nil {
		client = &local.Client{}
	}
	menu.lc = client
	// updateState also performs lazy initialization (it calls menu.init).
	menu.updateState()
	// exit cleanly on SIGINT and SIGTERM
	go func() {
		interrupt := make(chan os.Signal, 1)
		signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
		select {
		case <-interrupt:
			menu.onExit()
		case <-menu.bgCtx.Done():
		}
	}()
	// Track a running-instance gauge for the lifetime of the menu;
	// the deferred decrement runs when systray.Run returns.
	go menu.lc.IncrementGauge(menu.bgCtx, "systray_running", 1)
	defer menu.lc.IncrementGauge(menu.bgCtx, "systray_running", -1)
	systray.Run(menu.onReady, menu.onExit)
}
// Menu represents the systray menu, its items, and the current Tailscale state.
type Menu struct {
	mu sync.Mutex // protects the entire Menu
	lc *local.Client // LocalAPI client; set in Run
	status *ipnstate.Status // last-fetched Tailscale status (may be stale)
	curProfile ipn.LoginProfile // currently active login profile
	allProfiles []ipn.LoginProfile // all known login profiles
	// readonly is whether the systray app is running in read-only mode.
	// This is set if LocalAPI returns a permission error,
	// typically because the user needs to run `tailscale set --operator=$USER`.
	readonly bool
	bgCtx context.Context // ctx for background tasks not involving menu item clicks
	bgCancel context.CancelFunc
	// Top-level menu items
	connect *systray.MenuItem
	disconnect *systray.MenuItem
	self *systray.MenuItem
	exitNodes *systray.MenuItem
	more *systray.MenuItem
	rebuildMenu *systray.MenuItem
	quit *systray.MenuItem
	rebuildCh chan struct{} // triggers a menu rebuild
	accountsCh chan ipn.ProfileID
	exitNodeCh chan tailcfg.StableNodeID // ID of selected exit node
	eventCancel context.CancelFunc // cancel eventLoop
	notificationIcon *os.File // icon used for desktop notifications
}
// init lazily initializes the menu's channels, notification icon, and
// background context, and starts watching the IPN bus. It is a no-op
// after the first call (bgCtx is the sentinel).
func (menu *Menu) init() {
	if menu.bgCtx != nil {
		// already initialized
		return
	}
	menu.rebuildCh = make(chan struct{}, 1)
	menu.accountsCh = make(chan ipn.ProfileID)
	menu.exitNodeCh = make(chan tailcfg.StableNodeID)
	// dbus wants a file path for notification icons, so copy to a temp file.
	// NOTE(review): errors from CreateTemp and io.Copy are ignored, so
	// notificationIcon may be nil or empty — confirm consumers tolerate that.
	menu.notificationIcon, _ = os.CreateTemp("", "tailscale-systray.png")
	io.Copy(menu.notificationIcon, connected.renderWithBorder(3))
	menu.bgCtx, menu.bgCancel = context.WithCancel(context.Background())
	go menu.watchIPNBus()
}
// init applies desktop-environment-specific rendering tweaks on Linux,
// keyed off XDG_CURRENT_DESKTOP.
func init() {
	if runtime.GOOS != "linux" {
		// so far, these tweaks are only needed on Linux
		return
	}
	desktop := strings.ToLower(os.Getenv("XDG_CURRENT_DESKTOP"))
	switch desktop {
	case "gnome", "ubuntu:gnome":
		// GNOME expands submenus downward in the main menu, rather than flyouts to the side.
		// Either as a result of that or another limitation, there seems to be a maximum depth of submenus.
		// Mullvad countries that have a city submenu are not being rendered, and so can't be selected.
		// Handle this by simply treating all mullvad countries as single-city and select the best peer.
		hideMullvadCities = true
	case "kde":
		// KDE doesn't need a delay, and actually won't render submenus
		// if we delay for more than about 400µs.
		newMenuDelay = 0
	default:
		// Add a slight delay to ensure the menu is created before adding items.
		//
		// Systray implementations that use libdbusmenu sometimes process messages out of order,
		// resulting in errors such as:
		//   (waybar:153009): LIBDBUSMENU-GTK-WARNING **: 18:07:11.551: Children but no menu, someone's been naughty with their 'children-display' property: 'submenu'
		//
		// See also: https://github.com/fyne-io/systray/issues/12
		newMenuDelay = 10 * time.Millisecond
	}
}
// onReady is called by the systray package when the menu is ready to be built.
// It warns about running under sudo/doas, renders the initial icon, builds the
// menu, and prints operator guidance if running read-only.
func (menu *Menu) onReady() {
	log.Printf("starting")
	// Heuristic sudo/doas detection: root uid, mismatched effective uid,
	// or the SUDO_USER/DOAS_USER env vars being set.
	if os.Getuid() == 0 || os.Getuid() != os.Geteuid() || os.Getenv("SUDO_USER") != "" || os.Getenv("DOAS_USER") != "" {
		fmt.Fprintln(os.Stderr, `
It appears that you might be running the systray with sudo/doas.
This can lead to issues with D-Bus, and should be avoided.
The systray application should be run with the same user as your desktop session.
This usually means that you should run the application like:
tailscale systray
See https://tailscale.com/kb/1597/linux-systray for more information.`)
	}
	setAppIcon(disconnected)
	menu.rebuild()
	menu.mu.Lock()
	if menu.readonly {
		fmt.Fprintln(os.Stderr, `
No permission to manage Tailscale. Set operator by running:
sudo tailscale set --operator=$USER
See https://tailscale.com/s/cli-operator for more information.`)
	}
	menu.mu.Unlock()
}
// updateState updates the Menu state from the Tailscale local client.
// It refreshes menu.status and the current/all profiles, and sets
// menu.readonly when the local API reports an access-denied error.
// Errors are logged but otherwise non-fatal; stale state is kept.
func (menu *Menu) updateState() {
	menu.mu.Lock()
	defer menu.mu.Unlock()
	menu.init()
	menu.readonly = false
	var err error
	menu.status, err = menu.lc.Status(menu.bgCtx)
	if err != nil {
		log.Print(err)
	}
	menu.curProfile, menu.allProfiles, err = menu.lc.ProfileStatus(menu.bgCtx)
	if err != nil {
		if local.IsAccessDeniedError(err) {
			menu.readonly = true
		}
		log.Print(err)
	}
}
// rebuild the systray menu based on the current Tailscale state.
//
// We currently rebuild the entire menu because it is not easy to update the existing menu.
// You cannot iterate over the items in a menu, nor can you remove some items like separators.
// So for now we rebuild the whole thing, and can optimize this later if needed.
//
// Each rebuild cancels the previous per-build context (stopping the prior
// eventLoop and onClick goroutines) and starts a new eventLoop at the end.
func (menu *Menu) rebuild() {
	menu.mu.Lock()
	defer menu.mu.Unlock()
	menu.init()

	// Cancel handlers registered by the previous build before resetting.
	if menu.eventCancel != nil {
		menu.eventCancel()
	}
	ctx := context.Background()
	ctx, menu.eventCancel = context.WithCancel(ctx)
	systray.ResetMenu()

	if menu.readonly {
		const readonlyMsg = "No permission to manage Tailscale.\nSee tailscale.com/s/cli-operator"
		m := systray.AddMenuItem(readonlyMsg, "")
		onClick(ctx, m, func(_ context.Context) {
			webbrowser.Open("https://tailscale.com/s/cli-operator")
		})
		systray.AddSeparator()
	}

	menu.connect = systray.AddMenuItem("Connect", "")
	menu.disconnect = systray.AddMenuItem("Disconnect", "")
	menu.disconnect.Hide()
	systray.AddSeparator()

	// delay to prevent race setting icon on first start
	time.Sleep(newMenuDelay)

	// Set systray menu icon and title.
	// Also adjust connect/disconnect menu items if needed.
	var backendState string
	if menu.status != nil {
		backendState = menu.status.BackendState
	}
	switch backendState {
	case ipn.Running.String():
		if menu.status.ExitNodeStatus != nil && !menu.status.ExitNodeStatus.ID.IsZero() {
			if menu.status.ExitNodeStatus.Online {
				setTooltip("Using exit node")
				setAppIcon(exitNodeOnline)
			} else {
				setTooltip("Exit node offline")
				setAppIcon(exitNodeOffline)
			}
		} else {
			setTooltip(fmt.Sprintf("Connected to %s", menu.status.CurrentTailnet.Name))
			setAppIcon(connected)
		}
		menu.connect.SetTitle("Connected")
		menu.connect.Disable()
		menu.disconnect.Show()
		menu.disconnect.Enable()
	case ipn.Starting.String():
		setTooltip("Connecting")
		setAppIcon(loading)
	default:
		setTooltip("Disconnected")
		setAppIcon(disconnected)
	}
	if menu.readonly {
		menu.connect.Disable()
		menu.disconnect.Disable()
	}

	// Accounts submenu: one entry per profile, current profile checked.
	account := "Account"
	if pt := profileTitle(menu.curProfile); pt != "" {
		account = pt
	}
	if !menu.readonly {
		accounts := systray.AddMenuItem(account, "")
		setRemoteIcon(accounts, menu.curProfile.UserProfile.ProfilePicURL)
		time.Sleep(newMenuDelay)
		for _, profile := range menu.allProfiles {
			title := profileTitle(profile)
			var item *systray.MenuItem
			if profile.ID == menu.curProfile.ID {
				item = accounts.AddSubMenuItemCheckbox(title, "", true)
			} else {
				item = accounts.AddSubMenuItem(title, "")
			}
			setRemoteIcon(item, profile.UserProfile.ProfilePicURL)
			onClick(ctx, item, func(ctx context.Context) {
				// Hand the profile switch to eventLoop; abort if this
				// build's context is canceled first.
				select {
				case <-ctx.Done():
				case menu.accountsCh <- profile.ID:
				}
			})
		}
	}

	if menu.status != nil && menu.status.Self != nil && len(menu.status.Self.TailscaleIPs) > 0 {
		title := fmt.Sprintf("This Device: %s (%s)", menu.status.Self.HostName, menu.status.Self.TailscaleIPs[0])
		menu.self = systray.AddMenuItem(title, "")
	} else {
		menu.self = systray.AddMenuItem("This Device: not connected", "")
		menu.self.Disable()
	}
	systray.AddSeparator()

	if !menu.readonly {
		menu.rebuildExitNodeMenu(ctx)
	}

	menu.more = systray.AddMenuItem("More settings", "")
	if menu.status != nil && menu.status.BackendState == "Running" {
		// web client is only available if backend is running
		onClick(ctx, menu.more, func(_ context.Context) {
			webbrowser.Open("http://100.100.100.100/")
		})
	} else {
		menu.more.Disable()
	}

	// TODO(#15528): this menu item shouldn't be necessary at all,
	// but is at least more discoverable than having users switch profiles or exit nodes.
	menu.rebuildMenu = systray.AddMenuItem("Rebuild menu", "Fix missing menu items")
	onClick(ctx, menu.rebuildMenu, func(ctx context.Context) {
		select {
		case <-ctx.Done():
		case menu.rebuildCh <- struct{}{}:
		}
	})
	menu.rebuildMenu.Enable()

	menu.quit = systray.AddMenuItem("Quit", "Quit the app")
	menu.quit.Enable()

	go menu.eventLoop(ctx)
}
// profileTitle returns the display title for a profile menu item: the
// profile name, plus the tailnet's display name when a domain is set.
// On platforms without multi-line menu support the tailnet name is
// appended in parentheses instead of on a second line.
func profileTitle(profile ipn.LoginProfile) string {
	title := profile.Name
	if profile.NetworkProfile.DomainName == "" {
		return title
	}
	switch runtime.GOOS {
	case "windows", "darwin":
		// windows and mac don't support multi-line menu
		return title + " (" + profile.NetworkProfile.DisplayNameOrDefault() + ")"
	default:
		return title + "\n" + profile.NetworkProfile.DisplayNameOrDefault()
	}
}
// Package-level cache for remote menu icons fetched by setRemoteIcon.
var (
	cacheMu   sync.Mutex            // guards httpCache
	httpCache = map[string][]byte{} // URL => response body
)
// setRemoteIcon sets the icon for menu to the specified remote image.
// Remote images are fetched as needed and cached in httpCache, keyed by URL.
// Fetch or decode failures are silently ignored and the icon is left unset.
func setRemoteIcon(menu *systray.MenuItem, urlStr string) {
	if menu == nil || urlStr == "" {
		return
	}

	cacheMu.Lock()
	b, ok := httpCache[urlStr]
	if !ok {
		// NOTE: the fetch happens while holding cacheMu, serializing all
		// icon fetches (as the original code did).
		b = fetchIcon(urlStr)
		if len(b) > 0 {
			httpCache[urlStr] = b
		}
	}
	cacheMu.Unlock()

	if len(b) > 0 {
		menu.SetIcon(b)
	}
}

// fetchIcon retrieves the image at urlStr, converting it to ICO format on
// Windows (the format the systray menu requires there). It returns nil on
// any error.
//
// Extracted so that every error path simply returns nil: the original
// inline version returned early on Windows decode/encode errors while
// still holding cacheMu, deadlocking all subsequent setRemoteIcon calls,
// and leaked resp.Body on non-200 responses.
func fetchIcon(urlStr string) []byte {
	resp, err := http.Get(urlStr)
	if err != nil {
		return nil
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil
	}
	if runtime.GOOS == "windows" {
		im, _, err := image.Decode(bytes.NewReader(b))
		if err != nil {
			return nil
		}
		var buf bytes.Buffer
		if err := ico.Encode(&buf, im); err != nil {
			return nil
		}
		b = buf.Bytes()
	}
	return b
}
// setTooltip sets the hover text shown for the systray icon,
// accounting for platform differences in the systray API.
func setTooltip(text string) {
	switch runtime.GOOS {
	case "darwin", "windows":
		systray.SetTooltip(text)
	default:
		// on Linux, SetTitle actually sets the tooltip
		systray.SetTitle(text)
	}
}
// eventLoop is the main event loop for handling click events on menu items
// and responding to Tailscale state changes.
// This method does not return until ctx.Done is closed.
//
// ctx is the per-build context created in rebuild; it is canceled on the
// next rebuild, which terminates this goroutine before a new one starts.
func (menu *Menu) eventLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-menu.rebuildCh:
			// State or prefs changed (or "Rebuild menu" was clicked):
			// refresh state and rebuild the whole menu.
			menu.updateState()
			menu.rebuild()
		case <-menu.connect.ClickedCh:
			_, err := menu.lc.EditPrefs(ctx, &ipn.MaskedPrefs{
				Prefs: ipn.Prefs{
					WantRunning: true,
				},
				WantRunningSet: true,
			})
			if err != nil {
				log.Printf("error connecting: %v", err)
			}
		case <-menu.disconnect.ClickedCh:
			_, err := menu.lc.EditPrefs(ctx, &ipn.MaskedPrefs{
				Prefs: ipn.Prefs{
					WantRunning: false,
				},
				WantRunningSet: true,
			})
			if err != nil {
				log.Printf("error disconnecting: %v", err)
			}
		case <-menu.self.ClickedCh:
			// Clicking the "This Device" entry copies its Tailscale IP.
			menu.copyTailscaleIP(menu.status.Self)
		case id := <-menu.accountsCh:
			if err := menu.lc.SwitchProfile(ctx, id); err != nil {
				log.Printf("error switching to profile ID %v: %v", id, err)
			}
		case exitNode := <-menu.exitNodeCh:
			// A zero node ID means "no exit node".
			if exitNode.IsZero() {
				log.Print("disable exit node")
				if err := menu.lc.SetUseExitNode(ctx, false); err != nil {
					log.Printf("error disabling exit node: %v", err)
				}
			} else {
				log.Printf("enable exit node: %v", exitNode)
				mp := &ipn.MaskedPrefs{
					Prefs: ipn.Prefs{
						ExitNodeID: exitNode,
					},
					ExitNodeIDSet: true,
				}
				if _, err := menu.lc.EditPrefs(ctx, mp); err != nil {
					log.Printf("error setting exit node: %v", err)
				}
			}
		case <-menu.quit.ClickedCh:
			systray.Quit()
		}
	}
}
// onClick registers fn as the click handler for item.
// A background goroutine invokes fn on every click until ctx is canceled.
func onClick(ctx context.Context, item *systray.MenuItem, fn func(ctx context.Context)) {
	go func() {
		for {
			select {
			case <-item.ClickedCh:
				fn(ctx)
			case <-ctx.Done():
				return
			}
		}
	}()
}
// watchIPNBus subscribes to the tailscale event bus and sends state updates to chState.
// This method does not return: it reconnects after transient failures, and
// exits the process if the background context has been canceled (in which
// case reconnecting can never succeed).
func (menu *Menu) watchIPNBus() {
	for {
		if err := menu.watchIPNBusInner(); err != nil {
			log.Println(err)
			if errors.Is(err, context.Canceled) {
				// If the context got canceled, we will never be able to
				// reconnect to IPN bus, so exit the process.
				log.Fatalf("watchIPNBus: %v", err)
			}
		}
		// If our watch connection breaks, wait a bit before reconnecting. No
		// reason to spam the logs if e.g. tailscaled is restarting or goes
		// down.
		time.Sleep(3 * time.Second)
	}
}
// watchIPNBusInner makes a single watch connection to the IPN bus and
// triggers a menu rebuild (via menu.rebuildCh) whenever the backend state
// or prefs change. It returns nil when the background context is done, or
// a wrapped error if the connection or a read fails.
func (menu *Menu) watchIPNBusInner() error {
	watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, 0)
	if err != nil {
		return fmt.Errorf("watching ipn bus: %w", err)
	}
	defer watcher.Close()
	for {
		select {
		case <-menu.bgCtx.Done():
			return nil
		default:
			// NOTE(review): this default branch blocks in watcher.Next, so
			// bgCtx cancellation is only observed between bus events (or
			// when Next itself fails after the watcher's context ends).
			n, err := watcher.Next()
			if err != nil {
				return fmt.Errorf("ipnbus error: %w", err)
			}
			var rebuild bool
			if n.State != nil {
				log.Printf("new state: %v", n.State)
				rebuild = true
			}
			if n.Prefs != nil {
				rebuild = true
			}
			if rebuild {
				// rebuildCh has capacity 1; this may block briefly until
				// eventLoop drains a pending rebuild request.
				menu.rebuildCh <- struct{}{}
			}
		}
	}
}
// copyTailscaleIP copies the first Tailscale IP of the given device to the
// clipboard and sends a desktop notification with the copied value.
// It is a no-op for a nil device or one without Tailscale IPs.
func (menu *Menu) copyTailscaleIP(device *ipnstate.PeerStatus) {
	if device == nil || len(device.TailscaleIPs) == 0 {
		return
	}
	// Use only the short hostname portion of the DNS name.
	name, _, _ := strings.Cut(device.DNSName, ".")
	ip := device.TailscaleIPs[0].String()
	if err := clipboard.WriteAll(ip); err != nil {
		log.Printf("clipboard error: %v", err)
		return
	}
	menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip)
}
// sendNotification sends a desktop notification with the given title and
// content via the org.freedesktop.Notifications D-Bus interface, using the
// temp-file icon created in init. The notification expires after 3 seconds.
// D-Bus errors are logged and otherwise ignored.
func (menu *Menu) sendNotification(title, content string) {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Printf("dbus: %v", err)
		return
	}
	timeout := 3 * time.Second
	obj := conn.Object("org.freedesktop.Notifications", "/org/freedesktop/Notifications")
	// Argument order per the freedesktop Notify spec: app_name, replaces_id,
	// app_icon, summary, body, actions, hints, expire_timeout (ms).
	call := obj.Call("org.freedesktop.Notifications.Notify", 0, "Tailscale", uint32(0),
		menu.notificationIcon.Name(), title, content, []string{}, map[string]dbus.Variant{}, int32(timeout.Milliseconds()))
	if call.Err != nil {
		log.Printf("dbus: %v", call.Err)
	}
}
// rebuildExitNodeMenu populates the "Exit Nodes" submenu from menu.status:
// a "None" entry, an optional recommended node, plain tailnet exit nodes,
// and location-based (mullvad) exit nodes grouped by country and city.
// It is a no-op when status is unknown. Called from rebuild, with menu.mu
// held, when the menu is not read-only.
func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) {
	if menu.status == nil {
		return
	}
	status := menu.status
	menu.exitNodes = systray.AddMenuItem("Exit Nodes", "")
	// Give the systray implementation time to create the menu item before
	// sub-items are added (see newMenuDelay in the package init).
	time.Sleep(newMenuDelay)

	// register a click handler for a menu item to set nodeID as the exit node.
	setExitNodeOnClick := func(item *systray.MenuItem, nodeID tailcfg.StableNodeID) {
		onClick(ctx, item, func(ctx context.Context) {
			select {
			case <-ctx.Done():
			case menu.exitNodeCh <- nodeID:
			}
		})
	}

	// "None" entry, checked when no exit node is in use; the zero node ID
	// signals eventLoop to disable the exit node.
	noExitNodeMenu := menu.exitNodes.AddSubMenuItemCheckbox("None", "", status.ExitNodeStatus == nil)
	setExitNodeOnClick(noExitNodeMenu, "")

	// Show recommended exit node if available.
	if status.Self.CapMap.Contains(tailcfg.NodeAttrSuggestExitNodeUI) {
		sugg, err := menu.lc.SuggestExitNode(ctx)
		if err == nil {
			title := "Recommended: "
			if loc := sugg.Location; loc.Valid() && loc.Country() != "" {
				flag := countryFlag(loc.CountryCode())
				title += fmt.Sprintf("%s %s: %s", flag, loc.Country(), loc.City())
			} else {
				title += strings.Split(sugg.Name, ".")[0]
			}
			menu.exitNodes.AddSeparator()
			rm := menu.exitNodes.AddSubMenuItemCheckbox(title, "", false)
			setExitNodeOnClick(rm, sugg.ID)
			if status.ExitNodeStatus != nil && sugg.ID == status.ExitNodeStatus.ID {
				rm.Check()
			}
		}
	}

	// Add tailnet exit nodes if present. Peers with a Location are treated
	// as mullvad nodes and handled below instead.
	var tailnetExitNodes []*ipnstate.PeerStatus
	for _, ps := range status.Peer {
		if ps.ExitNodeOption && ps.Location == nil {
			tailnetExitNodes = append(tailnetExitNodes, ps)
		}
	}
	if len(tailnetExitNodes) > 0 {
		menu.exitNodes.AddSeparator()
		menu.exitNodes.AddSubMenuItem("Tailnet Exit Nodes", "").Disable()
		for _, ps := range status.Peer {
			if !ps.ExitNodeOption || ps.Location != nil {
				continue
			}
			name := strings.Split(ps.DNSName, ".")[0]
			if !ps.Online {
				name += " (offline)"
			}
			sm := menu.exitNodes.AddSubMenuItemCheckbox(name, "", false)
			if !ps.Online {
				sm.Disable()
			}
			if status.ExitNodeStatus != nil && ps.ID == status.ExitNodeStatus.ID {
				sm.Check()
			}
			setExitNodeOnClick(sm, ps.ID)
		}
	}

	// Add mullvad exit nodes if present.
	var mullvadExitNodes mullvadPeers
	if status.Self.CapMap.Contains("mullvad") {
		mullvadExitNodes = newMullvadPeers(status)
	}
	if len(mullvadExitNodes.countries) > 0 {
		menu.exitNodes.AddSeparator()
		menu.exitNodes.AddSubMenuItem("Location-based Exit Nodes", "").Disable()
		mullvadMenu := menu.exitNodes.AddSubMenuItemCheckbox("Mullvad VPN", "", false)
		for _, country := range mullvadExitNodes.sortedCountries() {
			flag := countryFlag(country.code)
			countryMenu := mullvadMenu.AddSubMenuItemCheckbox(flag+" "+country.name, "", false)

			// single-city country, no submenu
			if len(country.cities) == 1 || hideMullvadCities {
				setExitNodeOnClick(countryMenu, country.best.ID)
				// Check ancestors if the active exit node is in this country.
				if status.ExitNodeStatus != nil {
					for _, city := range country.cities {
						for _, ps := range city.peers {
							if status.ExitNodeStatus.ID == ps.ID {
								mullvadMenu.Check()
								countryMenu.Check()
							}
						}
					}
				}
				continue
			}

			// multi-city country, build submenu with "best available" option and cities.
			time.Sleep(newMenuDelay)
			bm := countryMenu.AddSubMenuItemCheckbox("Best Available", "", false)
			setExitNodeOnClick(bm, country.best.ID)
			countryMenu.AddSeparator()
			for _, city := range country.sortedCities() {
				cityMenu := countryMenu.AddSubMenuItemCheckbox(city.name, "", false)
				setExitNodeOnClick(cityMenu, city.best.ID)
				// Check the full path to the active exit node's city.
				if status.ExitNodeStatus != nil {
					for _, ps := range city.peers {
						if status.ExitNodeStatus.ID == ps.ID {
							mullvadMenu.Check()
							countryMenu.Check()
							cityMenu.Check()
						}
					}
				}
			}
		}
	}

	// TODO: "Allow Local Network Access" and "Run Exit Node" menu items
}
// mullvadPeers contains all mullvad peer nodes, sorted by country and city.
// It is built by newMullvadPeers from an ipnstate.Status.
type mullvadPeers struct {
	countries map[string]*mvCountry // country code (uppercase) => country
}
// sortedCountries returns countries containing mullvad nodes,
// sorted case-insensitively by country name.
func (mp mullvadPeers) sortedCountries() []*mvCountry {
	list := slicesx.MapValues(mp.countries)
	slices.SortFunc(list, func(x, y *mvCountry) int {
		return stringsx.CompareFold(x.name, y.name)
	})
	return list
}
// mvCountry groups a country's mullvad exit-node cities and tracks the
// best (highest priority) peer across the whole country.
type mvCountry struct {
	code   string               // ISO country code, e.g. "SE"
	name   string               // human-readable country name
	best   *ipnstate.PeerStatus // highest priority peer in the country
	cities map[string]*mvCity   // city code => city
}
// sortedCities returns cities containing mullvad nodes,
// sorted case-insensitively by city name.
func (mc *mvCountry) sortedCities() []*mvCity {
	list := slicesx.MapValues(mc.cities)
	slices.SortFunc(list, func(x, y *mvCity) int {
		return stringsx.CompareFold(x.name, y.name)
	})
	return list
}
// countryFlag takes a 2-character ASCII country code and returns the
// corresponding emoji flag, built from Unicode regional indicator symbols.
// It returns the empty string if the input is not exactly two ASCII letters.
func countryFlag(code string) string {
	if len(code) != 2 {
		return ""
	}
	var flag []rune
	for _, c := range []byte(code) {
		lower := c | 32 // normalize to lowercase
		if lower < 'a' || lower > 'z' {
			return ""
		}
		// https://en.wikipedia.org/wiki/Regional_indicator_symbol
		flag = append(flag, 0x1F1E6+rune(lower-'a'))
	}
	return string(flag)
}
// mvCity groups the mullvad exit-node peers of a single city and tracks
// the best (highest priority) peer among them.
type mvCity struct {
	name  string               // human-readable city name
	best  *ipnstate.PeerStatus // highest priority peer in the city
	peers []*ipnstate.PeerStatus
}
// newMullvadPeers collects all mullvad exit-node peers from status
// (exit-node-capable peers that carry Location data), grouped by country
// and city, and records the highest-priority peer at each level.
func newMullvadPeers(status *ipnstate.Status) mullvadPeers {
	byCountry := make(map[string]*mvCountry)
	for _, ps := range status.Peer {
		if !ps.ExitNodeOption || ps.Location == nil {
			continue
		}
		loc := ps.Location

		country := byCountry[loc.CountryCode]
		if country == nil {
			country = &mvCountry{
				code:   loc.CountryCode,
				name:   loc.Country,
				cities: make(map[string]*mvCity),
			}
			byCountry[loc.CountryCode] = country
		}

		city := country.cities[loc.CityCode]
		if city == nil {
			city = &mvCity{name: loc.City}
			country.cities[loc.CityCode] = city
		}

		city.peers = append(city.peers, ps)
		if city.best == nil || loc.Priority > city.best.Location.Priority {
			city.best = ps
		}
		if country.best == nil || loc.Priority > country.best.Location.Priority {
			country.best = ps
		}
	}
	return mullvadPeers{countries: byCountry}
}
// onExit is called by the systray package when the menu is exiting.
// It cancels the background and per-build contexts and removes the
// temporary notification-icon file created in init.
func (menu *Menu) onExit() {
	log.Printf("exiting")
	if menu.bgCancel != nil {
		menu.bgCancel()
	}
	if menu.eventCancel != nil {
		menu.eventCancel()
	}
	// notificationIcon may be nil if init never ran or its os.CreateTemp
	// failed (that error is ignored); guard against a nil dereference.
	if menu.notificationIcon != nil {
		os.Remove(menu.notificationIcon.Name())
	}
}

@ -0,0 +1,10 @@
[Unit]
Description=Tailscale System Tray
After=systemd.service
[Service]
Type=simple
ExecStart=/usr/bin/tailscale systray
[Install]
WantedBy=default.target

@ -12,6 +12,7 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"net/netip" "net/netip"
"net/url"
) )
// ACLRow defines a rule that grants access by a set of users or groups to a set // ACLRow defines a rule that grants access by a set of users or groups to a set
@ -83,7 +84,7 @@ func (c *Client) ACL(ctx context.Context) (acl *ACL, err error) {
} }
}() }()
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("acl")
req, err := http.NewRequestWithContext(ctx, "GET", path, nil) req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -97,7 +98,7 @@ func (c *Client) ACL(ctx context.Context) (acl *ACL, err error) {
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
// Otherwise, try to decode the response. // Otherwise, try to decode the response.
@ -126,7 +127,7 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) {
} }
}() }()
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl?details=1", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("acl", url.Values{"details": {"1"}})
req, err := http.NewRequestWithContext(ctx, "GET", path, nil) req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -138,7 +139,7 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) {
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
data := struct { data := struct {
@ -146,7 +147,7 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) {
Warnings []string `json:"warnings"` Warnings []string `json:"warnings"`
}{} }{}
if err := json.Unmarshal(b, &data); err != nil { if err := json.Unmarshal(b, &data); err != nil {
return nil, err return nil, fmt.Errorf("json.Unmarshal %q: %w", b, err)
} }
acl = &ACLHuJSON{ acl = &ACLHuJSON{
@ -184,7 +185,7 @@ func (e ACLTestError) Error() string {
} }
func (c *Client) aclPOSTRequest(ctx context.Context, body []byte, avoidCollisions bool, etag, acceptHeader string) ([]byte, string, error) { func (c *Client) aclPOSTRequest(ctx context.Context, body []byte, avoidCollisions bool, etag, acceptHeader string) ([]byte, string, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("acl")
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body)) req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body))
if err != nil { if err != nil {
return nil, "", err return nil, "", err
@ -328,7 +329,7 @@ type ACLPreview struct {
} }
func (c *Client) previewACLPostRequest(ctx context.Context, body []byte, previewType string, previewFor string) (res *ACLPreviewResponse, err error) { func (c *Client) previewACLPostRequest(ctx context.Context, body []byte, previewType string, previewFor string) (res *ACLPreviewResponse, err error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl/preview", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("acl", "preview")
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body)) req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body))
if err != nil { if err != nil {
return nil, err return nil, err
@ -350,7 +351,7 @@ func (c *Client) previewACLPostRequest(ctx context.Context, body []byte, preview
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
if err = json.Unmarshal(b, &res); err != nil { if err = json.Unmarshal(b, &res); err != nil {
return nil, err return nil, err
@ -488,7 +489,7 @@ func (c *Client) ValidateACLJSON(ctx context.Context, source, dest string) (test
return nil, err return nil, err
} }
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl/validate", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("acl", "validate")
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(postData)) req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(postData))
if err != nil { if err != nil {
return nil, err return nil, err

@ -7,11 +7,29 @@ package apitype
import ( import (
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/dnstype" "tailscale.com/types/dnstype"
"tailscale.com/util/ctxkey"
) )
// LocalAPIHost is the Host header value used by the LocalAPI. // LocalAPIHost is the Host header value used by the LocalAPI.
const LocalAPIHost = "local-tailscaled.sock" const LocalAPIHost = "local-tailscaled.sock"
// RequestReasonHeader is the header used to pass justification for a LocalAPI request,
// such as when a user wants to perform an action they don't have permission for,
// and a policy allows it with justification. As of 2025-01-29, it is only used to
// allow a user to disconnect Tailscale when the "always-on" mode is enabled.
//
// The header value is base64-encoded using the standard encoding defined in RFC 4648.
//
// See tailscale/corp#26146.
const RequestReasonHeader = "X-Tailscale-Reason"
// RequestReasonKey is the context key used to pass the request reason
// when making a LocalAPI request via [local.Client].
// It's value is a raw string. An empty string means no reason was provided.
//
// See tailscale/corp#26146.
var RequestReasonKey = ctxkey.New(RequestReasonHeader, "")
// WhoIsResponse is the JSON type returned by tailscaled debug server's /whois?ip=$IP handler. // WhoIsResponse is the JSON type returned by tailscaled debug server's /whois?ip=$IP handler.
// In successful whois responses, Node and UserProfile are never nil. // In successful whois responses, Node and UserProfile are never nil.
type WhoIsResponse struct { type WhoIsResponse struct {
@ -76,3 +94,13 @@ type DNSQueryResponse struct {
// Resolvers is the list of resolvers that the forwarder deemed able to resolve the query. // Resolvers is the list of resolvers that the forwarder deemed able to resolve the query.
Resolvers []*dnstype.Resolver Resolvers []*dnstype.Resolver
} }
// OptionalFeatures describes which optional features are enabled in the build.
type OptionalFeatures struct {
// Features is the map of optional feature names to whether they are
// enabled.
//
// Disabled features may be absent from the map. (That is, false values
// are not guaranteed to be present.)
Features map[string]bool
}

@ -3,17 +3,50 @@
package apitype package apitype
// DNSConfig is the DNS configuration for a tailnet
// used in /tailnet/{tailnet}/dns/config.
type DNSConfig struct { type DNSConfig struct {
// Resolvers are the global DNS resolvers to use
// overriding the local OS configuration.
Resolvers []DNSResolver `json:"resolvers"` Resolvers []DNSResolver `json:"resolvers"`
// FallbackResolvers are used as global resolvers when
// the client is unable to determine the OS's preferred DNS servers.
FallbackResolvers []DNSResolver `json:"fallbackResolvers"` FallbackResolvers []DNSResolver `json:"fallbackResolvers"`
// Routes map DNS name suffixes to a set of DNS resolvers,
// used for Split DNS and other advanced routing overlays.
Routes map[string][]DNSResolver `json:"routes"` Routes map[string][]DNSResolver `json:"routes"`
// Domains are the search domains to use.
Domains []string `json:"domains"` Domains []string `json:"domains"`
Nameservers []string `json:"nameservers"`
// Proxied means MagicDNS is enabled.
Proxied bool `json:"proxied"` Proxied bool `json:"proxied"`
// TempCorpIssue13969 is from an internal hack day prototype,
// See tailscale/corp#13969.
TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"` TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"`
// Nameservers are the IP addresses of global nameservers to use.
// This is a deprecated format but may still be found in tailnets
// that were configured a long time ago. When making updates,
// set Resolvers and leave Nameservers empty.
Nameservers []string `json:"nameservers"`
} }
// DNSResolver is a DNS resolver in a DNS configuration.
type DNSResolver struct { type DNSResolver struct {
// Addr is the address of the DNS resolver.
// It is usually an IP address or a DoH URL.
// See dnstype.Resolver.Addr for full details.
Addr string `json:"addr"` Addr string `json:"addr"`
// BootstrapResolution is an optional suggested resolution for
// the DoT/DoH resolver.
BootstrapResolution []string `json:"bootstrapResolution,omitempty"` BootstrapResolution []string `json:"bootstrapResolution,omitempty"`
// UseWithExitNode signals this resolver should be used
// even when a tailscale exit node is configured on a device.
UseWithExitNode bool `json:"useWithExitNode,omitempty"`
} }

@ -0,0 +1,34 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !js && !ts_omit_acme
package tailscale
import (
"context"
"crypto/tls"
"tailscale.com/client/local"
)
// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate].
func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
return local.GetCertificate(hi)
}
// CertPair is an alias for [tailscale.com/client/local.CertPair].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair].
func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
return local.CertPair(ctx, domain)
}
// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName].
func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) {
return local.ExpandSNIName(ctx, name)
}

@ -79,6 +79,13 @@ type Device struct {
// Tailscale have attempted to collect this from the device but it has not // Tailscale have attempted to collect this from the device but it has not
// opted in, PostureIdentity will have Disabled=true. // opted in, PostureIdentity will have Disabled=true.
PostureIdentity *DevicePostureIdentity `json:"postureIdentity"` PostureIdentity *DevicePostureIdentity `json:"postureIdentity"`
// TailnetLockKey is the tailnet lock public key of the node as a hex string.
TailnetLockKey string `json:"tailnetLockKey,omitempty"`
// TailnetLockErr indicates an issue with the tailnet lock node-key signature
// on this device. This field is only populated when tailnet lock is enabled.
TailnetLockErr string `json:"tailnetLockError,omitempty"`
} }
type DevicePostureIdentity struct { type DevicePostureIdentity struct {
@ -131,7 +138,7 @@ func (c *Client) Devices(ctx context.Context, fields *DeviceFieldsOpts) (deviceL
} }
}() }()
path := fmt.Sprintf("%s/api/v2/tailnet/%s/devices", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("devices")
req, err := http.NewRequestWithContext(ctx, "GET", path, nil) req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -149,7 +156,7 @@ func (c *Client) Devices(ctx context.Context, fields *DeviceFieldsOpts) (deviceL
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
var devices GetDevicesResponse var devices GetDevicesResponse
@ -188,7 +195,7 @@ func (c *Client) Device(ctx context.Context, deviceID string, fields *DeviceFiel
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
err = json.Unmarshal(b, &device) err = json.Unmarshal(b, &device)
@ -221,7 +228,7 @@ func (c *Client) DeleteDevice(ctx context.Context, deviceID string) (err error)
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp) return HandleErrorResponse(b, resp)
} }
return nil return nil
} }
@ -253,7 +260,7 @@ func (c *Client) SetAuthorized(ctx context.Context, deviceID string, authorized
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp) return HandleErrorResponse(b, resp)
} }
return nil return nil
@ -281,7 +288,7 @@ func (c *Client) SetTags(ctx context.Context, deviceID string, tags []string) er
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp) return HandleErrorResponse(b, resp)
} }
return nil return nil

@ -44,7 +44,7 @@ type DNSPreferences struct {
} }
func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, error) { func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint) path := c.BuildTailnetURL("dns", endpoint)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil) req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -57,14 +57,14 @@ func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, er
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
return b, nil return b, nil
} }
func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData any) ([]byte, error) { func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData any) ([]byte, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint) path := c.BuildTailnetURL("dns", endpoint)
data, err := json.Marshal(&postData) data, err := json.Marshal(&postData)
if err != nil { if err != nil {
return nil, err return nil, err
@ -84,7 +84,7 @@ func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData a
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
return b, nil return b, nil

@ -11,13 +11,14 @@ import (
"log" "log"
"net/http" "net/http"
"tailscale.com/client/tailscale" "tailscale.com/client/local"
) )
func main() { func main() {
var lc local.Client
s := &http.Server{ s := &http.Server{
TLSConfig: &tls.Config{ TLSConfig: &tls.Config{
GetCertificate: tailscale.GetCertificate, GetCertificate: lc.GetCertificate,
}, },
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, "<h1>Hello from Tailscale!</h1> It works.") io.WriteString(w, "<h1>Hello from Tailscale!</h1> It works.")

@ -40,7 +40,7 @@ type KeyDeviceCreateCapabilities struct {
// Keys returns the list of keys for the current user. // Keys returns the list of keys for the current user.
func (c *Client) Keys(ctx context.Context) ([]string, error) { func (c *Client) Keys(ctx context.Context) ([]string, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("keys")
req, err := http.NewRequestWithContext(ctx, "GET", path, nil) req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -51,7 +51,7 @@ func (c *Client) Keys(ctx context.Context) ([]string, error) {
return nil, err return nil, err
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
var keys struct { var keys struct {
@ -99,7 +99,7 @@ func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities,
return "", nil, err return "", nil, err
} }
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet) path := c.BuildTailnetURL("keys")
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewReader(bs)) req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewReader(bs))
if err != nil { if err != nil {
return "", nil, err return "", nil, err
@ -110,7 +110,7 @@ func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities,
return "", nil, err return "", nil, err
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return "", nil, handleErrorResponse(b, resp) return "", nil, HandleErrorResponse(b, resp)
} }
var key struct { var key struct {
@ -126,7 +126,7 @@ func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities,
// Key returns the metadata for the given key ID. Currently, capabilities are // Key returns the metadata for the given key ID. Currently, capabilities are
// only returned for auth keys, API keys only return general metadata. // only returned for auth keys, API keys only return general metadata.
func (c *Client) Key(ctx context.Context, id string) (*Key, error) { func (c *Client) Key(ctx context.Context, id string) (*Key, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id) path := c.BuildTailnetURL("keys", id)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil) req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -137,7 +137,7 @@ func (c *Client) Key(ctx context.Context, id string) (*Key, error) {
return nil, err return nil, err
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
var key Key var key Key
@ -149,7 +149,7 @@ func (c *Client) Key(ctx context.Context, id string) (*Key, error) {
// DeleteKey deletes the key with the given ID. // DeleteKey deletes the key with the given ID.
func (c *Client) DeleteKey(ctx context.Context, id string) error { func (c *Client) DeleteKey(ctx context.Context, id string) error {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id) path := c.BuildTailnetURL("keys", id)
req, err := http.NewRequestWithContext(ctx, "DELETE", path, nil) req, err := http.NewRequestWithContext(ctx, "DELETE", path, nil)
if err != nil { if err != nil {
return err return err
@ -160,7 +160,7 @@ func (c *Client) DeleteKey(ctx context.Context, id string) error {
return err return err
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp) return HandleErrorResponse(b, resp)
} }
return nil return nil
} }

@ -0,0 +1,79 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package tailscale
import (
"context"
"tailscale.com/client/local"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/ipn/ipnstate"
)
// ErrPeerNotFound is an alias for [tailscale.com/client/local.ErrPeerNotFound].
//
// Deprecated: import [tailscale.com/client/local] instead.
var ErrPeerNotFound = local.ErrPeerNotFound
// LocalClient is an alias for [tailscale.com/client/local.Client].
//
// Deprecated: import [tailscale.com/client/local] instead.
type LocalClient = local.Client
// IPNBusWatcher is an alias for [tailscale.com/client/local.IPNBusWatcher].
//
// Deprecated: import [tailscale.com/client/local] instead.
type IPNBusWatcher = local.IPNBusWatcher
// BugReportOpts is an alias for [tailscale.com/client/local.BugReportOpts].
//
// Deprecated: import [tailscale.com/client/local] instead.
type BugReportOpts = local.BugReportOpts
// PingOpts is an alias for [tailscale.com/client/local.PingOpts].
//
// Deprecated: import [tailscale.com/client/local] instead.
type PingOpts = local.PingOpts
// SetVersionMismatchHandler is an alias for [tailscale.com/client/local.SetVersionMismatchHandler].
//
// Deprecated: import [tailscale.com/client/local] instead.
func SetVersionMismatchHandler(f func(clientVer, serverVer string)) {
local.SetVersionMismatchHandler(f)
}
// IsAccessDeniedError is an alias for [tailscale.com/client/local.IsAccessDeniedError].
//
// Deprecated: import [tailscale.com/client/local] instead.
func IsAccessDeniedError(err error) bool {
return local.IsAccessDeniedError(err)
}
// IsPreconditionsFailedError is an alias for [tailscale.com/client/local.IsPreconditionsFailedError].
//
// Deprecated: import [tailscale.com/client/local] instead.
func IsPreconditionsFailedError(err error) bool {
return local.IsPreconditionsFailedError(err)
}
// WhoIs is an alias for [tailscale.com/client/local.WhoIs].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.WhoIs].
func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) {
return local.WhoIs(ctx, remoteAddr)
}
// Status is an alias for [tailscale.com/client/local.Status].
//
// Deprecated: import [tailscale.com/client/local] instead.
func Status(ctx context.Context) (*ipnstate.Status, error) {
return local.Status(ctx)
}
// StatusWithoutPeers is an alias for [tailscale.com/client/local.StatusWithoutPeers].
//
// Deprecated: import [tailscale.com/client/local] instead.
func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) {
return local.StatusWithoutPeers(ctx)
}

@ -44,7 +44,7 @@ func (c *Client) Routes(ctx context.Context, deviceID string) (routes *Routes, e
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
var sr Routes var sr Routes
@ -84,7 +84,7 @@ func (c *Client) SetRoutes(ctx context.Context, deviceID string, subnets []netip
// If status code was not successful, return the error. // If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes. // TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp) return nil, HandleErrorResponse(b, resp)
} }
var srr *Routes var srr *Routes

@ -9,7 +9,6 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"net/url"
"tailscale.com/util/httpm" "tailscale.com/util/httpm"
) )
@ -22,7 +21,7 @@ func (c *Client) TailnetDeleteRequest(ctx context.Context, tailnetID string) (er
} }
}() }()
path := fmt.Sprintf("%s/api/v2/tailnet/%s", c.baseURL(), url.PathEscape(string(tailnetID))) path := c.BuildTailnetURL("tailnet")
req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil) req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil)
if err != nil { if err != nil {
return err return err
@ -35,7 +34,7 @@ func (c *Client) TailnetDeleteRequest(ctx context.Context, tailnetID string) (er
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp) return HandleErrorResponse(b, resp)
} }
return nil return nil

@ -3,11 +3,12 @@
//go:build go1.19 //go:build go1.19
// Package tailscale contains Go clients for the Tailscale LocalAPI and // Package tailscale contains a Go client for the Tailscale control plane API.
// Tailscale control plane API.
// //
// Warning: this package is in development and makes no API compatibility // This package is only intended for internal and transitional use.
// promises as of 2022-04-29. It is subject to change at any time. //
// Deprecated: the official control plane client is available at
// [tailscale.com/client/tailscale/v2].
package tailscale package tailscale
import ( import (
@ -16,13 +17,12 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"net/url"
"path"
) )
// I_Acknowledge_This_API_Is_Unstable must be set true to use this package // I_Acknowledge_This_API_Is_Unstable must be set true to use this package
// for now. It was added 2022-04-29 when it was moved to this git repo // for now. This package is being replaced by [tailscale.com/client/tailscale/v2].
// and will be removed when the public API has settled.
//
// TODO(bradfitz): remove this after the we're happy with the public API.
var I_Acknowledge_This_API_Is_Unstable = false var I_Acknowledge_This_API_Is_Unstable = false
// TODO: use url.PathEscape() for deviceID and tailnets when constructing requests. // TODO: use url.PathEscape() for deviceID and tailnets when constructing requests.
@ -34,8 +34,10 @@ const maxReadSize = 10 << 20
// Client makes API calls to the Tailscale control plane API server. // Client makes API calls to the Tailscale control plane API server.
// //
// Use NewClient to instantiate one. Exported fields should be set before // Use [NewClient] to instantiate one. Exported fields should be set before
// the client is used and not changed thereafter. // the client is used and not changed thereafter.
//
// Deprecated: use [tailscale.com/client/tailscale/v2] instead.
type Client struct { type Client struct {
// tailnet is the globally unique identifier for a Tailscale network, such // tailnet is the globally unique identifier for a Tailscale network, such
// as "example.com" or "user@gmail.com". // as "example.com" or "user@gmail.com".
@ -49,8 +51,11 @@ type Client struct {
BaseURL string BaseURL string
// HTTPClient optionally specifies an alternate HTTP client to use. // HTTPClient optionally specifies an alternate HTTP client to use.
// If nil, http.DefaultClient is used. // If nil, [http.DefaultClient] is used.
HTTPClient *http.Client HTTPClient *http.Client
// UserAgent optionally specifies an alternate User-Agent header
UserAgent string
} }
func (c *Client) httpClient() *http.Client { func (c *Client) httpClient() *http.Client {
@ -60,6 +65,46 @@ func (c *Client) httpClient() *http.Client {
return http.DefaultClient return http.DefaultClient
} }
// BuildURL builds a url to http(s)://<apiserver>/api/v2/<slash-separated-pathElements>
// using the given pathElements. It url escapes each path element, so the
// caller doesn't need to worry about that. The last item of pathElements can
// be of type url.Values to add a query string to the URL.
//
// For example, BuildURL(devices, 5) with the default server URL would result in
// https://api.tailscale.com/api/v2/devices/5.
func (c *Client) BuildURL(pathElements ...any) string {
elem := make([]string, 1, len(pathElements)+1)
elem[0] = "/api/v2"
var query string
for i, pathElement := range pathElements {
if uv, ok := pathElement.(url.Values); ok && i == len(pathElements)-1 {
query = uv.Encode()
} else {
elem = append(elem, url.PathEscape(fmt.Sprint(pathElement)))
}
}
url := c.baseURL() + path.Join(elem...)
if query != "" {
url += "?" + query
}
return url
}
// BuildTailnetURL builds a url to http(s)://<apiserver>/api/v2/tailnet/<tailnet>/<slash-separated-pathElements>
// using the given pathElements. It url escapes each path element, so the
// caller doesn't need to worry about that. The last item of pathElements can
// be of type url.Values to add a query string to the URL.
//
// For example, BuildTailnetURL(policy, validate) with the default server URL and a tailnet of "example.com"
// would result in https://api.tailscale.com/api/v2/tailnet/example.com/policy/validate.
func (c *Client) BuildTailnetURL(pathElements ...any) string {
allElements := make([]any, 2, len(pathElements)+2)
allElements[0] = "tailnet"
allElements[1] = c.tailnet
allElements = append(allElements, pathElements...)
return c.BuildURL(allElements...)
}
func (c *Client) baseURL() string { func (c *Client) baseURL() string {
if c.BaseURL != "" { if c.BaseURL != "" {
return c.BaseURL return c.BaseURL
@ -74,7 +119,7 @@ type AuthMethod interface {
modifyRequest(req *http.Request) modifyRequest(req *http.Request)
} }
// APIKey is an AuthMethod for NewClient that authenticates requests // APIKey is an [AuthMethod] for [NewClient] that authenticates requests
// using an authkey. // using an authkey.
type APIKey string type APIKey string
@ -88,17 +133,20 @@ func (c *Client) setAuth(r *http.Request) {
} }
} }
// NewClient is a convenience method for instantiating a new Client. // NewClient is a convenience method for instantiating a new [Client].
// //
// tailnet is the globally unique identifier for a Tailscale network, such // tailnet is the globally unique identifier for a Tailscale network, such
// as "example.com" or "user@gmail.com". // as "example.com" or "user@gmail.com".
// If httpClient is nil, then http.DefaultClient is used. // If httpClient is nil, then [http.DefaultClient] is used.
// "api.tailscale.com" is set as the BaseURL for the returned client // "api.tailscale.com" is set as the BaseURL for the returned client
// and can be changed manually by the user. // and can be changed manually by the user.
//
// Deprecated: use [tailscale.com/client/tailscale/v2] instead.
func NewClient(tailnet string, auth AuthMethod) *Client { func NewClient(tailnet string, auth AuthMethod) *Client {
return &Client{ return &Client{
tailnet: tailnet, tailnet: tailnet,
auth: auth, auth: auth,
UserAgent: "tailscale-client-oss",
} }
} }
@ -110,17 +158,16 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable")
} }
c.setAuth(req) c.setAuth(req)
if c.UserAgent != "" {
req.Header.Set("User-Agent", c.UserAgent)
}
return c.httpClient().Do(req) return c.httpClient().Do(req)
} }
// sendRequest add the authentication key to the request and sends it. It // sendRequest add the authentication key to the request and sends it. It
// receives the response and reads up to 10MB of it. // receives the response and reads up to 10MB of it.
func (c *Client) sendRequest(req *http.Request) ([]byte, *http.Response, error) { func (c *Client) sendRequest(req *http.Request) ([]byte, *http.Response, error) {
if !I_Acknowledge_This_API_Is_Unstable { resp, err := c.Do(req)
return nil, nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable")
}
c.setAuth(req)
resp, err := c.httpClient().Do(req)
if err != nil { if err != nil {
return nil, resp, err return nil, resp, err
} }
@ -145,12 +192,14 @@ func (e ErrResponse) Error() string {
return fmt.Sprintf("Status: %d, Message: %q", e.Status, e.Message) return fmt.Sprintf("Status: %d, Message: %q", e.Status, e.Message)
} }
// handleErrorResponse decodes the error message from the server and returns // HandleErrorResponse decodes the error message from the server and returns
// an ErrResponse from it. // an [ErrResponse] from it.
func handleErrorResponse(b []byte, resp *http.Response) error { //
// Deprecated: use [tailscale.com/client/tailscale/v2] instead.
func HandleErrorResponse(b []byte, resp *http.Response) error {
var errResp ErrResponse var errResp ErrResponse
if err := json.Unmarshal(b, &errResp); err != nil { if err := json.Unmarshal(b, &errResp); err != nil {
return err return fmt.Errorf("json.Unmarshal %q: %w", b, err)
} }
errResp.Status = resp.StatusCode errResp.Status = resp.StatusCode
return errResp return errResp

@ -0,0 +1,86 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package tailscale
import (
"net/url"
"testing"
)
func TestClientBuildURL(t *testing.T) {
c := Client{BaseURL: "http://127.0.0.1:1234"}
for _, tt := range []struct {
desc string
elements []any
want string
}{
{
desc: "single-element",
elements: []any{"devices"},
want: "http://127.0.0.1:1234/api/v2/devices",
},
{
desc: "multiple-elements",
elements: []any{"tailnet", "example.com"},
want: "http://127.0.0.1:1234/api/v2/tailnet/example.com",
},
{
desc: "escape-element",
elements: []any{"tailnet", "example dot com?foo=bar"},
want: `http://127.0.0.1:1234/api/v2/tailnet/example%20dot%20com%3Ffoo=bar`,
},
{
desc: "url.Values",
elements: []any{"tailnet", "example.com", "acl", url.Values{"details": {"1"}}},
want: `http://127.0.0.1:1234/api/v2/tailnet/example.com/acl?details=1`,
},
} {
t.Run(tt.desc, func(t *testing.T) {
got := c.BuildURL(tt.elements...)
if got != tt.want {
t.Errorf("got %q, want %q", got, tt.want)
}
})
}
}
func TestClientBuildTailnetURL(t *testing.T) {
c := Client{
BaseURL: "http://127.0.0.1:1234",
tailnet: "example.com",
}
for _, tt := range []struct {
desc string
elements []any
want string
}{
{
desc: "single-element",
elements: []any{"devices"},
want: "http://127.0.0.1:1234/api/v2/tailnet/example.com/devices",
},
{
desc: "multiple-elements",
elements: []any{"devices", 123},
want: "http://127.0.0.1:1234/api/v2/tailnet/example.com/devices/123",
},
{
desc: "escape-element",
elements: []any{"foo bar?baz=qux"},
want: `http://127.0.0.1:1234/api/v2/tailnet/example.com/foo%20bar%3Fbaz=qux`,
},
{
desc: "url.Values",
elements: []any{"acl", url.Values{"details": {"1"}}},
want: `http://127.0.0.1:1234/api/v2/tailnet/example.com/acl?details=1`,
},
} {
t.Run(tt.desc, func(t *testing.T) {
got := c.BuildTailnetURL(tt.elements...)
if got != tt.want {
t.Errorf("got %q, want %q", got, tt.want)
}
})
}
}

@ -192,7 +192,7 @@ func (s *Server) controlSupportsCheckMode(ctx context.Context) bool {
if err != nil { if err != nil {
return true return true
} }
controlURL, err := url.Parse(prefs.ControlURLOrDefault()) controlURL, err := url.Parse(prefs.ControlURLOrDefault(s.polc))
if err != nil { if err != nil {
return true return true
} }

@ -3,7 +3,7 @@
"version": "0.0.1", "version": "0.0.1",
"license": "BSD-3-Clause", "license": "BSD-3-Clause",
"engines": { "engines": {
"node": "18.20.4", "node": "22.14.0",
"yarn": "1.22.19" "yarn": "1.22.19"
}, },
"type": "module", "type": "module",
@ -20,7 +20,7 @@
"zustand": "^4.4.7" "zustand": "^4.4.7"
}, },
"devDependencies": { "devDependencies": {
"@types/node": "^18.16.1", "@types/node": "^22.14.0",
"@types/react": "^18.0.20", "@types/react": "^18.0.20",
"@types/react-dom": "^18.0.6", "@types/react-dom": "^18.0.6",
"@vitejs/plugin-react-swc": "^3.6.0", "@vitejs/plugin-react-swc": "^3.6.0",

@ -249,7 +249,6 @@ export function useAPI() {
return api return api
} }
let csrfToken: string
let synoToken: string | undefined // required for synology API requests let synoToken: string | undefined // required for synology API requests
let unraidCsrfToken: string | undefined // required for unraid POST requests (#8062) let unraidCsrfToken: string | undefined // required for unraid POST requests (#8062)
@ -298,12 +297,10 @@ export function apiFetch<T>(
headers: { headers: {
Accept: "application/json", Accept: "application/json",
"Content-Type": contentType, "Content-Type": contentType,
"X-CSRF-Token": csrfToken,
}, },
body: body, body: body,
}) })
.then((r) => { .then((r) => {
updateCsrfToken(r)
if (!r.ok) { if (!r.ok) {
return r.text().then((err) => { return r.text().then((err) => {
throw new Error(err) throw new Error(err)
@ -322,13 +319,6 @@ export function apiFetch<T>(
}) })
} }
function updateCsrfToken(r: Response) {
const tok = r.headers.get("X-CSRF-Token")
if (tok) {
csrfToken = tok
}
}
export function setSynoToken(token?: string) { export function setSynoToken(token?: string) {
synoToken = token synoToken = token
} }

@ -1,13 +1,11 @@
// Copyright (c) Tailscale Inc & AUTHORS // Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause // SPDX-License-Identifier: BSD-3-Clause
import React, { useState } from "react" import React from "react"
import { useAPI } from "src/api" import { useAPI } from "src/api"
import TailscaleIcon from "src/assets/icons/tailscale-icon.svg?react" import TailscaleIcon from "src/assets/icons/tailscale-icon.svg?react"
import { NodeData } from "src/types" import { NodeData } from "src/types"
import Button from "src/ui/button" import Button from "src/ui/button"
import Collapsible from "src/ui/collapsible"
import Input from "src/ui/input"
/** /**
* LoginView is rendered when the client is not authenticated * LoginView is rendered when the client is not authenticated
@ -15,8 +13,6 @@ import Input from "src/ui/input"
*/ */
export default function LoginView({ data }: { data: NodeData }) { export default function LoginView({ data }: { data: NodeData }) {
const api = useAPI() const api = useAPI()
const [controlURL, setControlURL] = useState<string>("")
const [authKey, setAuthKey] = useState<string>("")
return ( return (
<div className="mb-8 py-6 px-8 bg-white rounded-md shadow-2xl"> <div className="mb-8 py-6 px-8 bg-white rounded-md shadow-2xl">
@ -88,8 +84,6 @@ export default function LoginView({ data }: { data: NodeData }) {
action: "up", action: "up",
data: { data: {
Reauthenticate: true, Reauthenticate: true,
ControlURL: controlURL,
AuthKey: authKey,
}, },
}) })
} }
@ -98,34 +92,6 @@ export default function LoginView({ data }: { data: NodeData }) {
> >
Log In Log In
</Button> </Button>
<Collapsible trigger="Advanced options">
<h4 className="font-medium mb-1 mt-2">Auth Key</h4>
<p className="text-sm text-gray-500">
Connect with a pre-authenticated key.{" "}
<a
href="https://tailscale.com/kb/1085/auth-keys/"
className="link"
target="_blank"
rel="noreferrer"
>
Learn more &rarr;
</a>
</p>
<Input
className="mt-2"
value={authKey}
onChange={(e) => setAuthKey(e.target.value)}
placeholder="tskey-auth-XXX"
/>
<h4 className="font-medium mt-3 mb-1">Server URL</h4>
<p className="text-sm text-gray-500">Base URL of control server.</p>
<Input
className="mt-2"
value={controlURL}
onChange={(e) => setControlURL(e.target.value)}
placeholder="https://login.tailscale.com/"
/>
</Collapsible>
</> </>
)} )}
</div> </div>

@ -66,7 +66,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
// match from a list of exit node `options` to `nodes`. // match from a list of exit node `options` to `nodes`.
const addBestMatchNode = ( const addBestMatchNode = (
options: ExitNode[], options: ExitNode[],
name: (l: ExitNodeLocation) => string name: (loc: ExitNodeLocation) => string
) => { ) => {
const bestNode = highestPriorityNode(options) const bestNode = highestPriorityNode(options)
if (!bestNode || !bestNode.Location) { if (!bestNode || !bestNode.Location) {
@ -86,7 +86,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
locationNodesMap.forEach( locationNodesMap.forEach(
// add one node per country // add one node per country
(countryNodes) => (countryNodes) =>
addBestMatchNode(flattenMap(countryNodes), (l) => l.Country) addBestMatchNode(flattenMap(countryNodes), (loc) => loc.Country)
) )
} else { } else {
// Otherwise, show the best match on a city-level, // Otherwise, show the best match on a city-level,
@ -97,12 +97,12 @@ export default function useExitNodes(node: NodeData, filter?: string) {
countryNodes.forEach( countryNodes.forEach(
// add one node per city // add one node per city
(cityNodes) => (cityNodes) =>
addBestMatchNode(cityNodes, (l) => `${l.Country}: ${l.City}`) addBestMatchNode(cityNodes, (loc) => `${loc.Country}: ${loc.City}`)
) )
// add the "Country: Best Match" node // add the "Country: Best Match" node
addBestMatchNode( addBestMatchNode(
flattenMap(countryNodes), flattenMap(countryNodes),
(l) => `${l.Country}: Best Match` (loc) => `${loc.Country}: Best Match`
) )
}) })
} }

@ -5,8 +5,8 @@
package web package web
import ( import (
"cmp"
"context" "context"
"crypto/rand"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -14,18 +14,20 @@ import (
"log" "log"
"net/http" "net/http"
"net/netip" "net/netip"
"net/url"
"os" "os"
"path" "path"
"path/filepath" "slices"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/gorilla/csrf" "tailscale.com/client/local"
"tailscale.com/client/tailscale"
"tailscale.com/client/tailscale/apitype" "tailscale.com/client/tailscale/apitype"
"tailscale.com/clientupdate"
"tailscale.com/envknob" "tailscale.com/envknob"
"tailscale.com/envknob/featureknob"
"tailscale.com/feature"
"tailscale.com/feature/buildfeatures"
"tailscale.com/hostinfo" "tailscale.com/hostinfo"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/ipn/ipnstate" "tailscale.com/ipn/ipnstate"
@ -36,6 +38,7 @@ import (
"tailscale.com/types/logger" "tailscale.com/types/logger"
"tailscale.com/types/views" "tailscale.com/types/views"
"tailscale.com/util/httpm" "tailscale.com/util/httpm"
"tailscale.com/util/syspolicy/policyclient"
"tailscale.com/version" "tailscale.com/version"
"tailscale.com/version/distro" "tailscale.com/version/distro"
) )
@ -49,7 +52,8 @@ type Server struct {
mode ServerMode mode ServerMode
logf logger.Logf logf logger.Logf
lc *tailscale.LocalClient polc policyclient.Client // must be non-nil
lc *local.Client
timeNow func() time.Time timeNow func() time.Time
// devMode indicates that the server run with frontend assets // devMode indicates that the server run with frontend assets
@ -59,6 +63,12 @@ type Server struct {
cgiMode bool cgiMode bool
pathPrefix string pathPrefix string
// originOverride is the origin that the web UI is accessible from.
// This value is used in the fallback CSRF checks when Sec-Fetch-Site is not
// available. In this case the application will compare Host and Origin
// header values to determine if the request is from the same origin.
originOverride string
apiHandler http.Handler // serves api endpoints; csrf-protected apiHandler http.Handler // serves api endpoints; csrf-protected
assetsHandler http.Handler // serves frontend assets assetsHandler http.Handler // serves frontend assets
assetsCleanup func() // called from Server.Shutdown assetsCleanup func() // called from Server.Shutdown
@ -88,8 +98,8 @@ type Server struct {
type ServerMode string type ServerMode string
const ( const (
// LoginServerMode serves a readonly login client for logging a // LoginServerMode serves a read-only login client for logging a
// node into a tailnet, and viewing a readonly interface of the // node into a tailnet, and viewing a read-only interface of the
// node's current Tailscale settings. // node's current Tailscale settings.
// //
// In this mode, API calls are authenticated via platform auth. // In this mode, API calls are authenticated via platform auth.
@ -109,7 +119,7 @@ const (
// This mode restricts the app to only being assessible over Tailscale, // This mode restricts the app to only being assessible over Tailscale,
// and API calls are authenticated via browser sessions associated with // and API calls are authenticated via browser sessions associated with
// the source's Tailscale identity. If the source browser does not have // the source's Tailscale identity. If the source browser does not have
// a valid session, a readonly version of the app is displayed. // a valid session, a read-only version of the app is displayed.
ManageServerMode ServerMode = "manage" ManageServerMode ServerMode = "manage"
) )
@ -124,18 +134,22 @@ type ServerOpts struct {
// PathPrefix is the URL prefix added to requests by CGI or reverse proxy. // PathPrefix is the URL prefix added to requests by CGI or reverse proxy.
PathPrefix string PathPrefix string
// LocalClient is the tailscale.LocalClient to use for this web server. // LocalClient is the local.Client to use for this web server.
// If nil, a new one will be created. // If nil, a new one will be created.
LocalClient *tailscale.LocalClient LocalClient *local.Client
// TimeNow optionally provides a time function. // TimeNow optionally provides a time function.
// time.Now is used as default. // time.Now is used as default.
TimeNow func() time.Time TimeNow func() time.Time
// Logf optionally provides a logger function. // Logf optionally provides a logger function.
// log.Printf is used as default. // If nil, log.Printf is used as default.
Logf logger.Logf Logf logger.Logf
// PolicyClient, if non-nil, will be used to fetch policy settings.
// If nil, the default policy client will be used.
PolicyClient policyclient.Client
// The following two fields are required and used exclusively // The following two fields are required and used exclusively
// in ManageServerMode to facilitate the control server login // in ManageServerMode to facilitate the control server login
// check step for authorizing browser sessions. // check step for authorizing browser sessions.
@ -149,6 +163,9 @@ type ServerOpts struct {
// as completed. // as completed.
// This field is required for ManageServerMode mode. // This field is required for ManageServerMode mode.
WaitAuthURL func(ctx context.Context, id string, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) WaitAuthURL func(ctx context.Context, id string, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error)
// OriginOverride specifies the origin that the web UI will be accessible from if hosted behind a reverse proxy or CGI.
OriginOverride string
} }
// NewServer constructs a new Tailscale web client server. // NewServer constructs a new Tailscale web client server.
@ -165,10 +182,11 @@ func NewServer(opts ServerOpts) (s *Server, err error) {
return nil, fmt.Errorf("invalid Mode provided") return nil, fmt.Errorf("invalid Mode provided")
} }
if opts.LocalClient == nil { if opts.LocalClient == nil {
opts.LocalClient = &tailscale.LocalClient{} opts.LocalClient = &local.Client{}
} }
s = &Server{ s = &Server{
mode: opts.Mode, mode: opts.Mode,
polc: cmp.Or(opts.PolicyClient, policyclient.Get()),
logf: opts.Logf, logf: opts.Logf,
devMode: envknob.Bool("TS_DEBUG_WEB_CLIENT_DEV"), devMode: envknob.Bool("TS_DEBUG_WEB_CLIENT_DEV"),
lc: opts.LocalClient, lc: opts.LocalClient,
@ -177,6 +195,7 @@ func NewServer(opts ServerOpts) (s *Server, err error) {
timeNow: opts.TimeNow, timeNow: opts.TimeNow,
newAuthURL: opts.NewAuthURL, newAuthURL: opts.NewAuthURL,
waitAuthURL: opts.WaitAuthURL, waitAuthURL: opts.WaitAuthURL,
originOverride: opts.OriginOverride,
} }
if opts.PathPrefix != "" { if opts.PathPrefix != "" {
// Enforce that path prefix always has a single leading '/' // Enforce that path prefix always has a single leading '/'
@ -202,25 +221,9 @@ func NewServer(opts ServerOpts) (s *Server, err error) {
} }
s.assetsHandler, s.assetsCleanup = assetsHandler(s.devMode) s.assetsHandler, s.assetsCleanup = assetsHandler(s.devMode)
var metric string // clientmetric to report on startup var metric string
s.apiHandler, metric = s.modeAPIHandler(s.mode)
// Create handler for "/api" requests with CSRF protection. s.apiHandler = s.csrfProtect(s.apiHandler)
// We don't require secure cookies, since the web client is regularly used
// on network appliances that are served on local non-https URLs.
// The client is secured by limiting the interface it listens on,
// or by authenticating requests before they reach the web client.
csrfProtect := csrf.Protect(s.csrfKey(), csrf.Secure(false))
switch s.mode {
case LoginServerMode:
s.apiHandler = csrfProtect(http.HandlerFunc(s.serveLoginAPI))
metric = "web_login_client_initialization"
case ReadOnlyServerMode:
s.apiHandler = csrfProtect(http.HandlerFunc(s.serveLoginAPI))
metric = "web_readonly_client_initialization"
case ManageServerMode:
s.apiHandler = csrfProtect(http.HandlerFunc(s.serveAPI))
metric = "web_client_initialization"
}
// Don't block startup on reporting metric. // Don't block startup on reporting metric.
// Report in separate go routine with 5 second timeout. // Report in separate go routine with 5 second timeout.
@ -233,6 +236,80 @@ func NewServer(opts ServerOpts) (s *Server, err error) {
return s, nil return s, nil
} }
func (s *Server) csrfProtect(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// CSRF is not required for GET, HEAD, or OPTIONS requests.
if slices.Contains([]string{"GET", "HEAD", "OPTIONS"}, r.Method) {
h.ServeHTTP(w, r)
return
}
// first attempt to use Sec-Fetch-Site header (sent by all modern
// browsers to "potentially trustworthy" origins i.e. localhost or those
// served over HTTPS)
secFetchSite := r.Header.Get("Sec-Fetch-Site")
if secFetchSite == "same-origin" {
h.ServeHTTP(w, r)
return
} else if secFetchSite != "" {
http.Error(w, fmt.Sprintf("CSRF request denied with Sec-Fetch-Site %q", secFetchSite), http.StatusForbidden)
return
}
// if Sec-Fetch-Site is not available we presume we are operating over HTTP.
// We fall back to comparing the Origin & Host headers.
// use the Host header to determine the expected origin
// (use the override if set to allow for reverse proxying)
host := r.Host
if host == "" {
http.Error(w, "CSRF request denied with no Host header", http.StatusForbidden)
return
}
if s.originOverride != "" {
host = s.originOverride
}
originHeader := r.Header.Get("Origin")
if originHeader == "" {
http.Error(w, "CSRF request denied with no Origin header", http.StatusForbidden)
return
}
parsedOrigin, err := url.Parse(originHeader)
if err != nil {
http.Error(w, fmt.Sprintf("CSRF request denied with invalid Origin %q", r.Header.Get("Origin")), http.StatusForbidden)
return
}
origin := parsedOrigin.Host
if origin == "" {
http.Error(w, "CSRF request denied with no host in the Origin header", http.StatusForbidden)
return
}
if origin != host {
http.Error(w, fmt.Sprintf("CSRF request denied with mismatched Origin %q and Host %q", origin, host), http.StatusForbidden)
return
}
h.ServeHTTP(w, r)
})
}
func (s *Server) modeAPIHandler(mode ServerMode) (http.Handler, string) {
switch mode {
case LoginServerMode:
return http.HandlerFunc(s.serveLoginAPI), "web_login_client_initialization"
case ReadOnlyServerMode:
return http.HandlerFunc(s.serveLoginAPI), "web_readonly_client_initialization"
case ManageServerMode:
return http.HandlerFunc(s.serveAPI), "web_client_initialization"
default: // invalid mode
log.Fatalf("invalid mode: %v", mode)
}
return nil, ""
}
func (s *Server) Shutdown() { func (s *Server) Shutdown() {
s.logf("web.Server: shutting down") s.logf("web.Server: shutting down")
if s.assetsCleanup != nil { if s.assetsCleanup != nil {
@ -317,7 +394,8 @@ func (s *Server) requireTailscaleIP(w http.ResponseWriter, r *http.Request) (han
ipv6ServiceHost = "[" + tsaddr.TailscaleServiceIPv6String + "]" ipv6ServiceHost = "[" + tsaddr.TailscaleServiceIPv6String + "]"
) )
// allow requests on quad-100 (or ipv6 equivalent) // allow requests on quad-100 (or ipv6 equivalent)
if r.Host == ipv4ServiceHost || r.Host == ipv6ServiceHost { host := strings.TrimSuffix(r.Host, ":80")
if host == ipv4ServiceHost || host == ipv6ServiceHost {
return false return false
} }
@ -419,6 +497,10 @@ func (s *Server) authorizeRequest(w http.ResponseWriter, r *http.Request) (ok bo
// Client using system-specific auth. // Client using system-specific auth.
switch distro.Get() { switch distro.Get() {
case distro.Synology: case distro.Synology:
if !buildfeatures.HasSynology {
// Synology support not built in.
return false
}
authorized, _ := authorizeSynology(r) authorized, _ := authorizeSynology(r)
return authorized return authorized
case distro.QNAP: case distro.QNAP:
@ -433,7 +515,6 @@ func (s *Server) authorizeRequest(w http.ResponseWriter, r *http.Request) (ok bo
// It should only be called by Server.ServeHTTP, via Server.apiHandler, // It should only be called by Server.ServeHTTP, via Server.apiHandler,
// which protects the handler using gorilla csrf. // which protects the handler using gorilla csrf.
func (s *Server) serveLoginAPI(w http.ResponseWriter, r *http.Request) { func (s *Server) serveLoginAPI(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-CSRF-Token", csrf.Token(r))
switch { switch {
case r.URL.Path == "/api/data" && r.Method == httpm.GET: case r.URL.Path == "/api/data" && r.Method == httpm.GET:
s.serveGetNodeData(w, r) s.serveGetNodeData(w, r)
@ -556,7 +637,6 @@ func (s *Server) serveAPI(w http.ResponseWriter, r *http.Request) {
} }
} }
w.Header().Set("X-CSRF-Token", csrf.Token(r))
path := strings.TrimPrefix(r.URL.Path, "/api") path := strings.TrimPrefix(r.URL.Path, "/api")
switch { switch {
case path == "/data" && r.Method == httpm.GET: case path == "/data" && r.Method == httpm.GET:
@ -694,16 +774,16 @@ func (s *Server) serveAPIAuth(w http.ResponseWriter, r *http.Request) {
switch { switch {
case sErr != nil && errors.Is(sErr, errNotUsingTailscale): case sErr != nil && errors.Is(sErr, errNotUsingTailscale):
s.lc.IncrementCounter(r.Context(), "web_client_viewing_local", 1) s.lc.IncrementCounter(r.Context(), "web_client_viewing_local", 1)
resp.Authorized = false // restricted to the readonly view resp.Authorized = false // restricted to the read-only view
case sErr != nil && errors.Is(sErr, errNotOwner): case sErr != nil && errors.Is(sErr, errNotOwner):
s.lc.IncrementCounter(r.Context(), "web_client_viewing_not_owner", 1) s.lc.IncrementCounter(r.Context(), "web_client_viewing_not_owner", 1)
resp.Authorized = false // restricted to the readonly view resp.Authorized = false // restricted to the read-only view
case sErr != nil && errors.Is(sErr, errTaggedLocalSource): case sErr != nil && errors.Is(sErr, errTaggedLocalSource):
s.lc.IncrementCounter(r.Context(), "web_client_viewing_local_tag", 1) s.lc.IncrementCounter(r.Context(), "web_client_viewing_local_tag", 1)
resp.Authorized = false // restricted to the readonly view resp.Authorized = false // restricted to the read-only view
case sErr != nil && errors.Is(sErr, errTaggedRemoteSource): case sErr != nil && errors.Is(sErr, errTaggedRemoteSource):
s.lc.IncrementCounter(r.Context(), "web_client_viewing_remote_tag", 1) s.lc.IncrementCounter(r.Context(), "web_client_viewing_remote_tag", 1)
resp.Authorized = false // restricted to the readonly view resp.Authorized = false // restricted to the read-only view
case sErr != nil && !errors.Is(sErr, errNoSession): case sErr != nil && !errors.Is(sErr, errNoSession):
// Any other error. // Any other error.
http.Error(w, sErr.Error(), http.StatusInternalServerError) http.Error(w, sErr.Error(), http.StatusInternalServerError)
@ -803,8 +883,8 @@ type nodeData struct {
DeviceName string DeviceName string
TailnetName string // TLS cert name TailnetName string // TLS cert name
DomainName string DomainName string
IPv4 string IPv4 netip.Addr
IPv6 string IPv6 netip.Addr
OS string OS string
IPNVersion string IPNVersion string
@ -863,10 +943,14 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) {
return return
} }
filterRules, _ := s.lc.DebugPacketFilterRules(r.Context()) filterRules, _ := s.lc.DebugPacketFilterRules(r.Context())
ipv4, ipv6 := s.selfNodeAddresses(r, st)
data := &nodeData{ data := &nodeData{
ID: st.Self.ID, ID: st.Self.ID,
Status: st.BackendState, Status: st.BackendState,
DeviceName: strings.Split(st.Self.DNSName, ".")[0], DeviceName: strings.Split(st.Self.DNSName, ".")[0],
IPv4: ipv4,
IPv6: ipv6,
OS: st.Self.OS, OS: st.Self.OS,
IPNVersion: strings.Split(st.Version, "-")[0], IPNVersion: strings.Split(st.Version, "-")[0],
Profile: st.User[st.Self.UserID], Profile: st.User[st.Self.UserID],
@ -879,17 +963,13 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) {
UnraidToken: os.Getenv("UNRAID_CSRF_TOKEN"), UnraidToken: os.Getenv("UNRAID_CSRF_TOKEN"),
RunningSSHServer: prefs.RunSSH, RunningSSHServer: prefs.RunSSH,
URLPrefix: strings.TrimSuffix(s.pathPrefix, "/"), URLPrefix: strings.TrimSuffix(s.pathPrefix, "/"),
ControlAdminURL: prefs.AdminPageURL(), ControlAdminURL: prefs.AdminPageURL(s.polc),
LicensesURL: licenses.LicensesURL(), LicensesURL: licenses.LicensesURL(),
Features: availableFeatures(), Features: availableFeatures(),
ACLAllowsAnyIncomingTraffic: s.aclsAllowAccess(filterRules), ACLAllowsAnyIncomingTraffic: s.aclsAllowAccess(filterRules),
} }
ipv4, ipv6 := s.selfNodeAddresses(r, st)
data.IPv4 = ipv4.String()
data.IPv6 = ipv6.String()
if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn && data.URLPrefix == "" { if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn && data.URLPrefix == "" {
// X-Ingress-Path is the path prefix in use for Home Assistant // X-Ingress-Path is the path prefix in use for Home Assistant
// https://developers.home-assistant.io/docs/add-ons/presentation#ingress // https://developers.home-assistant.io/docs/add-ons/presentation#ingress
@ -903,10 +983,19 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) {
data.ClientVersion = cv data.ClientVersion = cv
} }
profile, _, err := s.lc.ProfileStatus(r.Context())
if err != nil {
s.logf("error fetching profiles: %v", err)
// If for some reason we can't fetch profiles,
// continue to use st.CurrentTailnet if set.
if st.CurrentTailnet != nil { if st.CurrentTailnet != nil {
data.TailnetName = st.CurrentTailnet.MagicDNSSuffix data.TailnetName = st.CurrentTailnet.MagicDNSSuffix
data.DomainName = st.CurrentTailnet.Name data.DomainName = st.CurrentTailnet.Name
} }
} else {
data.TailnetName = profile.NetworkProfile.MagicDNSName
data.DomainName = profile.NetworkProfile.DisplayNameOrDefault()
}
if st.Self.Tags != nil { if st.Self.Tags != nil {
data.Tags = st.Self.Tags.AsSlice() data.Tags = st.Self.Tags.AsSlice()
} }
@ -960,37 +1049,16 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) {
} }
func availableFeatures() map[string]bool { func availableFeatures() map[string]bool {
env := hostinfo.GetEnvType()
features := map[string]bool{ features := map[string]bool{
"advertise-exit-node": true, // available on all platforms "advertise-exit-node": true, // available on all platforms
"advertise-routes": true, // available on all platforms "advertise-routes": true, // available on all platforms
"use-exit-node": canUseExitNode(env) == nil, "use-exit-node": featureknob.CanUseExitNode() == nil,
"ssh": envknob.CanRunTailscaleSSH() == nil, "ssh": featureknob.CanRunTailscaleSSH() == nil,
"auto-update": version.IsUnstableBuild() && clientupdate.CanAutoUpdate(), "auto-update": version.IsUnstableBuild() && feature.CanAutoUpdate(),
}
if env == hostinfo.HomeAssistantAddOn {
// Setting SSH on Home Assistant causes trouble on startup
// (since the flag is not being passed to `tailscale up`).
// Although Tailscale SSH does work here,
// it's not terribly useful since it's running in a separate container.
features["ssh"] = false
} }
return features return features
} }
func canUseExitNode(env hostinfo.EnvType) error {
switch dist := distro.Get(); dist {
case distro.Synology, // see https://github.com/tailscale/tailscale/issues/1995
distro.QNAP,
distro.Unraid:
return fmt.Errorf("Tailscale exit nodes cannot be used on %s.", dist)
}
if env == hostinfo.HomeAssistantAddOn {
return errors.New("Tailscale exit nodes cannot be used on Home Assistant.")
}
return nil
}
// aclsAllowAccess returns whether tailnet ACLs (as expressed in the provided filter rules) // aclsAllowAccess returns whether tailnet ACLs (as expressed in the provided filter rules)
// permit any devices to access the local web client. // permit any devices to access the local web client.
// This does not currently check whether a specific device can connect, just any device. // This does not currently check whether a specific device can connect, just any device.
@ -1278,37 +1346,6 @@ func (s *Server) proxyRequestToLocalAPI(w http.ResponseWriter, r *http.Request)
} }
} }
// csrfKey returns a key that can be used for CSRF protection.
// If an error occurs during key creation, the error is logged and the active process terminated.
// If the server is running in CGI mode, the key is cached to disk and reused between requests.
// If an error occurs during key storage, the error is logged and the active process terminated.
func (s *Server) csrfKey() []byte {
csrfFile := filepath.Join(os.TempDir(), "tailscale-web-csrf.key")
// if running in CGI mode, try to read from disk, but ignore errors
if s.cgiMode {
key, _ := os.ReadFile(csrfFile)
if len(key) == 32 {
return key
}
}
// create a new key
key := make([]byte, 32)
if _, err := rand.Read(key); err != nil {
log.Fatalf("error generating CSRF key: %v", err)
}
// if running in CGI mode, try to write the newly created key to disk, and exit if it fails.
if s.cgiMode {
if err := os.WriteFile(csrfFile, key, 0600); err != nil {
log.Fatalf("unable to store CSRF key: %v", err)
}
}
return key
}
// enforcePrefix returns a HandlerFunc that enforces a given path prefix is used in requests, // enforcePrefix returns a HandlerFunc that enforces a given path prefix is used in requests,
// then strips it before invoking h. // then strips it before invoking h.
// Unlike http.StripPrefix, it does not return a 404 if the prefix is not present. // Unlike http.StripPrefix, it does not return a 404 if the prefix is not present.

@ -20,7 +20,7 @@ import (
"time" "time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"tailscale.com/client/tailscale" "tailscale.com/client/local"
"tailscale.com/client/tailscale/apitype" "tailscale.com/client/tailscale/apitype"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/ipn/ipnstate" "tailscale.com/ipn/ipnstate"
@ -28,6 +28,7 @@ import (
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/views" "tailscale.com/types/views"
"tailscale.com/util/httpm" "tailscale.com/util/httpm"
"tailscale.com/util/syspolicy/policyclient"
) )
func TestQnapAuthnURL(t *testing.T) { func TestQnapAuthnURL(t *testing.T) {
@ -120,7 +121,7 @@ func TestServeAPI(t *testing.T) {
s := &Server{ s := &Server{
mode: ManageServerMode, mode: ManageServerMode,
lc: &tailscale.LocalClient{Dial: lal.Dial}, lc: &local.Client{Dial: lal.Dial},
timeNow: time.Now, timeNow: time.Now,
} }
@ -288,7 +289,7 @@ func TestGetTailscaleBrowserSession(t *testing.T) {
s := &Server{ s := &Server{
timeNow: time.Now, timeNow: time.Now,
lc: &tailscale.LocalClient{Dial: lal.Dial}, lc: &local.Client{Dial: lal.Dial},
} }
// Add some browser sessions to cache state. // Add some browser sessions to cache state.
@ -457,7 +458,7 @@ func TestAuthorizeRequest(t *testing.T) {
s := &Server{ s := &Server{
mode: ManageServerMode, mode: ManageServerMode,
lc: &tailscale.LocalClient{Dial: lal.Dial}, lc: &local.Client{Dial: lal.Dial},
timeNow: time.Now, timeNow: time.Now,
} }
validCookie := "ts-cookie" validCookie := "ts-cookie"
@ -572,10 +573,11 @@ func TestServeAuth(t *testing.T) {
s := &Server{ s := &Server{
mode: ManageServerMode, mode: ManageServerMode,
lc: &tailscale.LocalClient{Dial: lal.Dial}, lc: &local.Client{Dial: lal.Dial},
timeNow: func() time.Time { return timeNow }, timeNow: func() time.Time { return timeNow },
newAuthURL: mockNewAuthURL, newAuthURL: mockNewAuthURL,
waitAuthURL: mockWaitAuthURL, waitAuthURL: mockWaitAuthURL,
polc: policyclient.NoPolicyClient{},
} }
successCookie := "ts-cookie-success" successCookie := "ts-cookie-success"
@ -914,7 +916,7 @@ func TestServeAPIAuthMetricLogging(t *testing.T) {
s := &Server{ s := &Server{
mode: ManageServerMode, mode: ManageServerMode,
lc: &tailscale.LocalClient{Dial: lal.Dial}, lc: &local.Client{Dial: lal.Dial},
timeNow: func() time.Time { return timeNow }, timeNow: func() time.Time { return timeNow },
newAuthURL: mockNewAuthURL, newAuthURL: mockNewAuthURL,
waitAuthURL: mockWaitAuthURL, waitAuthURL: mockWaitAuthURL,
@ -1126,7 +1128,7 @@ func TestRequireTailscaleIP(t *testing.T) {
s := &Server{ s := &Server{
mode: ManageServerMode, mode: ManageServerMode,
lc: &tailscale.LocalClient{Dial: lal.Dial}, lc: &local.Client{Dial: lal.Dial},
timeNow: time.Now, timeNow: time.Now,
logf: t.Logf, logf: t.Logf,
} }
@ -1175,6 +1177,16 @@ func TestRequireTailscaleIP(t *testing.T) {
target: "http://[fd7a:115c:a1e0::53]/", target: "http://[fd7a:115c:a1e0::53]/",
wantHandled: false, wantHandled: false,
}, },
{
name: "quad-100:80",
target: "http://100.100.100.100:80/",
wantHandled: false,
},
{
name: "ipv6-service-addr:80",
target: "http://[fd7a:115c:a1e0::53]:80/",
wantHandled: false,
},
} }
for _, tt := range tests { for _, tt := range tests {
@ -1477,3 +1489,101 @@ func mockWaitAuthURL(_ context.Context, id string, src tailcfg.NodeID) (*tailcfg
return nil, errors.New("unknown id") return nil, errors.New("unknown id")
} }
} }
func TestCSRFProtect(t *testing.T) {
tests := []struct {
name string
method string
secFetchSite string
host string
origin string
originOverride string
wantError bool
}{
{
name: "GET requests with no header are allowed",
method: "GET",
},
{
name: "POST requests with same-origin are allowed",
method: "POST",
secFetchSite: "same-origin",
},
{
name: "POST requests with cross-site are not allowed",
method: "POST",
secFetchSite: "cross-site",
wantError: true,
},
{
name: "POST requests with unknown sec-fetch-site values are not allowed",
method: "POST",
secFetchSite: "new-unknown-value",
wantError: true,
},
{
name: "POST requests with none are not allowed",
method: "POST",
secFetchSite: "none",
wantError: true,
},
{
name: "POST requests with no sec-fetch-site header but matching host and origin are allowed",
method: "POST",
host: "example.com",
origin: "https://example.com",
},
{
name: "POST requests with no sec-fetch-site and non-matching host and origin are not allowed",
method: "POST",
host: "example.com",
origin: "https://example.net",
wantError: true,
},
{
name: "POST requests with no sec-fetch-site and and origin that matches the override are allowed",
method: "POST",
originOverride: "example.net",
host: "internal.example.foo", // Host can be changed by reverse proxies
origin: "http://example.net",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "OK")
})
s := &Server{
originOverride: tt.originOverride,
}
withCSRF := s.csrfProtect(handler)
r := httptest.NewRequest(tt.method, "http://example.com/", nil)
if tt.secFetchSite != "" {
r.Header.Set("Sec-Fetch-Site", tt.secFetchSite)
}
if tt.host != "" {
r.Host = tt.host
}
if tt.origin != "" {
r.Header.Set("Origin", tt.origin)
}
w := httptest.NewRecorder()
withCSRF.ServeHTTP(w, r)
res := w.Result()
defer res.Body.Close()
if tt.wantError {
if res.StatusCode != http.StatusForbidden {
t.Errorf("expected status forbidden, got %v", res.StatusCode)
}
return
}
if res.StatusCode != http.StatusOK {
t.Errorf("expected status ok, got %v", res.StatusCode)
}
})
}
}

@ -1087,11 +1087,9 @@
integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA== integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==
"@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.16.3", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4": "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.16.3", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4":
version "7.23.4" version "7.28.2"
resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.4.tgz#36fa1d2b36db873d25ec631dcc4923fdc1cf2e2e" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.2.tgz#2ae5a9d51cc583bd1f5673b3bb70d6d819682473"
integrity sha512-2Yv65nlWnWlSpe3fXEyX5i7fx5kIKo4Qbcj+hMO0odwaneFjfXw5fdum+4yL20O0QiaHpia0cYQ9xpNMqrBwHg== integrity sha512-KHp2IflsnGywDjBWDkR9iEqiWSpc8GIi0lgTT3mOElT0PP1tG26P4tmFI2YvAdzgq9RGyoHZQEIEdZy6Ec5xCA==
dependencies:
regenerator-runtime "^0.14.0"
"@babel/template@^7.22.15": "@babel/template@^7.22.15":
version "7.22.15" version "7.22.15"
@ -1880,12 +1878,12 @@
resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee" resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee"
integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==
"@types/node@^18.16.1": "@types/node@^22.14.0":
version "18.19.18" version "22.14.0"
resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.18.tgz#7526471b28828d1fef1f7e4960fb9477e6e4369c" resolved "https://registry.yarnpkg.com/@types/node/-/node-22.14.0.tgz#d3bfa3936fef0dbacd79ea3eb17d521c628bb47e"
integrity sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg== integrity sha512-Kmpl+z84ILoG+3T/zQFyAJsU6EPTmOCj8/2+83fSN6djd6I4o7uOuGIH6vq3PrjY5BGitSbFuMN18j3iknubbA==
dependencies: dependencies:
undici-types "~5.26.4" undici-types "~6.21.0"
"@types/parse-json@^4.0.0": "@types/parse-json@^4.0.0":
version "4.0.2" version "4.0.2"
@ -2450,6 +2448,14 @@ cac@^6.7.14:
resolved "https://registry.yarnpkg.com/cac/-/cac-6.7.14.tgz#804e1e6f506ee363cb0e3ccbb09cad5dd9870959" resolved "https://registry.yarnpkg.com/cac/-/cac-6.7.14.tgz#804e1e6f506ee363cb0e3ccbb09cad5dd9870959"
integrity sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ== integrity sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==
call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6"
integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==
dependencies:
es-errors "^1.3.0"
function-bind "^1.1.2"
call-bind@^1.0.0, call-bind@^1.0.2, call-bind@^1.0.4, call-bind@^1.0.5: call-bind@^1.0.0, call-bind@^1.0.2, call-bind@^1.0.4, call-bind@^1.0.5:
version "1.0.5" version "1.0.5"
resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.5.tgz#6fa2b7845ce0ea49bf4d8b9ef64727a2c2e2e513" resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.5.tgz#6fa2b7845ce0ea49bf4d8b9ef64727a2c2e2e513"
@ -2767,6 +2773,15 @@ dot-case@^3.0.4:
no-case "^3.0.4" no-case "^3.0.4"
tslib "^2.0.3" tslib "^2.0.3"
dunder-proto@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a"
integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==
dependencies:
call-bind-apply-helpers "^1.0.1"
es-errors "^1.3.0"
gopd "^1.2.0"
electron-to-chromium@^1.4.535: electron-to-chromium@^1.4.535:
version "1.4.596" version "1.4.596"
resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.596.tgz#6752d1aa795d942d49dfc5d3764d6ea283fab1d7" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.596.tgz#6752d1aa795d942d49dfc5d3764d6ea283fab1d7"
@ -2834,6 +2849,16 @@ es-abstract@^1.22.1:
unbox-primitive "^1.0.2" unbox-primitive "^1.0.2"
which-typed-array "^1.1.13" which-typed-array "^1.1.13"
es-define-property@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa"
integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==
es-errors@^1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15: es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15:
version "1.0.15" version "1.0.15"
resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.15.tgz#bd81d275ac766431d19305923707c3efd9f1ae40" resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.15.tgz#bd81d275ac766431d19305923707c3efd9f1ae40"
@ -2854,6 +2879,13 @@ es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15:
iterator.prototype "^1.1.2" iterator.prototype "^1.1.2"
safe-array-concat "^1.0.1" safe-array-concat "^1.0.1"
es-object-atoms@^1.0.0, es-object-atoms@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1"
integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==
dependencies:
es-errors "^1.3.0"
es-set-tostringtag@^2.0.1: es-set-tostringtag@^2.0.1:
version "2.0.2" version "2.0.2"
resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz#11f7cc9f63376930a5f20be4915834f4bc74f9c9" resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz#11f7cc9f63376930a5f20be4915834f4bc74f9c9"
@ -2863,6 +2895,16 @@ es-set-tostringtag@^2.0.1:
has-tostringtag "^1.0.0" has-tostringtag "^1.0.0"
hasown "^2.0.0" hasown "^2.0.0"
es-set-tostringtag@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d"
integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==
dependencies:
es-errors "^1.3.0"
get-intrinsic "^1.2.6"
has-tostringtag "^1.0.2"
hasown "^2.0.2"
es-shim-unscopables@^1.0.0: es-shim-unscopables@^1.0.0:
version "1.0.2" version "1.0.2"
resolved "https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763" resolved "https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763"
@ -3270,12 +3312,14 @@ for-each@^0.3.3:
is-callable "^1.1.3" is-callable "^1.1.3"
form-data@^4.0.0: form-data@^4.0.0:
version "4.0.0" version "4.0.4"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452" resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4"
integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww== integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==
dependencies: dependencies:
asynckit "^0.4.0" asynckit "^0.4.0"
combined-stream "^1.0.8" combined-stream "^1.0.8"
es-set-tostringtag "^2.1.0"
hasown "^2.0.2"
mime-types "^2.1.12" mime-types "^2.1.12"
fraction.js@^4.2.0: fraction.js@^4.2.0:
@ -3333,11 +3377,35 @@ get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@
has-symbols "^1.0.3" has-symbols "^1.0.3"
hasown "^2.0.0" hasown "^2.0.0"
get-intrinsic@^1.2.6:
version "1.3.0"
resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01"
integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==
dependencies:
call-bind-apply-helpers "^1.0.2"
es-define-property "^1.0.1"
es-errors "^1.3.0"
es-object-atoms "^1.1.1"
function-bind "^1.1.2"
get-proto "^1.0.1"
gopd "^1.2.0"
has-symbols "^1.1.0"
hasown "^2.0.2"
math-intrinsics "^1.1.0"
get-nonce@^1.0.0: get-nonce@^1.0.0:
version "1.0.1" version "1.0.1"
resolved "https://registry.yarnpkg.com/get-nonce/-/get-nonce-1.0.1.tgz#fdf3f0278073820d2ce9426c18f07481b1e0cdf3" resolved "https://registry.yarnpkg.com/get-nonce/-/get-nonce-1.0.1.tgz#fdf3f0278073820d2ce9426c18f07481b1e0cdf3"
integrity sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q== integrity sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==
get-proto@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1"
integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==
dependencies:
dunder-proto "^1.0.1"
es-object-atoms "^1.0.0"
get-stream@^8.0.1: get-stream@^8.0.1:
version "8.0.1" version "8.0.1"
resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2"
@ -3437,6 +3505,11 @@ gopd@^1.0.1:
dependencies: dependencies:
get-intrinsic "^1.1.3" get-intrinsic "^1.1.3"
gopd@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1"
integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==
graphemer@^1.4.0: graphemer@^1.4.0:
version "1.4.0" version "1.4.0"
resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6"
@ -3474,6 +3547,11 @@ has-symbols@^1.0.2, has-symbols@^1.0.3:
resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8"
integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==
has-symbols@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338"
integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==
has-tostringtag@^1.0.0: has-tostringtag@^1.0.0:
version "1.0.0" version "1.0.0"
resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25" resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25"
@ -3481,6 +3559,13 @@ has-tostringtag@^1.0.0:
dependencies: dependencies:
has-symbols "^1.0.2" has-symbols "^1.0.2"
has-tostringtag@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc"
integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==
dependencies:
has-symbols "^1.0.3"
hasown@^2.0.0: hasown@^2.0.0:
version "2.0.0" version "2.0.0"
resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c"
@ -3488,6 +3573,13 @@ hasown@^2.0.0:
dependencies: dependencies:
function-bind "^1.1.2" function-bind "^1.1.2"
hasown@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003"
integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==
dependencies:
function-bind "^1.1.2"
html-encoding-sniffer@^4.0.0: html-encoding-sniffer@^4.0.0:
version "4.0.0" version "4.0.0"
resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz#696df529a7cfd82446369dc5193e590a3735b448" resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz#696df529a7cfd82446369dc5193e590a3735b448"
@ -3992,6 +4084,11 @@ magic-string@^0.30.5:
dependencies: dependencies:
"@jridgewell/sourcemap-codec" "^1.4.15" "@jridgewell/sourcemap-codec" "^1.4.15"
math-intrinsics@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==
merge-stream@^2.0.0: merge-stream@^2.0.0:
version "2.0.0" version "2.0.0"
resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60"
@ -4543,11 +4640,6 @@ regenerate@^1.4.2:
resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a"
integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==
regenerator-runtime@^0.14.0:
version "0.14.0"
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45"
integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==
regenerator-transform@^0.15.2: regenerator-transform@^0.15.2:
version "0.15.2" version "0.15.2"
resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4"
@ -5124,10 +5216,10 @@ unbox-primitive@^1.0.2:
has-symbols "^1.0.3" has-symbols "^1.0.3"
which-boxed-primitive "^1.0.2" which-boxed-primitive "^1.0.2"
undici-types@~5.26.4: undici-types@~6.21.0:
version "5.26.5" version "6.21.0"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.21.0.tgz#691d00af3909be93a7faa13be61b3a5b50ef12cb"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== integrity sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==
unicode-canonical-property-names-ecmascript@^2.0.0: unicode-canonical-property-names-ecmascript@^2.0.0:
version "2.0.0" version "2.0.0"

@ -27,11 +27,11 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/google/uuid" "tailscale.com/feature"
"tailscale.com/clientupdate/distsign" "tailscale.com/hostinfo"
"tailscale.com/types/lazy"
"tailscale.com/types/logger" "tailscale.com/types/logger"
"tailscale.com/util/cmpver" "tailscale.com/util/cmpver"
"tailscale.com/util/winutil"
"tailscale.com/version" "tailscale.com/version"
"tailscale.com/version/distro" "tailscale.com/version/distro"
) )
@ -172,6 +172,12 @@ func NewUpdater(args Arguments) (*Updater, error) {
type updateFunction func() error type updateFunction func() error
func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) { func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) {
hi := hostinfo.New()
// We don't know how to update custom tsnet binaries, it's up to the user.
if hi.Package == "tsnet" {
return nil, false
}
switch runtime.GOOS { switch runtime.GOOS {
case "windows": case "windows":
return up.updateWindows, true return up.updateWindows, true
@ -245,9 +251,17 @@ func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) {
return nil, false return nil, false
} }
// CanAutoUpdate reports whether auto-updating via the clientupdate package var canAutoUpdateCache lazy.SyncValue[bool]
func init() {
feature.HookCanAutoUpdate.Set(canAutoUpdate)
}
// canAutoUpdate reports whether auto-updating via the clientupdate package
// is supported for the current os/distro. // is supported for the current os/distro.
func CanAutoUpdate() bool { func canAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) }
func canAutoUpdateUncached() bool {
if version.IsMacSysExt() { if version.IsMacSysExt() {
// Macsys uses Sparkle for auto-updates, which doesn't have an update // Macsys uses Sparkle for auto-updates, which doesn't have an update
// function in this package. // function in this package.
@ -404,13 +418,13 @@ func parseSynoinfo(path string) (string, error) {
// Extract the CPU in the middle (88f6282 in the above example). // Extract the CPU in the middle (88f6282 in the above example).
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
l := s.Text() line := s.Text()
if !strings.HasPrefix(l, "unique=") { if !strings.HasPrefix(line, "unique=") {
continue continue
} }
parts := strings.SplitN(l, "_", 3) parts := strings.SplitN(line, "_", 3)
if len(parts) != 3 { if len(parts) != 3 {
return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, l) return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, line)
} }
return parts[1], nil return parts[1], nil
} }
@ -756,164 +770,6 @@ func (up *Updater) updateMacAppStore() error {
return nil return nil
} }
const (
// winMSIEnv is the environment variable that, if set, is the MSI file for
// the update command to install. It's passed like this so we can stop the
// tailscale.exe process from running before the msiexec process runs and
// tries to overwrite ourselves.
winMSIEnv = "TS_UPDATE_WIN_MSI"
// winExePathEnv is the environment variable that is set along with
// winMSIEnv and carries the full path of the calling tailscale.exe binary.
// It is used to re-launch the GUI process (tailscale-ipn.exe) after
// install is complete.
winExePathEnv = "TS_UPDATE_WIN_EXE_PATH"
)
var (
verifyAuthenticode func(string) error // set non-nil only on Windows
markTempFileFunc func(string) error // set non-nil only on Windows
)
func (up *Updater) updateWindows() error {
if msi := os.Getenv(winMSIEnv); msi != "" {
// stdout/stderr from this part of the install could be lost since the
// parent tailscaled is replaced. Create a temp log file to have some
// output to debug with in case update fails.
close, err := up.switchOutputToFile()
if err != nil {
up.Logf("failed to create log file for installation: %v; proceeding with existing outputs", err)
} else {
defer close.Close()
}
up.Logf("installing %v ...", msi)
if err := up.installMSI(msi); err != nil {
up.Logf("MSI install failed: %v", err)
return err
}
up.Logf("success.")
return nil
}
if !winutil.IsCurrentProcessElevated() {
return errors.New(`update must be run as Administrator
you can run the command prompt as Administrator one of these ways:
* right-click cmd.exe, select 'Run as administrator'
* press Windows+x, then press a
* press Windows+r, type in "cmd", then press Ctrl+Shift+Enter`)
}
ver, err := requestedTailscaleVersion(up.Version, up.Track)
if err != nil {
return err
}
arch := runtime.GOARCH
if arch == "386" {
arch = "x86"
}
if !up.confirm(ver) {
return nil
}
tsDir := filepath.Join(os.Getenv("ProgramData"), "Tailscale")
msiDir := filepath.Join(tsDir, "MSICache")
if fi, err := os.Stat(tsDir); err != nil {
return fmt.Errorf("expected %s to exist, got stat error: %w", tsDir, err)
} else if !fi.IsDir() {
return fmt.Errorf("expected %s to be a directory; got %v", tsDir, fi.Mode())
}
if err := os.MkdirAll(msiDir, 0700); err != nil {
return err
}
up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi"))
pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch)
msiTarget := filepath.Join(msiDir, path.Base(pkgsPath))
if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil {
return err
}
up.Logf("verifying MSI authenticode...")
if err := verifyAuthenticode(msiTarget); err != nil {
return fmt.Errorf("authenticode verification of %s failed: %w", msiTarget, err)
}
up.Logf("authenticode verification succeeded")
up.Logf("making tailscale.exe copy to switch to...")
up.cleanupOldDownloads(filepath.Join(os.TempDir(), "tailscale-updater-*.exe"))
selfOrig, selfCopy, err := makeSelfCopy()
if err != nil {
return err
}
defer os.Remove(selfCopy)
up.Logf("running tailscale.exe copy for final install...")
cmd := exec.Command(selfCopy, "update")
cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig)
cmd.Stdout = up.Stderr
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
if err := cmd.Start(); err != nil {
return err
}
// Once it's started, exit ourselves, so the binary is free
// to be replaced.
os.Exit(0)
panic("unreachable")
}
func (up *Updater) switchOutputToFile() (io.Closer, error) {
var logFilePath string
exePath, err := os.Executable()
if err != nil {
logFilePath = filepath.Join(os.TempDir(), "tailscale-updater.log")
} else {
logFilePath = strings.TrimSuffix(exePath, ".exe") + ".log"
}
up.Logf("writing update output to %q", logFilePath)
logFile, err := os.Create(logFilePath)
if err != nil {
return nil, err
}
up.Logf = func(m string, args ...any) {
fmt.Fprintf(logFile, m+"\n", args...)
}
up.Stdout = logFile
up.Stderr = logFile
return logFile, nil
}
func (up *Updater) installMSI(msi string) error {
var err error
for tries := 0; tries < 2; tries++ {
cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn")
cmd.Dir = filepath.Dir(msi)
cmd.Stdout = up.Stdout
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
err = cmd.Run()
if err == nil {
break
}
up.Logf("Install attempt failed: %v", err)
uninstallVersion := up.currentVersion
if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" {
uninstallVersion = v
}
// Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first.
up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion)
cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn")
cmd.Stdout = up.Stdout
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
err = cmd.Run()
up.Logf("msiexec uninstall: %v", err)
}
return err
}
// cleanupOldDownloads removes all files matching glob (see filepath.Glob). // cleanupOldDownloads removes all files matching glob (see filepath.Glob).
// Only regular files are removed, so the glob must match specific files and // Only regular files are removed, so the glob must match specific files and
// not directories. // not directories.
@ -938,53 +794,6 @@ func (up *Updater) cleanupOldDownloads(glob string) {
} }
} }
func msiUUIDForVersion(ver string) string {
arch := runtime.GOARCH
if arch == "386" {
arch = "x86"
}
track, err := versionToTrack(ver)
if err != nil {
track = UnstableTrack
}
msiURL := fmt.Sprintf("https://pkgs.tailscale.com/%s/tailscale-setup-%s-%s.msi", track, ver, arch)
return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(msiURL)).String()) + "}"
}
func makeSelfCopy() (origPathExe, tmpPathExe string, err error) {
selfExe, err := os.Executable()
if err != nil {
return "", "", err
}
f, err := os.Open(selfExe)
if err != nil {
return "", "", err
}
defer f.Close()
f2, err := os.CreateTemp("", "tailscale-updater-*.exe")
if err != nil {
return "", "", err
}
if f := markTempFileFunc; f != nil {
if err := f(f2.Name()); err != nil {
return "", "", err
}
}
if _, err := io.Copy(f2, f); err != nil {
f2.Close()
return "", "", err
}
return selfExe, f2.Name(), f2.Close()
}
func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
c, err := distsign.NewClient(up.Logf, up.PkgsAddr)
if err != nil {
return err
}
return c.Download(context.Background(), pathSrc, fileDst)
}
func (up *Updater) updateFreeBSD() (err error) { func (up *Updater) updateFreeBSD() (err error) {
if up.Version != "" { if up.Version != "" {
return errors.New("installing a specific version on FreeBSD is not supported") return errors.New("installing a specific version on FreeBSD is not supported")

@ -0,0 +1,20 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build (linux && !android) || windows
package clientupdate
import (
"context"
"tailscale.com/clientupdate/distsign"
)
// downloadURLToFile downloads pathSrc (a path relative to the package
// server at up.PkgsAddr) into the local file fileDst using a distsign
// client. Progress and diagnostics go to up.Logf. Presumably distsign
// also verifies the release signature of the download — confirm against
// the distsign package docs.
func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
	c, err := distsign.NewClient(up.Logf, up.PkgsAddr)
	if err != nil {
		return err
	}
	return c.Download(context.Background(), pathSrc, fileDst)
}

@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !((linux && !android) || windows)
package clientupdate
// downloadURLToFile is the stub for platforms excluded by this file's
// build tag. It must never be called there; reaching it is a bug, hence
// the panic.
func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
	panic("unreachable")
}

@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package clientupdate
// updateWindows is the non-Windows stub (see the !windows build tag).
// It must never be called on these platforms; reaching it is a bug,
// hence the panic.
func (up *Updater) updateWindows() error {
	panic("unreachable")
}

@ -7,13 +7,59 @@
package clientupdate package clientupdate
import ( import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/google/uuid"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
"tailscale.com/util/winutil"
"tailscale.com/util/winutil/authenticode" "tailscale.com/util/winutil/authenticode"
) )
func init() { const (
markTempFileFunc = markTempFileWindows // winMSIEnv is the environment variable that, if set, is the MSI file for
verifyAuthenticode = verifyTailscale // the update command to install. It's passed like this so we can stop the
// tailscale.exe process from running before the msiexec process runs and
// tries to overwrite ourselves.
winMSIEnv = "TS_UPDATE_WIN_MSI"
// winVersionEnv is the environment variable that is set along with
// winMSIEnv and carries the version of tailscale that is being installed.
// It is used for logging purposes.
winVersionEnv = "TS_UPDATE_WIN_VERSION"
// updaterPrefix is the prefix for the temporary executable created by [makeSelfCopy].
updaterPrefix = "tailscale-updater"
)
func makeSelfCopy() (origPathExe, tmpPathExe string, err error) {
selfExe, err := os.Executable()
if err != nil {
return "", "", err
}
f, err := os.Open(selfExe)
if err != nil {
return "", "", err
}
defer f.Close()
f2, err := os.CreateTemp("", updaterPrefix+"-*.exe")
if err != nil {
return "", "", err
}
if err := markTempFileWindows(f2.Name()); err != nil {
return "", "", err
}
if _, err := io.Copy(f2, f); err != nil {
f2.Close()
return "", "", err
}
return selfExe, f2.Name(), f2.Close()
} }
func markTempFileWindows(name string) error { func markTempFileWindows(name string) error {
@ -23,6 +69,236 @@ func markTempFileWindows(name string) error {
const certSubjectTailscale = "Tailscale Inc." const certSubjectTailscale = "Tailscale Inc."
func verifyTailscale(path string) error { func verifyAuthenticode(path string) error {
return authenticode.Verify(path, certSubjectTailscale) return authenticode.Verify(path, certSubjectTailscale)
} }
// isTSGUIPresent reports whether a tsgui.dll sits in the same directory
// as the current executable. If the executable path can't be
// determined, it reports false.
func isTSGUIPresent() bool {
	exe, err := os.Executable()
	if err != nil {
		return false
	}
	_, statErr := os.Stat(filepath.Join(filepath.Dir(exe), "tsgui.dll"))
	return statErr == nil
}
func (up *Updater) updateWindows() error {
if msi := os.Getenv(winMSIEnv); msi != "" {
// stdout/stderr from this part of the install could be lost since the
// parent tailscaled is replaced. Create a temp log file to have some
// output to debug with in case update fails.
close, err := up.switchOutputToFile()
if err != nil {
up.Logf("failed to create log file for installation: %v; proceeding with existing outputs", err)
} else {
defer close.Close()
}
up.Logf("installing %v ...", msi)
if err := up.installMSI(msi); err != nil {
up.Logf("MSI install failed: %v", err)
return err
}
up.Logf("success.")
return nil
}
if !winutil.IsCurrentProcessElevated() {
return errors.New(`update must be run as Administrator
you can run the command prompt as Administrator one of these ways:
* right-click cmd.exe, select 'Run as administrator'
* press Windows+x, then press a
* press Windows+r, type in "cmd", then press Ctrl+Shift+Enter`)
}
ver, err := requestedTailscaleVersion(up.Version, up.Track)
if err != nil {
return err
}
arch := runtime.GOARCH
if arch == "386" {
arch = "x86"
}
if !up.confirm(ver) {
return nil
}
tsDir := filepath.Join(os.Getenv("ProgramData"), "Tailscale")
msiDir := filepath.Join(tsDir, "MSICache")
if fi, err := os.Stat(tsDir); err != nil {
return fmt.Errorf("expected %s to exist, got stat error: %w", tsDir, err)
} else if !fi.IsDir() {
return fmt.Errorf("expected %s to be a directory; got %v", tsDir, fi.Mode())
}
if err := os.MkdirAll(msiDir, 0700); err != nil {
return err
}
up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi"))
qualifiers := []string{ver, arch}
// TODO(aaron): Temporary hack so autoupdate still works on winui builds;
// remove when we enable winui by default on the unstable track.
if isTSGUIPresent() {
qualifiers = append(qualifiers, "winui")
}
pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s.msi", up.Track, strings.Join(qualifiers, "-"))
msiTarget := filepath.Join(msiDir, path.Base(pkgsPath))
if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil {
return err
}
up.Logf("verifying MSI authenticode...")
if err := verifyAuthenticode(msiTarget); err != nil {
return fmt.Errorf("authenticode verification of %s failed: %w", msiTarget, err)
}
up.Logf("authenticode verification succeeded")
up.Logf("making tailscale.exe copy to switch to...")
up.cleanupOldDownloads(filepath.Join(os.TempDir(), updaterPrefix+"-*.exe"))
_, selfCopy, err := makeSelfCopy()
if err != nil {
return err
}
defer os.Remove(selfCopy)
up.Logf("running tailscale.exe copy for final install...")
cmd := exec.Command(selfCopy, "update")
cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winVersionEnv+"="+ver)
cmd.Stdout = up.Stderr
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
if err := cmd.Start(); err != nil {
return err
}
// Once it's started, exit ourselves, so the binary is free
// to be replaced.
os.Exit(0)
panic("unreachable")
}
func (up *Updater) installMSI(msi string) error {
var err error
for tries := 0; tries < 2; tries++ {
// msiexec.exe requires exclusive access to the log file, so create a dedicated one for each run.
installLogPath := up.startNewLogFile("tailscale-installer", os.Getenv(winVersionEnv))
up.Logf("Install log: %s", installLogPath)
cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn", "/L*v", installLogPath)
cmd.Dir = filepath.Dir(msi)
cmd.Stdout = up.Stdout
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
err = cmd.Run()
switch err := err.(type) {
case nil:
// Success.
return nil
case *exec.ExitError:
// For possible error codes returned by Windows Installer, see
// https://web.archive.org/web/20250409144914/https://learn.microsoft.com/en-us/windows/win32/msi/error-codes
switch windows.Errno(err.ExitCode()) {
case windows.ERROR_SUCCESS_REBOOT_REQUIRED:
// In most cases, updating Tailscale should not require a reboot.
// If it does, it might be because we failed to close the GUI
// and the installer couldn't replace its executable.
// The old GUI will continue to run until the next reboot.
// Not ideal, but also not a retryable error.
up.Logf("[unexpected] reboot required")
return nil
case windows.ERROR_SUCCESS_REBOOT_INITIATED:
// Same as above, but perhaps the device is configured to prompt
// the user to reboot and the user has chosen to reboot now.
up.Logf("[unexpected] reboot initiated")
return nil
case windows.ERROR_INSTALL_ALREADY_RUNNING:
// The Windows Installer service is currently busy.
// It could be our own install initiated by user/MDM/GP, another MSI install or perhaps a Windows Update install.
// Anyway, we can't do anything about it right now. The user (or tailscaled) can retry later.
// Retrying now will likely fail, and is risky since we might uninstall the current version
// and then fail to install the new one, leaving the user with no Tailscale at all.
//
// TODO(nickkhyl,awly): should we check if this is actually a downgrade before uninstalling the current version?
// Also, maybe keep retrying the install longer if we uninstalled the current version due to a failed install attempt?
up.Logf("another installation is already in progress")
return err
}
default:
// Everything else is a retryable error.
}
up.Logf("Install attempt failed: %v", err)
uninstallVersion := up.currentVersion
if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" {
uninstallVersion = v
}
uninstallLogPath := up.startNewLogFile("tailscale-uninstaller", uninstallVersion)
// Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first.
up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion)
up.Logf("Uninstall log: %s", uninstallLogPath)
cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn", "/L*v", uninstallLogPath)
cmd.Stdout = up.Stdout
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
err = cmd.Run()
up.Logf("msiexec uninstall: %v", err)
}
return err
}
func msiUUIDForVersion(ver string) string {
arch := runtime.GOARCH
if arch == "386" {
arch = "x86"
}
track, err := versionToTrack(ver)
if err != nil {
track = UnstableTrack
}
msiURL := fmt.Sprintf("https://pkgs.tailscale.com/%s/tailscale-setup-%s-%s.msi", track, ver, arch)
return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(msiURL)).String()) + "}"
}
// switchOutputToFile redirects up.Logf, up.Stdout and up.Stderr into a
// freshly created log file and returns that file as an io.Closer for
// the caller to close. It is used during the MSI-install re-exec (see
// updateWindows), where the parent process's stdout/stderr may go away
// mid-install.
func (up *Updater) switchOutputToFile() (io.Closer, error) {
	var logFilePath string
	exePath, err := os.Executable()
	if err != nil {
		// Can't name the file after ourselves; fall back to a generic name.
		logFilePath = up.startNewLogFile(updaterPrefix, os.Getenv(winVersionEnv))
	} else {
		// Use the same suffix as the self-copy executable.
		suffix := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(exePath), updaterPrefix), ".exe")
		logFilePath = up.startNewLogFile(updaterPrefix, os.Getenv(winVersionEnv)+suffix)
	}
	up.Logf("writing update output to: %s", logFilePath)
	logFile, err := os.Create(logFilePath)
	if err != nil {
		return nil, err
	}
	// From here on, all Updater output lands in the log file.
	up.Logf = func(m string, args ...any) {
		fmt.Fprintf(logFile, m+"\n", args...)
	}
	up.Stdout = logFile
	up.Stderr = logFile
	return logFile, nil
}
// startNewLogFile returns a name for a new log file.
// It cleans up any old log files with the same baseNamePrefix.
//
// The name embeds a timestamp so each run gets a distinct file, plus
// baseNameSuffix for context. Files go under ProgramData\Tailscale\Logs
// when possible, falling back to the system temp dir (without cleanup)
// if that directory can't be created.
func (up *Updater) startNewLogFile(baseNamePrefix, baseNameSuffix string) string {
	baseName := fmt.Sprintf("%s-%s-%s.log", baseNamePrefix,
		time.Now().Format("20060102T150405"), baseNameSuffix)
	dir := filepath.Join(os.Getenv("ProgramData"), "Tailscale", "Logs")
	if err := os.MkdirAll(dir, 0700); err != nil {
		up.Logf("failed to create log directory: %v", err)
		return filepath.Join(os.TempDir(), baseName)
	}
	// TODO(nickkhyl): preserve up to N old log files?
	up.cleanupOldDownloads(filepath.Join(dir, baseNamePrefix+"-*.log"))
	return filepath.Join(dir, baseName)
}

@ -55,7 +55,7 @@ import (
"github.com/hdevalence/ed25519consensus" "github.com/hdevalence/ed25519consensus"
"golang.org/x/crypto/blake2s" "golang.org/x/crypto/blake2s"
"tailscale.com/net/tshttpproxy" "tailscale.com/feature"
"tailscale.com/types/logger" "tailscale.com/types/logger"
"tailscale.com/util/httpm" "tailscale.com/util/httpm"
"tailscale.com/util/must" "tailscale.com/util/must"
@ -330,7 +330,7 @@ func fetch(url string, limit int64) ([]byte, error) {
// limit bytes. On success, the returned value is a BLAKE2s hash of the file. // limit bytes. On success, the returned value is a BLAKE2s hash of the file.
func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([]byte, int64, error) { func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([]byte, int64, error) {
tr := http.DefaultTransport.(*http.Transport).Clone() tr := http.DefaultTransport.(*http.Transport).Clone()
tr.Proxy = tshttpproxy.ProxyFromEnvironment tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil()
defer tr.CloseIdleConnections() defer tr.CloseIdleConnections()
hc := &http.Client{Transport: tr} hc := &http.Client{Transport: tr}

@ -18,12 +18,12 @@ var (
) )
func usage() { func usage() {
fmt.Fprintf(os.Stderr, ` fmt.Fprint(os.Stderr, `
usage: addlicense -file FILE <subcommand args...> usage: addlicense -file FILE <subcommand args...>
`[1:]) `[1:])
flag.PrintDefaults() flag.PrintDefaults()
fmt.Fprintf(os.Stderr, ` fmt.Fprint(os.Stderr, `
addlicense adds a Tailscale license to the beginning of file. addlicense adds a Tailscale license to the beginning of file.
It is intended for use with 'go generate', so it also runs a subcommand, It is intended for use with 'go generate', so it also runs a subcommand,

@ -0,0 +1,131 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// checkmetrics validates that all metrics in the tailscale client-metrics
// are documented in a given path or URL.
package main
import (
"context"
"flag"
"fmt"
"io"
"log"
"net/http"
"net/http/httptest"
"os"
"strings"
"time"
"tailscale.com/ipn/store/mem"
"tailscale.com/tsnet"
"tailscale.com/tstest/integration/testcontrol"
"tailscale.com/util/httpm"
)
var (
	// kbPath and kbUrl locate the client-metrics knowledge base to
	// check against; at least one must be set (enforced in main).
	kbPath = flag.String("kb-path", "", "filepath to the client-metrics knowledge base")
	kbUrl = flag.String("kb-url", "", "URL to the client-metrics knowledge base page")
)
// main boots a throwaway tsnet node against an in-process test control
// server so that the client metrics get registered, then checks that
// every registered metric name appears in the knowledge base named by
// -kb-path and/or -kb-url, exiting nonzero if any are undocumented.
func main() {
	flag.Parse()
	if *kbPath == "" && *kbUrl == "" {
		log.Fatalf("either -kb-path or -kb-url must be set")
	}

	// In-memory control server; the tsnet node talks to this instead of
	// a real control plane.
	var control testcontrol.Server
	ts := httptest.NewServer(&control)
	defer ts.Close()

	td, err := os.MkdirTemp("", "testcontrol")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(td)

	// tsnet is not used as a Tailscale client, but as a way to
	// boot up Tailscale, have all the metrics registered, and then
	// verify that all the metrics are documented.
	tsn := &tsnet.Server{
		Dir:        td,
		Store:      new(mem.Store),
		UserLogf:   log.Printf,
		Ephemeral:  true,
		ControlURL: ts.URL,
	}
	if err := tsn.Start(); err != nil {
		log.Fatal(err)
	}
	defer tsn.Close()

	log.Printf("checking that all metrics are documented, looking for: %s", tsn.Sys().UserMetricsRegistry().MetricNames())

	if *kbPath != "" {
		kb, err := readKB(*kbPath)
		if err != nil {
			log.Fatalf("reading kb: %v", err)
		}
		missing := undocumentedMetrics(kb, tsn.Sys().UserMetricsRegistry().MetricNames())
		if len(missing) > 0 {
			log.Fatalf("found undocumented metrics in %q: %v", *kbPath, missing)
		}
	}

	if *kbUrl != "" {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		kb, err := getKB(ctx, *kbUrl)
		if err != nil {
			log.Fatalf("getting kb: %v", err)
		}
		missing := undocumentedMetrics(kb, tsn.Sys().UserMetricsRegistry().MetricNames())
		if len(missing) > 0 {
			log.Fatalf("found undocumented metrics in %q: %v", *kbUrl, missing)
		}
	}
}
func readKB(path string) (string, error) {
b, err := os.ReadFile(path)
if err != nil {
return "", fmt.Errorf("reading file: %w", err)
}
return string(b), nil
}
// getKB fetches the knowledge-base page at url and returns its body as
// a string. It fails on any transport error, on a non-200 status, or
// if the body cannot be read. ctx bounds the whole request.
func getKB(ctx context.Context, url string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, httpm.GET, url, nil)
	if err != nil {
		return "", fmt.Errorf("creating request: %w", err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("getting kb page: %w", err)
	}
	// Close the body on all paths so the connection can be reused and
	// the file descriptor isn't leaked.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("reading body: %w", err)
	}
	return string(b), nil
}
// undocumentedMetrics reports which of the given metric names do not
// occur as substrings of the knowledge-base text b. It returns nil when
// every metric is mentioned (or metrics is empty).
func undocumentedMetrics(b string, metrics []string) []string {
	var absent []string
	for _, name := range metrics {
		if strings.Contains(b, name) {
			continue
		}
		absent = append(absent, name)
	}
	return absent
}

@ -121,7 +121,12 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) {
continue continue
} }
if !hasBasicUnderlying(ft) { if !hasBasicUnderlying(ft) {
// don't dereference if the underlying type is an interface
if _, isInterface := ft.Underlying().(*types.Interface); isInterface {
writef("if src.%s != nil { dst.%s = src.%s.Clone() }", fname, fname, fname)
} else {
writef("dst.%s = *src.%s.Clone()", fname, fname) writef("dst.%s = *src.%s.Clone()", fname, fname)
}
continue continue
} }
} }
@ -136,13 +141,13 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) {
writef("if src.%s[i] == nil { dst.%s[i] = nil } else {", fname, fname) writef("if src.%s[i] == nil { dst.%s[i] = nil } else {", fname, fname)
if codegen.ContainsPointers(ptr.Elem()) { if codegen.ContainsPointers(ptr.Elem()) {
if _, isIface := ptr.Elem().Underlying().(*types.Interface); isIface { if _, isIface := ptr.Elem().Underlying().(*types.Interface); isIface {
it.Import("tailscale.com/types/ptr") it.Import("", "tailscale.com/types/ptr")
writef("\tdst.%s[i] = ptr.To((*src.%s[i]).Clone())", fname, fname) writef("\tdst.%s[i] = ptr.To((*src.%s[i]).Clone())", fname, fname)
} else { } else {
writef("\tdst.%s[i] = src.%s[i].Clone()", fname, fname) writef("\tdst.%s[i] = src.%s[i].Clone()", fname, fname)
} }
} else { } else {
it.Import("tailscale.com/types/ptr") it.Import("", "tailscale.com/types/ptr")
writef("\tdst.%s[i] = ptr.To(*src.%s[i])", fname, fname) writef("\tdst.%s[i] = ptr.To(*src.%s[i])", fname, fname)
} }
writef("}") writef("}")
@ -165,7 +170,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) {
writef("dst.%s = src.%s.Clone()", fname, fname) writef("dst.%s = src.%s.Clone()", fname, fname)
continue continue
} }
it.Import("tailscale.com/types/ptr") it.Import("", "tailscale.com/types/ptr")
writef("if dst.%s != nil {", fname) writef("if dst.%s != nil {", fname)
if _, isIface := base.Underlying().(*types.Interface); isIface && hasPtrs { if _, isIface := base.Underlying().(*types.Interface); isIface && hasPtrs {
writef("\tdst.%s = ptr.To((*src.%s).Clone())", fname, fname) writef("\tdst.%s = ptr.To((*src.%s).Clone())", fname, fname)
@ -187,45 +192,34 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) {
writef("\t\tdst.%s[k] = append([]%s{}, src.%s[k]...)", fname, n, fname) writef("\t\tdst.%s[k] = append([]%s{}, src.%s[k]...)", fname, n, fname)
writef("\t}") writef("\t}")
writef("}") writef("}")
} else if codegen.ContainsPointers(elem) { } else if codegen.IsViewType(elem) || !codegen.ContainsPointers(elem) {
// If the map values are view types (which are
// immutable and don't need cloning) or don't
// themselves contain pointers, we can just
// clone the map itself.
it.Import("", "maps")
writef("\tdst.%s = maps.Clone(src.%s)", fname, fname)
} else {
// Otherwise we need to clone each element of
// the map using our recursive helper.
writef("if dst.%s != nil {", fname) writef("if dst.%s != nil {", fname)
writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem)) writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem))
writef("\tfor k, v := range src.%s {", fname) writef("\tfor k, v := range src.%s {", fname)
switch elem := elem.Underlying().(type) { // Use a recursive helper here; this handles
case *types.Pointer: // arbitrarily nested maps in addition to
writef("\t\tif v == nil { dst.%s[k] = nil } else {", fname) // simpler types.
if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { writeMapValueClone(mapValueCloneParams{
if _, isIface := base.(*types.Interface); isIface { Buf: buf,
it.Import("tailscale.com/types/ptr") It: it,
writef("\t\t\tdst.%s[k] = ptr.To((*v).Clone())", fname) Elem: elem,
} else { SrcExpr: "v",
writef("\t\t\tdst.%s[k] = v.Clone()", fname) DstExpr: fmt.Sprintf("dst.%s[k]", fname),
} BaseIndent: "\t",
} else { Depth: 1,
it.Import("tailscale.com/types/ptr") })
writef("\t\t\tdst.%s[k] = ptr.To(*v)", fname)
}
writef("}")
case *types.Interface:
if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil {
if _, isPtr := cloneResultType.(*types.Pointer); isPtr {
writef("\t\tdst.%s[k] = *(v.Clone())", fname)
} else {
writef("\t\tdst.%s[k] = v.Clone()", fname)
}
} else {
writef(`panic("%s (%v) does not have a Clone method")`, fname, elem)
}
default:
writef("\t\tdst.%s[k] = *(v.Clone())", fname)
}
writef("\t}") writef("\t}")
writef("}") writef("}")
} else {
it.Import("maps")
writef("\tdst.%s = maps.Clone(src.%s)", fname, fname)
} }
case *types.Interface: case *types.Interface:
// If ft is an interface with a "Clone() ft" method, it can be used to clone the field. // If ft is an interface with a "Clone() ft" method, it can be used to clone the field.
@ -266,3 +260,99 @@ func methodResultType(typ types.Type, method string) types.Type {
} }
return sig.Results().At(0).Type() return sig.Results().At(0).Type()
} }
// mapValueCloneParams bundles the inputs to writeMapValueClone so that
// recursive calls only need to override a few fields per nesting level.
type mapValueCloneParams struct {
	// Buf is the buffer to write generated code to.
	Buf *bytes.Buffer
	// It is the import tracker for managing imports.
	It *codegen.ImportTracker
	// Elem is the type of the map value to clone.
	Elem types.Type
	// SrcExpr is the expression for the source value (e.g., "v", "v2", "v3").
	SrcExpr string
	// DstExpr is the expression for the destination (e.g., "dst.Field[k]", "dst.Field[k][k2]").
	DstExpr string
	// BaseIndent is the "base" indentation string for the generated code
	// (i.e. 1 or more tabs). Additional indentation will be added based on
	// the Depth parameter.
	BaseIndent string
	// Depth is the current nesting depth (1 for first level, 2 for second, etc.)
	Depth int
}
// writeMapValueClone generates code to clone a map value recursively.
// It handles arbitrary nesting of maps, pointers, and interfaces.
//
// The emitted code assigns a deep copy of params.SrcExpr into
// params.DstExpr. For nested maps it emits one loop per level, naming the
// loop variables from params.Depth (k2/v2, k3/v3, ...).
func writeMapValueClone(params mapValueCloneParams) {
	indent := params.BaseIndent + strings.Repeat("\t", params.Depth)
	writef := func(format string, args ...any) {
		fmt.Fprintf(params.Buf, indent+format+"\n", args...)
	}
	switch elem := params.Elem.Underlying().(type) {
	case *types.Pointer:
		writef("if %s == nil { %s = nil } else {", params.SrcExpr, params.DstExpr)
		if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) {
			if _, isIface := base.(*types.Interface); isIface {
				params.It.Import("", "tailscale.com/types/ptr")
				writef("\t%s = ptr.To((*%s).Clone())", params.DstExpr, params.SrcExpr)
			} else {
				writef("\t%s = %s.Clone()", params.DstExpr, params.SrcExpr)
			}
		} else {
			// Pointee contains no pointers, so copying the value is a deep copy.
			params.It.Import("", "tailscale.com/types/ptr")
			writef("\t%s = ptr.To(*%s)", params.DstExpr, params.SrcExpr)
		}
		writef("}")
	case *types.Map:
		// Recursively handle nested maps
		innerElem := elem.Elem()
		if codegen.IsViewType(innerElem) || !codegen.ContainsPointers(innerElem) {
			// Inner map values don't need deep cloning
			params.It.Import("", "maps")
			writef("%s = maps.Clone(%s)", params.DstExpr, params.SrcExpr)
		} else {
			// Inner map values need cloning
			keyType := params.It.QualifiedName(elem.Key())
			valueType := params.It.QualifiedName(innerElem)
			// Generate unique variable names for nested loops based on depth
			keyVar := fmt.Sprintf("k%d", params.Depth+1)
			valVar := fmt.Sprintf("v%d", params.Depth+1)
			writef("if %s == nil {", params.SrcExpr)
			writef("\t%s = nil", params.DstExpr)
			writef("\tcontinue")
			writef("}")
			writef("%s = map[%s]%s{}", params.DstExpr, keyType, valueType)
			writef("for %s, %s := range %s {", keyVar, valVar, params.SrcExpr)
			// Recursively generate cloning code for the nested map value
			nestedDstExpr := fmt.Sprintf("%s[%s]", params.DstExpr, keyVar)
			writeMapValueClone(mapValueCloneParams{
				Buf:        params.Buf,
				It:         params.It,
				Elem:       innerElem,
				SrcExpr:    valVar,
				DstExpr:    nestedDstExpr,
				BaseIndent: params.BaseIndent,
				Depth:      params.Depth + 1,
			})
			writef("}")
		}
	case *types.Interface:
		if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil {
			if _, isPtr := cloneResultType.(*types.Pointer); isPtr {
				writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr)
			} else {
				writef("%s = %s.Clone()", params.DstExpr, params.SrcExpr)
			}
		} else {
			// Interpolate the offending type into the panic message at
			// generation time. (The previous "%%v" escaped the verb, leaving a
			// literal %v in the generated string and appending elem as a
			// "%!(EXTRA ...)" artifact, which made the generated code
			// uncompilable.)
			writef(`panic("map value (%v) does not have a Clone method")`, elem)
		}
	default:
		// Fall back to assuming the value has a Clone method that returns a
		// pointer to the value type — TODO confirm against callers.
		writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr)
	}
}

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS // Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause // SPDX-License-Identifier: BSD-3-Clause
package main package main
import ( import (
@ -58,3 +59,158 @@ func TestSliceContainer(t *testing.T) {
}) })
} }
} }
// TestInterfaceContainer verifies that Clone deep-copies an interface-valued
// field and shares no memory with the original.
func TestInterfaceContainer(t *testing.T) {
	cases := []struct {
		name string
		in   *clonerex.InterfaceContainer
	}{
		{
			name: "nil",
			in:   nil,
		},
		{
			name: "zero",
			in:   &clonerex.InterfaceContainer{},
		},
		{
			name: "with_interface",
			in: &clonerex.InterfaceContainer{
				Interface: &clonerex.CloneableImpl{Value: 42},
			},
		},
		{
			name: "with_nil_interface",
			in: &clonerex.InterfaceContainer{
				Interface: nil,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.in.Clone()
			if !reflect.DeepEqual(tc.in, got) {
				t.Errorf("Clone() = %v, want %v", got, tc.in)
			}
			// Verify no aliasing: modifying the clone should not affect the original.
			if tc.in == nil || tc.in.Interface == nil {
				return
			}
			impl, ok := got.Interface.(*clonerex.CloneableImpl)
			if !ok {
				return
			}
			impl.Value = 999
			if origImpl, ok := tc.in.Interface.(*clonerex.CloneableImpl); ok && origImpl.Value == 999 {
				t.Errorf("Clone() aliased memory with original")
			}
		})
	}
}
// TestMapWithPointers verifies that Clone deep-copies map fields whose values
// are plain pointers, pointer-to-struct values with a Clone method, and
// interface values, sharing no memory with the original.
func TestMapWithPointers(t *testing.T) {
	num1, num2 := 42, 100
	src := &clonerex.MapWithPointers{
		Nested: map[string]*int{
			"foo": &num1,
			"bar": &num2,
		},
		WithCloneMethod: map[string]*clonerex.SliceContainer{
			"container1": {Slice: []*int{&num1, &num2}},
			"container2": {Slice: []*int{&num1}},
		},
		CloneInterface: map[string]clonerex.Cloneable{
			"impl1": &clonerex.CloneableImpl{Value: 123},
			"impl2": &clonerex.CloneableImpl{Value: 456},
		},
	}

	dup := src.Clone()
	if !reflect.DeepEqual(src, dup) {
		t.Errorf("Clone() = %v, want %v", dup, src)
	}

	// Writing through the clone's pointers must not reach the original.
	*dup.Nested["foo"] = 999
	if *src.Nested["foo"] == 999 {
		t.Errorf("Clone() aliased memory in Nested: original was modified")
	}

	// Same for the slice contents of values that were cloned via Clone().
	*dup.WithCloneMethod["container1"].Slice[0] = 888
	if *src.WithCloneMethod["container1"].Slice[0] == 888 {
		t.Errorf("Clone() aliased memory in WithCloneMethod: original was modified")
	}

	// And for interface values that implement Cloneable.
	if dupImpl, ok := dup.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok {
		dupImpl.Value = 777
		if srcImpl, ok := src.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok && srcImpl.Value == 777 {
			t.Errorf("Clone() aliased memory in CloneInterface: original was modified")
		}
	}
}
// TestDeeplyNestedMap verifies that Clone deep-copies maps nested three and
// four levels, that mutating cloned leaves does not reach the original, and
// that adding keys at any level of the clone does not appear in the original.
func TestDeeplyNestedMap(t *testing.T) {
	num := 123
	orig := &clonerex.DeeplyNestedMap{
		ThreeLevels: map[string]map[string]map[string]int{
			"a": {
				"b": {"c": 1, "d": 2},
				"e": {"f": 3},
			},
			"g": {
				"h": {"i": 4},
			},
		},
		FourLevels: map[string]map[string]map[string]map[string]*clonerex.SliceContainer{
			"l1a": {
				"l2a": {
					"l3a": {
						// num is shared between entries on purpose; Clone must
						// still produce independent pointers in the copy.
						"l4a": {Slice: []*int{&num}},
						"l4b": {Slice: []*int{&num, &num}},
					},
				},
			},
		},
	}

	cloned := orig.Clone()
	if !reflect.DeepEqual(orig, cloned) {
		t.Errorf("Clone() = %v, want %v", cloned, orig)
	}

	// Mutate the clone's ThreeLevels map
	cloned.ThreeLevels["a"]["b"]["c"] = 777
	if orig.ThreeLevels["a"]["b"]["c"] == 777 {
		t.Errorf("Clone() aliased memory in ThreeLevels: original was modified")
	}

	// Mutate the clone's FourLevels map at the deepest pointer level
	*cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] = 666
	if *orig.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] == 666 {
		t.Errorf("Clone() aliased memory in FourLevels: original was modified")
	}

	// Add a new top-level key to the clone's FourLevels map
	newNum := 999
	cloned.FourLevels["l1b"] = map[string]map[string]map[string]*clonerex.SliceContainer{
		"l2b": {
			"l3b": {
				"l4c": {Slice: []*int{&newNum}},
			},
		},
	}
	if _, exists := orig.FourLevels["l1b"]; exists {
		t.Errorf("Clone() aliased FourLevels map: new top-level key appeared in original")
	}

	// Add a new nested key to the clone's FourLevels map
	cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4c"] = &clonerex.SliceContainer{Slice: []*int{&newNum}}
	if _, exists := orig.FourLevels["l1a"]["l2a"]["l3a"]["l4c"]; exists {
		t.Errorf("Clone() aliased FourLevels map: new nested key appeared in original")
	}
}

@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS // Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause // SPDX-License-Identifier: BSD-3-Clause
//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer //go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap
// Package clonerex is an example package for the cloner tool. // Package clonerex is an example package for the cloner tool.
package clonerex package clonerex
@ -9,3 +9,38 @@ package clonerex
type SliceContainer struct { type SliceContainer struct {
Slice []*int Slice []*int
} }
// Cloneable is an interface with a Clone method.
type Cloneable interface {
	Clone() Cloneable
}

// CloneableImpl is a concrete type that implements Cloneable.
type CloneableImpl struct {
	Value int
}

// Clone returns a deep copy of c. A nil receiver yields a nil Cloneable.
func (c *CloneableImpl) Clone() Cloneable {
	if c != nil {
		return &CloneableImpl{Value: c.Value}
	}
	return nil
}
// InterfaceContainer has an interface-typed field, which tests
// the special handling for interface types in the cloner.
type InterfaceContainer struct {
	Interface Cloneable
}
// MapWithPointers exercises map fields whose values require deep cloning:
// plain pointers, pointer-to-struct values with a Clone method, and
// Cloneable interface values.
type MapWithPointers struct {
	Nested          map[string]*int
	WithCloneMethod map[string]*SliceContainer
	CloneInterface  map[string]Cloneable
}
// DeeplyNestedMap tests arbitrary depth of map nesting (3+ levels),
// both with pointer-free leaves (int) and pointer-bearing leaves
// (*SliceContainer).
type DeeplyNestedMap struct {
	ThreeLevels map[string]map[string]map[string]int
	FourLevels  map[string]map[string]map[string]map[string]*SliceContainer
}

@ -6,6 +6,8 @@
package clonerex package clonerex
import ( import (
"maps"
"tailscale.com/types/ptr" "tailscale.com/types/ptr"
) )
@ -35,9 +37,133 @@ var _SliceContainerCloneNeedsRegeneration = SliceContainer(struct {
Slice []*int Slice []*int
}{}) }{})
// Clone makes a deep copy of InterfaceContainer.
// The result aliases no memory with the original.
// (Generated by cmd/cloner; regenerate rather than hand-edit.)
func (src *InterfaceContainer) Clone() *InterfaceContainer {
	if src == nil {
		return nil
	}
	dst := new(InterfaceContainer)
	// Shallow-copy first; pointer-bearing fields are deep-copied below.
	*dst = *src
	if src.Interface != nil {
		// Delegate to the interface value's own Clone method.
		dst.Interface = src.Interface.Clone()
	}
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct {
	Interface Cloneable
}{})
// Clone makes a deep copy of MapWithPointers.
// The result aliases no memory with the original.
// (Generated by cmd/cloner; regenerate rather than hand-edit.)
func (src *MapWithPointers) Clone() *MapWithPointers {
	if src == nil {
		return nil
	}
	dst := new(MapWithPointers)
	// Shallow-copy first; the map fields are rebuilt element-by-element below.
	*dst = *src
	if dst.Nested != nil {
		dst.Nested = map[string]*int{}
		for k, v := range src.Nested {
			if v == nil {
				dst.Nested[k] = nil
			} else {
				// Fresh allocation per value so the pointers don't alias.
				dst.Nested[k] = ptr.To(*v)
			}
		}
	}
	if dst.WithCloneMethod != nil {
		dst.WithCloneMethod = map[string]*SliceContainer{}
		for k, v := range src.WithCloneMethod {
			if v == nil {
				dst.WithCloneMethod[k] = nil
			} else {
				dst.WithCloneMethod[k] = v.Clone()
			}
		}
	}
	if dst.CloneInterface != nil {
		dst.CloneInterface = map[string]Cloneable{}
		for k, v := range src.CloneInterface {
			// NOTE(review): a nil interface value here would panic on
			// v.Clone(); the generator appears to assume non-nil values —
			// confirm against the generator's interface handling.
			dst.CloneInterface[k] = v.Clone()
		}
	}
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _MapWithPointersCloneNeedsRegeneration = MapWithPointers(struct {
	Nested          map[string]*int
	WithCloneMethod map[string]*SliceContainer
	CloneInterface  map[string]Cloneable
}{})
// Clone makes a deep copy of DeeplyNestedMap.
// The result aliases no memory with the original.
// (Generated by cmd/cloner; regenerate rather than hand-edit.)
func (src *DeeplyNestedMap) Clone() *DeeplyNestedMap {
	if src == nil {
		return nil
	}
	dst := new(DeeplyNestedMap)
	// Shallow-copy first; the map fields are rebuilt level-by-level below.
	*dst = *src
	if dst.ThreeLevels != nil {
		dst.ThreeLevels = map[string]map[string]map[string]int{}
		for k, v := range src.ThreeLevels {
			if v == nil {
				dst.ThreeLevels[k] = nil
				continue
			}
			dst.ThreeLevels[k] = map[string]map[string]int{}
			for k2, v2 := range v {
				// Innermost values are ints, so a shallow map clone is deep.
				dst.ThreeLevels[k][k2] = maps.Clone(v2)
			}
		}
	}
	if dst.FourLevels != nil {
		dst.FourLevels = map[string]map[string]map[string]map[string]*SliceContainer{}
		for k, v := range src.FourLevels {
			if v == nil {
				dst.FourLevels[k] = nil
				continue
			}
			dst.FourLevels[k] = map[string]map[string]map[string]*SliceContainer{}
			for k2, v2 := range v {
				if v2 == nil {
					dst.FourLevels[k][k2] = nil
					continue
				}
				dst.FourLevels[k][k2] = map[string]map[string]*SliceContainer{}
				for k3, v3 := range v2 {
					if v3 == nil {
						dst.FourLevels[k][k2][k3] = nil
						continue
					}
					dst.FourLevels[k][k2][k3] = map[string]*SliceContainer{}
					for k4, v4 := range v3 {
						if v4 == nil {
							dst.FourLevels[k][k2][k3][k4] = nil
						} else {
							// Leaves carry pointers; delegate to their Clone.
							dst.FourLevels[k][k2][k3][k4] = v4.Clone()
						}
					}
				}
			}
		}
	}
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _DeeplyNestedMapCloneNeedsRegeneration = DeeplyNestedMap(struct {
	ThreeLevels map[string]map[string]map[string]int
	FourLevels  map[string]map[string]map[string]map[string]*SliceContainer
}{})
// Clone duplicates src into dst and reports whether it succeeded. // Clone duplicates src into dst and reports whether it succeeded.
// To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>, // To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>,
// where T is one of SliceContainer. // where T is one of SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap.
func Clone(dst, src any) bool { func Clone(dst, src any) bool {
switch src := src.(type) { switch src := src.(type) {
case *SliceContainer: case *SliceContainer:
@ -49,6 +175,33 @@ func Clone(dst, src any) bool {
*dst = src.Clone() *dst = src.Clone()
return true return true
} }
case *InterfaceContainer:
switch dst := dst.(type) {
case *InterfaceContainer:
*dst = *src.Clone()
return true
case **InterfaceContainer:
*dst = src.Clone()
return true
}
case *MapWithPointers:
switch dst := dst.(type) {
case *MapWithPointers:
*dst = *src.Clone()
return true
case **MapWithPointers:
*dst = src.Clone()
return true
}
case *DeeplyNestedMap:
switch dst := dst.(type) {
case *DeeplyNestedMap:
*dst = *src.Clone()
return true
case **DeeplyNestedMap:
*dst = src.Clone()
return true
}
} }
return false return false
} }

@ -11,24 +11,33 @@ import (
"errors" "errors"
"fmt" "fmt"
"log" "log"
"net/http"
"net/netip" "net/netip"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"strconv"
"strings" "strings"
"sync"
"time" "time"
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
"tailscale.com/client/local"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/kube/egressservices" "tailscale.com/kube/egressservices"
"tailscale.com/kube/kubeclient" "tailscale.com/kube/kubeclient"
"tailscale.com/kube/kubetypes"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/util/httpm"
"tailscale.com/util/linuxfw" "tailscale.com/util/linuxfw"
"tailscale.com/util/mak" "tailscale.com/util/mak"
) )
const tailscaleTunInterface = "tailscale0" const tailscaleTunInterface = "tailscale0"
// testSleepDuration is modified using a build flag to speed up tests; when it
// parses as a positive duration it replaces the default backoff sleep (see
// egressProxy.configure). Empty in production builds.
var testSleepDuration string
// This file contains functionality to run containerboot as a proxy that can // This file contains functionality to run containerboot as a proxy that can
// route cluster traffic to one or more tailnet targets, based on portmapping // route cluster traffic to one or more tailnet targets, based on portmapping
// rules read from a configfile. Currently (9/2024) this is only used for the // rules read from a configfile. Currently (9/2024) this is only used for the
@ -37,16 +46,18 @@ const tailscaleTunInterface = "tailscale0"
// egressProxy knows how to configure firewall rules to route cluster traffic to // egressProxy knows how to configure firewall rules to route cluster traffic to
// one or more tailnet services. // one or more tailnet services.
type egressProxy struct { type egressProxy struct {
cfgPath string // path to egress service config file cfgPath string // path to a directory with egress services config files
nfr linuxfw.NetfilterRunner // never nil nfr linuxfw.NetfilterRunner // never nil
kc kubeclient.Client // never nil kc kubeclient.Client // never nil
stateSecret string // name of the kube state Secret stateSecret string // name of the kube state Secret
tsClient *local.Client // never nil
netmapChan chan ipn.Notify // chan to receive netmap updates on netmapChan chan ipn.Notify // chan to receive netmap updates on
podIP string // never empty string podIPv4 string // never empty string, currently only IPv4 is supported
// tailnetFQDNs is the egress service FQDN to tailnet IP mappings that // tailnetFQDNs is the egress service FQDN to tailnet IP mappings that
// were last used to configure firewall rules for this proxy. // were last used to configure firewall rules for this proxy.
@ -55,15 +66,29 @@ type egressProxy struct {
// memory at all. // memory at all.
targetFQDNs map[string][]netip.Prefix targetFQDNs map[string][]netip.Prefix
// used to configure firewall rules. tailnetAddrs []netip.Prefix // tailnet IPs of this tailnet device
tailnetAddrs []netip.Prefix
// shortSleep is the backoff sleep between healthcheck endpoint calls - can be overridden in tests.
shortSleep time.Duration
// longSleep is the time to sleep after the routing rules are updated to increase the chance that kube
// proxies on all nodes have updated their routing configuration. It can be configured to 0 in
// tests.
longSleep time.Duration
// client is a client that can send HTTP requests.
client httpClient
}
// httpClient is a client that can send HTTP requests and can be mocked in tests.
type httpClient interface {
Do(*http.Request) (*http.Response, error)
} }
// run configures egress proxy firewall rules and ensures that the firewall rules are reconfigured when: // run configures egress proxy firewall rules and ensures that the firewall rules are reconfigured when:
// - the mounted egress config has changed // - the mounted egress config has changed
// - the proxy's tailnet IP addresses have changed // - the proxy's tailnet IP addresses have changed
// - tailnet IPs have changed for any backend targets specified by tailnet FQDN // - tailnet IPs have changed for any backend targets specified by tailnet FQDN
func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error { func (ep *egressProxy) run(ctx context.Context, n ipn.Notify, opts egressProxyRunOpts) error {
ep.configure(opts)
var tickChan <-chan time.Time var tickChan <-chan time.Time
var eventChan <-chan fsnotify.Event var eventChan <-chan fsnotify.Event
// TODO (irbekrm): take a look if this can be pulled into a single func // TODO (irbekrm): take a look if this can be pulled into a single func
@ -75,7 +100,7 @@ func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error {
tickChan = ticker.C tickChan = ticker.C
} else { } else {
defer w.Close() defer w.Close()
if err := w.Add(filepath.Dir(ep.cfgPath)); err != nil { if err := w.Add(ep.cfgPath); err != nil {
return fmt.Errorf("failed to add fsnotify watch: %w", err) return fmt.Errorf("failed to add fsnotify watch: %w", err)
} }
eventChan = w.Events eventChan = w.Events
@ -85,28 +110,57 @@ func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error {
return err return err
} }
for { for {
var err error
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil return nil
case <-tickChan: case <-tickChan:
err = ep.sync(ctx, n) log.Printf("periodic sync, ensuring firewall config is up to date...")
case <-eventChan: case <-eventChan:
log.Printf("config file change detected, ensuring firewall config is up to date...") log.Printf("config file change detected, ensuring firewall config is up to date...")
err = ep.sync(ctx, n)
case n = <-ep.netmapChan: case n = <-ep.netmapChan:
shouldResync := ep.shouldResync(n) shouldResync := ep.shouldResync(n)
if shouldResync { if !shouldResync {
log.Printf("netmap change detected, ensuring firewall config is up to date...") continue
err = ep.sync(ctx, n)
} }
log.Printf("netmap change detected, ensuring firewall config is up to date...")
} }
if err != nil { if err := ep.sync(ctx, n); err != nil {
return fmt.Errorf("error syncing egress service config: %w", err) return fmt.Errorf("error syncing egress service config: %w", err)
} }
} }
} }
// egressProxyRunOpts carries everything an egressProxy needs to start
// running; it is applied via egressProxy.configure at the top of run.
type egressProxyRunOpts struct {
	// cfgPath is the path to a directory with egress services config files.
	cfgPath string
	// nfr programs the firewall rules.
	nfr linuxfw.NetfilterRunner
	// kc reads/patches the kube state Secret.
	kc kubeclient.Client
	// tsClient is the local tailscaled client.
	tsClient *local.Client
	// stateSecret is the name of the kube state Secret.
	stateSecret string
	// netmapChan delivers netmap updates.
	netmapChan chan ipn.Notify
	// podIPv4 is this Pod's IPv4 address.
	podIPv4 string
	// tailnetAddrs are the tailnet IPs of this device.
	tailnetAddrs []netip.Prefix
}
// configure applies the provided run options to the egress proxy and installs
// defaults: a plain net/http client and the backoff sleep durations
// (overridable at build time via testSleepDuration, for tests).
func (ep *egressProxy) configure(opts egressProxyRunOpts) {
	ep.cfgPath = opts.cfgPath
	ep.nfr = opts.nfr
	ep.kc = opts.kc
	ep.tsClient = opts.tsClient
	ep.stateSecret = opts.stateSecret
	ep.netmapChan = opts.netmapChan
	ep.podIPv4 = opts.podIPv4
	ep.tailnetAddrs = opts.tailnetAddrs
	ep.client = &http.Client{} // default HTTP client
	sleepDuration := time.Second
	// testSleepDuration is empty outside tests; ParseDuration then errors and
	// the 1s default stands.
	if d, err := time.ParseDuration(testSleepDuration); err == nil && d > 0 {
		log.Printf("using test sleep duration %v", d)
		sleepDuration = d
	}
	ep.shortSleep = sleepDuration
	ep.longSleep = sleepDuration * 10
}
// sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if // sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if
// any firewall rules need to be updated. Currently using status in state Secret as a reference for what is the current // any firewall rules need to be updated. Currently using status in state Secret as a reference for what is the current
// firewall configuration is good enough because - the status is keyed by the Pod IP - we crash the Pod on errors such // firewall configuration is good enough because - the status is keyed by the Pod IP - we crash the Pod on errors such
@ -235,7 +289,7 @@ func updatesForCfg(svcName string, cfg egressservices.Config, status *egressserv
log.Printf("tailnet target for egress service %s does not have any backend addresses, deleting all rules", svcName) log.Printf("tailnet target for egress service %s does not have any backend addresses, deleting all rules", svcName)
for _, ip := range currentConfig.TailnetTargetIPs { for _, ip := range currentConfig.TailnetTargetIPs {
for ports := range currentConfig.Ports { for ports := range currentConfig.Ports {
rulesToDelete = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip}) rulesToDelete = append(rulesToDelete, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip})
} }
} }
return rulesToAdd, rulesToDelete, nil return rulesToAdd, rulesToDelete, nil
@ -327,7 +381,8 @@ func (ep *egressProxy) deleteUnnecessaryServices(cfgs *egressservices.Configs, s
// getConfigs gets the mounted egress service configuration. // getConfigs gets the mounted egress service configuration.
func (ep *egressProxy) getConfigs() (*egressservices.Configs, error) { func (ep *egressProxy) getConfigs() (*egressservices.Configs, error) {
j, err := os.ReadFile(ep.cfgPath) svcsCfg := filepath.Join(ep.cfgPath, egressservices.KeyEgressServices)
j, err := os.ReadFile(svcsCfg)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, nil return nil, nil
} }
@ -361,7 +416,7 @@ func (ep *egressProxy) getStatus(ctx context.Context) (*egressservices.Status, e
if err := json.Unmarshal([]byte(raw), status); err != nil { if err := json.Unmarshal([]byte(raw), status); err != nil {
return nil, fmt.Errorf("error unmarshalling previous config: %w", err) return nil, fmt.Errorf("error unmarshalling previous config: %w", err)
} }
if reflect.DeepEqual(status.PodIP, ep.podIP) { if reflect.DeepEqual(status.PodIPv4, ep.podIPv4) {
return status, nil return status, nil
} }
return nil, nil return nil, nil
@ -374,7 +429,7 @@ func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Sta
if status == nil { if status == nil {
status = &egressservices.Status{} status = &egressservices.Status{}
} }
status.PodIP = ep.podIP status.PodIPv4 = ep.podIPv4
secret, err := ep.kc.GetSecret(ctx, ep.stateSecret) secret, err := ep.kc.GetSecret(ctx, ep.stateSecret)
if err != nil { if err != nil {
return fmt.Errorf("error retrieving state Secret: %w", err) return fmt.Errorf("error retrieving state Secret: %w", err)
@ -389,7 +444,7 @@ func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Sta
Path: fmt.Sprintf("/data/%s", egressservices.KeyEgressServices), Path: fmt.Sprintf("/data/%s", egressservices.KeyEgressServices),
Value: bs, Value: bs,
} }
if err := ep.kc.JSONPatchSecret(ctx, ep.stateSecret, []kubeclient.JSONPatch{patch}); err != nil { if err := ep.kc.JSONPatchResource(ctx, ep.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil {
return fmt.Errorf("error patching state Secret: %w", err) return fmt.Errorf("error patching state Secret: %w", err)
} }
ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice() ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice()
@ -470,9 +525,10 @@ func (ep *egressProxy) shouldResync(n ipn.Notify) bool {
if equalFQDNs(nn.Name(), fqdn) { if equalFQDNs(nn.Name(), fqdn) {
if !reflect.DeepEqual(ips, nn.Addresses().AsSlice()) { if !reflect.DeepEqual(ips, nn.Addresses().AsSlice()) {
log.Printf("backend addresses for egress target %q have changed old IPs %v, new IPs %v trigger egress config resync", nn.Name(), ips, nn.Addresses().AsSlice()) log.Printf("backend addresses for egress target %q have changed old IPs %v, new IPs %v trigger egress config resync", nn.Name(), ips, nn.Addresses().AsSlice())
}
return true return true
} }
break
}
} }
} }
return false return false
@ -514,7 +570,7 @@ func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner
} }
// ensureRulesDeleted ensures that the given rules are deleted from the firewall // ensureRulesDeleted ensures that the given rules are deleted from the firewall
// configuration. For any rules that do not exist, calling this funcion is a // configuration. For any rules that do not exist, calling this function is a
// no-op. // no-op.
func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error { func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error {
for svc, rules := range rulesPerSvc { for svc, rules := range rulesPerSvc {
@ -565,7 +621,145 @@ func servicesStatusIsEqual(st, st1 *egressservices.Status) bool {
if st == nil || st1 == nil { if st == nil || st1 == nil {
return false return false
} }
st.PodIP = "" st.PodIPv4 = ""
st1.PodIP = "" st1.PodIPv4 = ""
return reflect.DeepEqual(*st, *st1) return reflect.DeepEqual(*st, *st1)
} }
// registerHandlers adds a new handler to the provided ServeMux that can be called as a Kubernetes prestop hook to
// delay shutdown till it's safe to do so.
func (ep *egressProxy) registerHandlers(mux *http.ServeMux) {
	// Only GET requests on the pre-shutdown path are routed to ep.ServeHTTP.
	mux.Handle(fmt.Sprintf("GET %s", kubetypes.EgessServicesPreshutdownEP), ep)
}
// ServeHTTP serves the /internal-egress-services-preshutdown endpoint. When it receives a request, it periodically
// polls the configured health check endpoint for each egress service till the health check endpoint no longer hits
// this proxy Pod. It uses the Pod-IPv4 header to verify whether a health check response is received from this Pod.
func (ep *egressProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	cfgs, err := ep.getConfigs()
	if err != nil {
		http.Error(w, fmt.Sprintf("error retrieving egress services configs: %v", err), http.StatusInternalServerError)
		return
	}
	if cfgs == nil {
		// No egress services configured — shutdown is immediately safe.
		if _, err := w.Write([]byte("safe to terminate")); err != nil {
			http.Error(w, fmt.Sprintf("error writing termination status: %v", err), http.StatusInternalServerError)
		}
		return
	}
	// hp is how many times each health check endpoint should be pinged so
	// that every backend is likely hit (see getHEPPings).
	hp, err := ep.getHEPPings()
	if err != nil {
		http.Error(w, fmt.Sprintf("error determining the number of times health check endpoint should be pinged: %v", err), http.StatusInternalServerError)
		return
	}
	ep.waitTillSafeToShutdown(r.Context(), cfgs, hp)
}
// waitTillSafeToShutdown looks up all egress targets configured to be proxied via this instance and, for each target
// whose configuration includes a healthcheck endpoint, pings the endpoint till none of the responses
// are returned by this instance or till the HTTP request times out. In practice, the endpoint will be a Kubernetes Service for whom one of the backends
// would normally be this Pod. When this Pod is being deleted, the operator should have removed it from the Service
// backends and eventually kube proxy routing rules should be updated to no longer route traffic for the Service to this
// Pod.
func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egressservices.Configs, hp int) {
	if cfgs == nil || len(*cfgs) == 0 { // avoid sleeping if no services are configured
		return
	}
	log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...")
	var wg sync.WaitGroup
	for s, cfg := range *cfgs {
		hep := cfg.HealthCheckEndpoint
		if hep == "" {
			log.Printf("Tailnet target %q does not have a cluster healthcheck specified, unable to verify if cluster traffic for the target is still routed via this Pod", s)
			continue
		}
		// Per-iteration copy; redundant under Go 1.22+ loop semantics but harmless.
		svc := s
		// One goroutine per service polls until the service stops routing here.
		// NOTE(review): WaitGroup.Go requires Go 1.25 — confirm the module's Go version.
		wg.Go(func() {
			log.Printf("Ensuring that cluster traffic is no longer routed to %q via this Pod...", svc)
			for {
				if ctx.Err() != nil { // kubelet's HTTP request timeout
					log.Printf("Cluster traffic for %s did not stop being routed to this Pod.", svc)
					return
				}
				// found reports whether any of hp pings was answered by this Pod.
				found, err := lookupPodRoute(ctx, hep, ep.podIPv4, hp, ep.client)
				if err != nil {
					log.Printf("unable to reach endpoint %q, assuming the routing rules for this Pod have been deleted: %v", hep, err)
					break
				}
				if !found {
					log.Printf("service %q is no longer routed through this Pod", svc)
					break
				}
				log.Printf("service %q is still routed through this Pod, waiting...", svc)
				time.Sleep(ep.shortSleep)
			}
		})
	}
	wg.Wait()
	// The check above really only checked that the routing rules are updated on this node. Sleep for a bit to
	// ensure that the routing rules are updated on other nodes. TODO(irbekrm): this may or may not be good enough.
	// If it's not good enough, we'd probably want to do something more complex, where the proxies check each other.
	log.Printf("Sleeping for %s before shutdown to ensure that kube proxies on all nodes have updated routing configuration", ep.longSleep)
	time.Sleep(ep.longSleep)
}
// lookupPodRoute probes the healthcheck endpoint hep up to repeat times and
// reports whether any response carried podIP in its Pod-IPv4 header, i.e.
// whether traffic for the endpoint can still be routed via this Pod.
func lookupPodRoute(ctx context.Context, hep, podIP string, repeat int, client httpClient) (bool, error) {
	for i := 0; i < repeat; i++ {
		matched, err := lookup(ctx, hep, podIP, client)
		if err != nil {
			return false, err
		}
		if matched {
			return true, nil
		}
	}
	return false, nil
}
// lookup performs a single GET against the healthcheck endpoint hep and
// reports whether the response's Pod-IPv4 header matches podIP. A transport
// error is treated as a match (true) on the assumption that there are no
// healthy backends left behind the endpoint.
func lookup(ctx context.Context, hep, podIP string, client httpClient) (bool, error) {
	req, err := http.NewRequestWithContext(ctx, httpm.GET, hep, nil)
	if err != nil {
		return false, fmt.Errorf("error creating new HTTP request: %v", err)
	}
	// Force a fresh TCP connection per request so successive probes can land
	// on different backends.
	req.Close = true
	resp, err := client.Do(req)
	if err != nil {
		log.Printf("Endpoint %q can not be reached: %v, likely because there are no (more) healthy backends", hep, err)
		return true, nil
	}
	defer resp.Body.Close()
	gotIP := resp.Header.Get(kubetypes.PodIPv4Header)
	return strings.EqualFold(gotIP, podIP), nil
}
// getHEPPings gets the number of pings that should be sent to a health check endpoint to ensure that each configured
// backend is hit. This assumes that a health check endpoint is a Kubernetes Service and traffic to backend Pods is
// round robin load balanced.
//
// Returns 0 (and no error) when the config file is absent or empty, an error
// when the file cannot be read or does not parse as an integer.
func (ep *egressProxy) getHEPPings() (int, error) {
	hepPingsPath := filepath.Join(ep.cfgPath, egressservices.KeyHEPPings)
	j, err := os.ReadFile(hepPingsPath)
	if os.IsNotExist(err) {
		return 0, nil // not configured
	}
	if err != nil {
		return -1, err
	}
	// An empty file means no pings configured. (The previous
	// `len(j) == 0 || string(j) == ""` tested the same condition twice.)
	if len(j) == 0 {
		return 0, nil
	}
	hp, err := strconv.Atoi(string(j))
	if err != nil {
		return -1, fmt.Errorf("error parsing hep pings as int: %v", err)
	}
	if hp < 0 {
		// Tolerate bad input rather than failing shutdown handling.
		log.Printf("[unexpected] hep pings is negative: %d", hp)
		return 0, nil
	}
	return hp, nil
}

@ -6,11 +6,18 @@
package main package main
import ( import (
"context"
"fmt"
"io"
"net/http"
"net/netip" "net/netip"
"reflect" "reflect"
"strings"
"sync"
"testing" "testing"
"tailscale.com/kube/egressservices" "tailscale.com/kube/egressservices"
"tailscale.com/kube/kubetypes"
) )
func Test_updatesForSvc(t *testing.T) { func Test_updatesForSvc(t *testing.T) {
@ -173,3 +180,145 @@ func Test_updatesForSvc(t *testing.T) {
}) })
} }
} }
// A failure of this test will most likely look like a timeout.
//
// The test wires an egressProxy to a mockHTTPClient that can be told, per
// endpoint, after how many probes to stop reporting this Pod's IP, then checks
// that waitTillSafeToShutdown returns (rather than hanging).
func TestWaitTillSafeToShutdown(t *testing.T) {
	podIP := "10.0.0.1"
	anotherIP := "10.0.0.2"
	tests := []struct {
		name string
		// services is a map of service name to the number of calls to make to the healthcheck endpoint before
		// returning a response that does NOT contain this Pod's IP in headers.
		services map[string]int
		// replicas is passed as the per-lookup ping count (hp).
		replicas int
		// healthCheckSet controls whether the service config carries a
		// healthcheck endpoint at all.
		healthCheckSet bool
	}{
		{
			name: "no_configs",
		},
		{
			name: "one_service_immediately_safe_to_shutdown",
			services: map[string]int{
				"svc1": 0,
			},
			replicas:       2,
			healthCheckSet: true,
		},
		{
			name: "multiple_services_immediately_safe_to_shutdown",
			services: map[string]int{
				"svc1": 0,
				"svc2": 0,
				"svc3": 0,
			},
			replicas:       2,
			healthCheckSet: true,
		},
		{
			name: "multiple_services_no_healthcheck_endpoints",
			services: map[string]int{
				"svc1": 0,
				"svc2": 0,
				"svc3": 0,
			},
			replicas: 2,
		},
		{
			name: "one_service_eventually_safe_to_shutdown",
			services: map[string]int{
				"svc1": 3, // After 3 calls to health check endpoint, no longer returns this Pod's IP
			},
			replicas:       2,
			healthCheckSet: true,
		},
		{
			name: "multiple_services_eventually_safe_to_shutdown",
			services: map[string]int{
				"svc1": 1, // After 1 call to health check endpoint, no longer returns this Pod's IP
				"svc2": 3, // After 3 calls to health check endpoint, no longer returns this Pod's IP
				"svc3": 5, // After 5 calls to the health check endpoint, no longer returns this Pod's IP
			},
			replicas:       2,
			healthCheckSet: true,
		},
		{
			name: "multiple_services_eventually_safe_to_shutdown_with_higher_replica_count",
			services: map[string]int{
				"svc1": 7,
				"svc2": 10,
			},
			replicas:       5,
			healthCheckSet: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfgs := &egressservices.Configs{}
			switches := make(map[string]int)
			for svc, callsToSwitch := range tt.services {
				endpoint := fmt.Sprintf("http://%s.local", svc)
				if tt.healthCheckSet {
					(*cfgs)[svc] = egressservices.Config{
						HealthCheckEndpoint: endpoint,
					}
				}
				switches[endpoint] = callsToSwitch
			}
			// shortSleep/longSleep are left at their zero values so the retry
			// loop spins without delay — fine for a fast test.
			ep := &egressProxy{
				podIPv4: podIP,
				client: &mockHTTPClient{
					podIP:     podIP,
					anotherIP: anotherIP,
					switches:  switches,
				},
			}
			ep.waitTillSafeToShutdown(context.Background(), cfgs, tt.replicas)
		})
	}
}
// mockHTTPClient is a client that receives an HTTP call for an egress service endpoint and returns a response with an
// IP address in a 'Pod-IPv4' header. It can be configured to return one IP address for N calls, then switch to another
// IP address to simulate a scenario where an IP is eventually no longer a backend for an endpoint.
// TODO(irbekrm): to test this more thoroughly, we should have the client take into account the number of replicas and
// return as if traffic was round robin load balanced across different Pods.
type mockHTTPClient struct {
	// podIP - initial IP address to return, that matches the current proxy's IP address.
	podIP string
	// anotherIP - IP address returned once an endpoint's switch threshold has been crossed.
	anotherIP string
	// after how many calls to an endpoint, the client should start returning 'anotherIP' instead of 'podIP'.
	switches map[string]int
	mu       sync.Mutex // protects the following
	// calls tracks the number of calls received.
	calls map[string]int
}
// Do counts the call against req's URL and fabricates a 200 response whose
// Pod-IPv4 header carries the proxy's own IP while the endpoint is still
// "routable" via this Pod, and anotherIP once the configured switch threshold
// has been exceeded.
func (m *mockHTTPClient) Do(req *http.Request) (*http.Response, error) {
	endpoint := req.URL.String()

	m.mu.Lock()
	if m.calls == nil {
		m.calls = make(map[string]int)
	}
	m.calls[endpoint]++
	n := m.calls[endpoint]
	m.mu.Unlock()

	ip := m.anotherIP // Pod is no longer routable
	if n <= m.switches[endpoint] {
		ip = m.podIP // Pod is still routable
	}
	resp := &http.Response{
		StatusCode: http.StatusOK,
		Header:     make(http.Header),
		Body:       io.NopCloser(strings.NewReader("")),
	}
	resp.Header.Set(kubetypes.PodIPv4Header, ip)
	return resp, nil
}

@ -1,51 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package main
import (
"log"
"net"
"net/http"
"sync"
)
// healthz is a simple health check server, if enabled it returns 200 OK if
// this tailscale node currently has at least one tailnet IP address else
// returns 500. (NOTE: despite earlier docs mentioning 503, the handler below
// uses http.StatusInternalServerError, i.e. 500.)
type healthz struct {
	sync.Mutex      // guards hasAddrs
	hasAddrs   bool // whether the node currently has at least one tailnet IP
}

// ServeHTTP writes "ok" with a 200 status when the node has tailnet addresses
// and a 500 error otherwise.
func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.Lock()
	defer h.Unlock()
	if h.hasAddrs {
		w.Write([]byte("ok"))
	} else {
		http.Error(w, "node currently has no tailscale IPs", http.StatusInternalServerError)
	}
}
// runHealthz runs a simple HTTP health endpoint on /healthz, listening on the
// provided address. A containerized tailscale instance is considered healthy
// if it has at least one tailnet IP address. The server is started on a
// background goroutine; a listen or serve failure is fatal to the process.
func runHealthz(addr string, h *healthz) {
	mux := http.NewServeMux()
	mux.Handle("/healthz", h)
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err)
	}
	log.Printf("Running healthcheck endpoint at %s/healthz", addr)
	srv := &http.Server{Handler: mux}
	go func() {
		if err := srv.Serve(ln); err != nil {
			log.Fatalf("failed running health endpoint: %v", err)
		}
	}()
}

@ -0,0 +1,331 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/netip"
"os"
"path/filepath"
"reflect"
"time"
"github.com/fsnotify/fsnotify"
"tailscale.com/kube/ingressservices"
"tailscale.com/kube/kubeclient"
"tailscale.com/util/linuxfw"
"tailscale.com/util/mak"
)
// ingressProxy corresponds to a Kubernetes Operator's network layer ingress
// proxy. It configures firewall rules (iptables or nftables) to proxy tailnet
// traffic to Kubernetes Services. Currently this is only used for network
// layer proxies in HA mode.
type ingressProxy struct {
	cfgPath string // path to ingress configfile.

	// nfr is the netfilter runner used to configure firewall rules.
	// This is going to be either iptables or nftables based runner.
	// Never nil.
	nfr linuxfw.NetfilterRunner

	kc          kubeclient.Client // never nil
	stateSecret string            // Secret that holds Tailscale state

	// Pod's IP addresses are used as an identifier of this particular Pod.
	// They are compared against the IPs recorded in the status to decide
	// whether the recorded status belongs to this Pod (see isCurrentStatus).
	podIPv4 string // empty if Pod does not have IPv4 address
	podIPv6 string // empty if Pod does not have IPv6 address
}
// run starts the ingress proxy and ensures that firewall rules are set on start
// and refreshed as ingress config changes.
//
// Config changes are detected via fsnotify on the directory containing the
// configfile; if a watcher cannot be created, it falls back to polling every
// 5 seconds. Blocks until ctx is cancelled or a sync fails.
func (p *ingressProxy) run(ctx context.Context, opts ingressProxyOpts) error {
	log.Printf("starting ingress proxy...")
	p.configure(opts)
	var tickChan <-chan time.Time
	var eventChan <-chan fsnotify.Event
	if w, err := fsnotify.NewWatcher(); err != nil {
		// Degraded mode: no inotify available, poll on a timer instead.
		log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err)
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		tickChan = ticker.C
	} else {
		defer w.Close()
		// Watch the parent directory rather than the file itself —
		// presumably so that atomic replaces of the mounted file are still
		// observed; confirm against how the config volume is mounted.
		dir := filepath.Dir(p.cfgPath)
		if err := w.Add(dir); err != nil {
			return fmt.Errorf("failed to add fsnotify watch for %v: %w", dir, err)
		}
		eventChan = w.Events
	}
	// Initial sync before entering the event loop.
	if err := p.sync(ctx); err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-tickChan:
			log.Printf("periodic sync, ensuring firewall config is up to date...")
		case <-eventChan:
			log.Printf("config file change detected, ensuring firewall config is up to date...")
		}
		if err := p.sync(ctx); err != nil {
			return fmt.Errorf("error syncing ingress service config: %w", err)
		}
	}
}
// sync reconciles proxy's firewall rules (iptables or nftables) on ingress config changes:
// - ensures that new firewall rules are added
// - ensures that old firewall rules are deleted
// - updates ingress proxy's status in the state Secret
func (p *ingressProxy) sync(ctx context.Context) error {
	// 1. Get the desired firewall configuration
	cfgs, err := p.getConfigs()
	if err != nil {
		return fmt.Errorf("ingress proxy: error retrieving configs: %w", err)
	}
	// 2. Get the recorded firewall status
	status, err := p.getStatus(ctx)
	if err != nil {
		return fmt.Errorf("ingress proxy: error retrieving current status: %w", err)
	}
	// 3. Ensure that firewall configuration is up to date
	if err := p.syncIngressConfigs(cfgs, status); err != nil {
		return fmt.Errorf("ingress proxy: error syncing configs: %w", err)
	}
	var existingConfigs *ingressservices.Configs
	if status != nil {
		existingConfigs = &status.Configs
	}
	// 4. Update the recorded firewall status. Skipped when the recorded
	// status already matches the desired configs AND was written by this
	// Pod, to avoid needless Secret writes.
	if !(ingressServicesStatusIsEqual(cfgs, existingConfigs) && p.isCurrentStatus(status)) {
		if err := p.recordStatus(ctx, cfgs); err != nil {
			return fmt.Errorf("ingress proxy: error setting status: %w", err)
		}
	}
	return nil
}
// getConfigs returns the desired ingress service configuration from the mounted
// configfile, or nil (and no error) when the file does not exist or is empty.
func (p *ingressProxy) getConfigs() (*ingressservices.Configs, error) {
	j, err := os.ReadFile(p.cfgPath)
	if os.IsNotExist(err) {
		return nil, nil // not configured yet
	}
	if err != nil {
		return nil, err
	}
	// An empty file means no configuration. (The previous
	// `len(j) == 0 || string(j) == ""` tested the same condition twice.)
	if len(j) == 0 {
		return nil, nil
	}
	cfg := &ingressservices.Configs{}
	// cfg is already a pointer; no need for the extra & that the previous
	// version passed (a **Configs also works, but is unidiomatic).
	if err := json.Unmarshal(j, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// getStatus gets the recorded status of the configured firewall. The status is
// stored in the proxy's state Secret. Note that the recorded status might not
// be the current status of the firewall if it belongs to a previous Pod- we
// take that into account further down the line when determining if the desired
// rules are actually present.
//
// Returns (nil, nil) when the Secret has no ingress config entry yet.
func (p *ingressProxy) getStatus(ctx context.Context) (*ingressservices.Status, error) {
	secret, err := p.kc.GetSecret(ctx, p.stateSecret)
	if err != nil {
		return nil, fmt.Errorf("error retrieving state Secret: %w", err)
	}
	raw, ok := secret.Data[ingressservices.IngressConfigKey]
	if !ok {
		return nil, nil
	}
	status := &ingressservices.Status{}
	// raw is already a []byte (Secret.Data values are []byte — see
	// recordStatus which stores json.Marshal output directly), so the
	// previous []byte(raw) conversion was redundant.
	if err := json.Unmarshal(raw, status); err != nil {
		return nil, fmt.Errorf("error unmarshalling previous config: %w", err)
	}
	return status, nil
}
// syncIngressConfigs reconciles the firewall with the desired configuration:
// rules recorded in status but no longer wanted are removed, and wanted rules
// that are not (reliably) present are installed.
func (p *ingressProxy) syncIngressConfigs(cfgs *ingressservices.Configs, status *ingressservices.Status) error {
	toDelete := p.getRulesToDelete(cfgs, status)
	toAdd := p.getRulesToAdd(cfgs, status)
	if err := ensureIngressRulesDeleted(toDelete, p.nfr); err != nil {
		return fmt.Errorf("error deleting ingress rules: %w", err)
	}
	if err := ensureIngressRulesAdded(toAdd, p.nfr); err != nil {
		return fmt.Errorf("error adding ingress rules: %w", err)
	}
	return nil
}
// recordStatus writes the configured firewall status to the proxy's state
// Secret. This allows the Kubernetes Operator to determine whether this proxy
// Pod has setup firewall rules to route traffic for an ingress service.
func (p *ingressProxy) recordStatus(ctx context.Context, newCfg *ingressservices.Configs) error {
	status := &ingressservices.Status{}
	if newCfg != nil {
		status.Configs = *newCfg
	}
	// Pod IPs are used to determine if recorded status applies to THIS proxy Pod.
	status.PodIPv4 = p.podIPv4
	status.PodIPv6 = p.podIPv6
	secret, err := p.kc.GetSecret(ctx, p.stateSecret)
	if err != nil {
		return fmt.Errorf("error retrieving state Secret: %w", err)
	}
	bs, err := json.Marshal(status)
	if err != nil {
		return fmt.Errorf("error marshalling status: %w", err)
	}
	// NOTE(review): this local mutation is never sent to the API server —
	// only the JSON patch below is. Presumably the GetSecret call exists to
	// fail fast if the Secret is missing before patching; confirm whether
	// the assignment can be dropped.
	secret.Data[ingressservices.IngressConfigKey] = bs
	patch := kubeclient.JSONPatch{
		Op:    "replace",
		Path:  fmt.Sprintf("/data/%s", ingressservices.IngressConfigKey),
		Value: bs,
	}
	if err := p.kc.JSONPatchResource(ctx, p.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil {
		return fmt.Errorf("error patching state Secret: %w", err)
	}
	return nil
}
// getRulesToAdd compares the desired configuration against the recorded
// status and returns the Tailscale Service rules that still need to be
// installed. If the recorded status is absent or was written by a different
// Pod, every desired rule is considered missing.
func (p *ingressProxy) getRulesToAdd(cfgs *ingressservices.Configs, status *ingressservices.Status) map[string]ingressservices.Config {
	if cfgs == nil {
		return nil
	}
	statusUsable := status != nil && p.isCurrentStatus(status)
	var missing map[string]ingressservices.Config
	for svcName, want := range *cfgs {
		if statusUsable {
			if got := status.Configs.GetConfig(svcName); got != nil && reflect.DeepEqual(want, *got) {
				continue // already configured as desired
			}
		}
		mak.Set(&missing, svcName, want)
	}
	return missing
}
// getRulesToDelete compares the recorded status against the desired
// configuration and returns the Tailscale Service rules that should be torn
// down. A status that is absent or belongs to a different Pod yields nothing
// to delete (we cannot trust it to describe this Pod's firewall).
func (p *ingressProxy) getRulesToDelete(cfgs *ingressservices.Configs, status *ingressservices.Status) map[string]ingressservices.Config {
	if status == nil || !p.isCurrentStatus(status) {
		return nil
	}
	var stale map[string]ingressservices.Config
	for svcName, got := range status.Configs {
		keep := false
		if cfgs != nil {
			if want := cfgs.GetConfig(svcName); want != nil && reflect.DeepEqual(*want, got) {
				keep = true // still wanted, unchanged
			}
		}
		if !keep {
			mak.Set(&stale, svcName, got)
		}
	}
	return stale
}
// ensureIngressRulesAdded installs the DNAT rules for every mapping (IPv4
// first, then IPv6) of every given Tailscale Service config.
func ensureIngressRulesAdded(cfgs map[string]ingressservices.Config, nfr linuxfw.NetfilterRunner) error {
	for serviceName, cfg := range cfgs {
		for _, m := range []*ingressservices.Mapping{cfg.IPv4Mapping, cfg.IPv6Mapping} {
			if m == nil {
				continue
			}
			if err := addDNATRuleForSvc(nfr, serviceName, m.TailscaleServiceIP, m.ClusterIP); err != nil {
				return fmt.Errorf("error adding ingress rule for %s: %w", serviceName, err)
			}
		}
	}
	return nil
}
// addDNATRuleForSvc logs and installs a DNAT rule that maps a Tailscale
// Service IP to its Kubernetes Service cluster IP.
func addDNATRuleForSvc(nfr linuxfw.NetfilterRunner, serviceName string, tsIP, clusterIP netip.Addr) error {
	log.Printf("adding DNAT rule for Tailscale Service %s with IP %s to Kubernetes Service IP %s", serviceName, tsIP, clusterIP)
	return nfr.EnsureDNATRuleForSvc(serviceName, tsIP, clusterIP)
}
// ensureIngressRulesDeleted removes the DNAT rules for every mapping (IPv4
// first, then IPv6) of every given Tailscale Service config.
func ensureIngressRulesDeleted(cfgs map[string]ingressservices.Config, nfr linuxfw.NetfilterRunner) error {
	for serviceName, cfg := range cfgs {
		for _, m := range []*ingressservices.Mapping{cfg.IPv4Mapping, cfg.IPv6Mapping} {
			if m == nil {
				continue
			}
			if err := deleteDNATRuleForSvc(nfr, serviceName, m.TailscaleServiceIP, m.ClusterIP); err != nil {
				return fmt.Errorf("error deleting ingress rule for %s: %w", serviceName, err)
			}
		}
	}
	return nil
}
// deleteDNATRuleForSvc logs and removes the DNAT rule that maps a Tailscale
// Service IP to its Kubernetes Service cluster IP.
func deleteDNATRuleForSvc(nfr linuxfw.NetfilterRunner, serviceName string, tsIP, clusterIP netip.Addr) error {
	log.Printf("deleting DNAT rule for Tailscale Service %s with IP %s to Kubernetes Service IP %s", serviceName, tsIP, clusterIP)
	return nfr.DeleteDNATRuleForSvc(serviceName, tsIP, clusterIP)
}
// isCurrentStatus reports whether a status read from the proxy's state Secret
// was written by this Pod, identified by its IP addresses. A nil status is
// treated as current.
func (p *ingressProxy) isCurrentStatus(status *ingressservices.Status) bool {
	if status == nil {
		return true
	}
	sameV4 := status.PodIPv4 == p.podIPv4
	sameV6 := status.PodIPv6 == p.podIPv6
	return sameV4 && sameV6
}
// ingressProxyOpts is the one-shot configuration passed to ingressProxy.run;
// its fields mirror those of ingressProxy (see the field docs there).
type ingressProxyOpts struct {
	cfgPath     string
	nfr         linuxfw.NetfilterRunner // never nil
	kc          kubeclient.Client       // never nil
	stateSecret string
	podIPv4 string // empty if Pod does not have an IPv4 address
	podIPv6 string // empty if Pod does not have an IPv6 address
}
// configure sets the ingress proxy's configuration. It is called once on start
// so we don't care about concurrent access to fields.
func (p *ingressProxy) configure(opts ingressProxyOpts) {
	p.cfgPath = opts.cfgPath
	p.nfr = opts.nfr
	p.kc = opts.kc
	p.stateSecret = opts.stateSecret
	p.podIPv4 = opts.podIPv4
	p.podIPv6 = opts.podIPv6
}
// ingressServicesStatusIsEqual reports whether two (possibly nil) sets of
// ingress service configs are semantically equal; two nils are equal, a nil
// and a non-nil are not.
func ingressServicesStatusIsEqual(st, st1 *ingressservices.Configs) bool {
	switch {
	case st == nil && st1 == nil:
		return true
	case st == nil || st1 == nil:
		return false
	default:
		return reflect.DeepEqual(*st, *st1)
	}
}

@ -0,0 +1,223 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package main
import (
"net/netip"
"testing"
"tailscale.com/kube/ingressservices"
"tailscale.com/util/linuxfw"
)
// TestSyncIngressConfigs exercises ingressProxy.syncIngressConfigs against a
// fake netfilter runner and verifies the resulting DNAT rule state for a
// variety of desired-config / recorded-status combinations, including stale
// statuses written by a different Pod.
func TestSyncIngressConfigs(t *testing.T) {
	tests := []struct {
		name           string
		currentConfigs *ingressservices.Configs
		currentStatus  *ingressservices.Status
		// wantServices is the expected final DNAT state, keyed by Tailscale
		// Service name.
		wantServices map[string]struct {
			TailscaleServiceIP netip.Addr
			ClusterIP          netip.Addr
		}
	}{
		{
			name: "add_new_rules_when_no_existing_config",
			currentConfigs: &ingressservices.Configs{
				"svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
			},
			currentStatus: nil,
			wantServices: map[string]struct {
				TailscaleServiceIP netip.Addr
				ClusterIP          netip.Addr
			}{
				"svc:foo": makeWantService("100.64.0.1", "10.0.0.1"),
			},
		},
		{
			name: "add_multiple_services",
			currentConfigs: &ingressservices.Configs{
				"svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
				"svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""),
				"svc:baz": makeServiceConfig("100.64.0.3", "10.0.0.3", "", ""),
			},
			currentStatus: nil,
			wantServices: map[string]struct {
				TailscaleServiceIP netip.Addr
				ClusterIP          netip.Addr
			}{
				"svc:foo": makeWantService("100.64.0.1", "10.0.0.1"),
				"svc:bar": makeWantService("100.64.0.2", "10.0.0.2"),
				"svc:baz": makeWantService("100.64.0.3", "10.0.0.3"),
			},
		},
		{
			name: "add_both_ipv4_and_ipv6_rules",
			currentConfigs: &ingressservices.Configs{
				"svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "2001:db8::1", "2001:db8::2"),
			},
			currentStatus: nil,
			wantServices: map[string]struct {
				TailscaleServiceIP netip.Addr
				ClusterIP          netip.Addr
			}{
				"svc:foo": makeWantService("2001:db8::1", "2001:db8::2"),
			},
		},
		{
			name: "add_ipv6_only_rules",
			currentConfigs: &ingressservices.Configs{
				"svc:ipv6": makeServiceConfig("", "", "2001:db8::10", "2001:db8::20"),
			},
			currentStatus: nil,
			wantServices: map[string]struct {
				TailscaleServiceIP netip.Addr
				ClusterIP          netip.Addr
			}{
				"svc:ipv6": makeWantService("2001:db8::10", "2001:db8::20"),
			},
		},
		{
			name:           "delete_all_rules_when_config_removed",
			currentConfigs: nil,
			currentStatus: &ingressservices.Status{
				Configs: ingressservices.Configs{
					"svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
					"svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""),
				},
				PodIPv4: "10.0.0.2",    // Current pod IPv4
				PodIPv6: "2001:db8::2", // Current pod IPv6
			},
			wantServices: map[string]struct {
				TailscaleServiceIP netip.Addr
				ClusterIP          netip.Addr
			}{},
		},
		{
			name: "add_remove_modify",
			currentConfigs: &ingressservices.Configs{
				"svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.2", "", ""), // Changed cluster IP
				"svc:new": makeServiceConfig("100.64.0.4", "10.0.0.4", "", ""),
			},
			currentStatus: &ingressservices.Status{
				Configs: ingressservices.Configs{
					"svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
					"svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""),
					"svc:baz": makeServiceConfig("100.64.0.3", "10.0.0.3", "", ""),
				},
				PodIPv4: "10.0.0.2",    // Current pod IPv4
				PodIPv6: "2001:db8::2", // Current pod IPv6
			},
			wantServices: map[string]struct {
				TailscaleServiceIP netip.Addr
				ClusterIP          netip.Addr
			}{
				"svc:foo": makeWantService("100.64.0.1", "10.0.0.2"),
				"svc:new": makeWantService("100.64.0.4", "10.0.0.4"),
			},
		},
		{
			// Status written by a different Pod: all desired rules must be
			// (re)installed, and nothing gets deleted based on that status.
			name: "update_with_outdated_status",
			currentConfigs: &ingressservices.Configs{
				"svc:web": makeServiceConfig("100.64.0.10", "10.0.0.10", "", ""),
				"svc:web-ipv6": {
					IPv6Mapping: &ingressservices.Mapping{
						TailscaleServiceIP: netip.MustParseAddr("2001:db8::10"),
						ClusterIP:          netip.MustParseAddr("2001:db8::20"),
					},
				},
				"svc:api": makeServiceConfig("100.64.0.20", "10.0.0.20", "", ""),
			},
			currentStatus: &ingressservices.Status{
				Configs: ingressservices.Configs{
					"svc:web": makeServiceConfig("100.64.0.10", "10.0.0.10", "", ""),
					"svc:web-ipv6": {
						IPv6Mapping: &ingressservices.Mapping{
							TailscaleServiceIP: netip.MustParseAddr("2001:db8::10"),
							ClusterIP:          netip.MustParseAddr("2001:db8::20"),
						},
					},
					"svc:old": makeServiceConfig("100.64.0.30", "10.0.0.30", "", ""),
				},
				PodIPv4: "10.0.0.1",    // Outdated pod IP
				PodIPv6: "2001:db8::1", // Outdated pod IP
			},
			wantServices: map[string]struct {
				TailscaleServiceIP netip.Addr
				ClusterIP          netip.Addr
			}{
				"svc:web":      makeWantService("100.64.0.10", "10.0.0.10"),
				"svc:web-ipv6": makeWantService("2001:db8::10", "2001:db8::20"),
				"svc:api":      makeWantService("100.64.0.20", "10.0.0.20"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var nfr linuxfw.NetfilterRunner = linuxfw.NewFakeNetfilterRunner()
			ep := &ingressProxy{
				nfr:     nfr,
				podIPv4: "10.0.0.2",    // Current pod IPv4
				podIPv6: "2001:db8::2", // Current pod IPv6
			}
			err := ep.syncIngressConfigs(tt.currentConfigs, tt.currentStatus)
			if err != nil {
				t.Fatalf("syncIngressConfigs failed: %v", err)
			}
			fake := nfr.(*linuxfw.FakeNetfilterRunner)
			gotServices := fake.GetServiceState()
			if len(gotServices) != len(tt.wantServices) {
				t.Errorf("got %d services, want %d", len(gotServices), len(tt.wantServices))
			}
			for svc, want := range tt.wantServices {
				got, ok := gotServices[svc]
				if !ok {
					t.Errorf("service %s not found", svc)
					continue
				}
				if got.TailscaleServiceIP != want.TailscaleServiceIP {
					t.Errorf("service %s: got TailscaleServiceIP %v, want %v", svc, got.TailscaleServiceIP, want.TailscaleServiceIP)
				}
				if got.ClusterIP != want.ClusterIP {
					t.Errorf("service %s: got ClusterIP %v, want %v", svc, got.ClusterIP, want.ClusterIP)
				}
			}
		})
	}
}
// makeServiceConfig builds an ingressservices.Config for tests. A mapping
// (IPv4 or IPv6) is only populated when both of its addresses are non-empty.
func makeServiceConfig(tsIP, clusterIP string, tsIP6, clusterIP6 string) ingressservices.Config {
	mapping := func(ts, cluster string) *ingressservices.Mapping {
		if ts == "" || cluster == "" {
			return nil
		}
		return &ingressservices.Mapping{
			TailscaleServiceIP: netip.MustParseAddr(ts),
			ClusterIP:          netip.MustParseAddr(cluster),
		}
	}
	return ingressservices.Config{
		IPv4Mapping: mapping(tsIP, clusterIP),
		IPv6Mapping: mapping(tsIP6, clusterIP6),
	}
}
func makeWantService(tsIP, clusterIP string) struct {
TailscaleServiceIP netip.Addr
ClusterIP netip.Addr
} {
return struct {
TailscaleServiceIP netip.Addr
ClusterIP netip.Addr
}{
TailscaleServiceIP: netip.MustParseAddr(tsIP),
ClusterIP: netip.MustParseAddr(clusterIP),
}
}

@ -8,31 +8,67 @@ package main
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"log" "log"
"net/http" "net/http"
"net/netip" "net/netip"
"os" "os"
"strings"
"time"
"tailscale.com/ipn"
"tailscale.com/kube/egressservices"
"tailscale.com/kube/ingressservices"
"tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeapi"
"tailscale.com/kube/kubeclient" "tailscale.com/kube/kubeclient"
"tailscale.com/kube/kubetypes"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/logger"
"tailscale.com/util/backoff"
"tailscale.com/util/set"
) )
// storeDeviceID writes deviceID to 'device_id' data field of the named // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use
// Kubernetes Secret. // this rather than any of the upstream Kubernetes client libaries to avoid extra imports.
func storeDeviceID(ctx context.Context, secretName string, deviceID tailcfg.StableNodeID) error { type kubeClient struct {
kubeclient.Client
stateSecret string
canPatch bool // whether the client has permissions to patch Kubernetes Secrets
}
func newKubeClient(root string, stateSecret string) (*kubeClient, error) {
if root != "/" {
// If we are running in a test, we need to set the root path to the fake
// service account directory.
kubeclient.SetRootPathForTesting(root)
}
var err error
kc, err := kubeclient.New("tailscale-container")
if err != nil {
return nil, fmt.Errorf("Error creating kube client: %w", err)
}
if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
// Derive the API server address from the environment variables
// Used to set http server in tests, or optionally enabled by flag
kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
}
return &kubeClient{Client: kc, stateSecret: stateSecret}, nil
}
// storeDeviceID writes deviceID to 'device_id' data field of the client's state Secret.
func (kc *kubeClient) storeDeviceID(ctx context.Context, deviceID tailcfg.StableNodeID) error {
s := &kubeapi.Secret{ s := &kubeapi.Secret{
Data: map[string][]byte{ Data: map[string][]byte{
"device_id": []byte(deviceID), kubetypes.KeyDeviceID: []byte(deviceID),
}, },
} }
return kc.StrategicMergePatchSecret(ctx, secretName, s, "tailscale-container") return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
} }
// storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields // storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields 'device_ips', 'device_fqdn' of client's
// 'device_ips', 'device_fqdn' of the named Kubernetes Secret. // state Secret.
func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, addresses []netip.Prefix) error { func (kc *kubeClient) storeDeviceEndpoints(ctx context.Context, fqdn string, addresses []netip.Prefix) error {
var ips []string var ips []string
for _, addr := range addresses { for _, addr := range addresses {
ips = append(ips, addr.Addr().String()) ips = append(ips, addr.Addr().String())
@ -44,16 +80,28 @@ func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, a
s := &kubeapi.Secret{ s := &kubeapi.Secret{
Data: map[string][]byte{ Data: map[string][]byte{
"device_fqdn": []byte(fqdn), kubetypes.KeyDeviceFQDN: []byte(fqdn),
"device_ips": deviceIPs, kubetypes.KeyDeviceIPs: deviceIPs,
},
}
return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
}
// storeHTTPSEndpoint writes an HTTPS endpoint exposed by this device via 'tailscale serve' to the client's state
// Secret. In practice this will be the same value that gets written to 'device_fqdn', but this should only be called
// when the serve config has been successfully set up.
func (kc *kubeClient) storeHTTPSEndpoint(ctx context.Context, ep string) error {
s := &kubeapi.Secret{
Data: map[string][]byte{
kubetypes.KeyHTTPSEndpoint: []byte(ep),
}, },
} }
return kc.StrategicMergePatchSecret(ctx, secretName, s, "tailscale-container") return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
} }
// deleteAuthKey deletes the 'authkey' field of the given kube // deleteAuthKey deletes the 'authkey' field of the given kube
// secret. No-op if there is no authkey in the secret. // secret. No-op if there is no authkey in the secret.
func deleteAuthKey(ctx context.Context, secretName string) error { func (kc *kubeClient) deleteAuthKey(ctx context.Context) error {
// m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902. // m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902.
m := []kubeclient.JSONPatch{ m := []kubeclient.JSONPatch{
{ {
@ -61,7 +109,7 @@ func deleteAuthKey(ctx context.Context, secretName string) error {
Path: "/data/authkey", Path: "/data/authkey",
}, },
} }
if err := kc.JSONPatchSecret(ctx, secretName, m); err != nil { if err := kc.JSONPatchResource(ctx, kc.stateSecret, kubeclient.TypeSecrets, m); err != nil {
if s, ok := err.(*kubeapi.Status); ok && s.Code == http.StatusUnprocessableEntity { if s, ok := err.(*kubeapi.Status); ok && s.Code == http.StatusUnprocessableEntity {
// This is kubernetes-ese for "the field you asked to // This is kubernetes-ese for "the field you asked to
// delete already doesn't exist", aka no-op. // delete already doesn't exist", aka no-op.
@ -72,22 +120,100 @@ func deleteAuthKey(ctx context.Context, secretName string) error {
return nil return nil
} }
var kc kubeclient.Client // resetContainerbootState resets state from previous runs of containerboot to
// ensure the operator doesn't use stale state when a Pod is first recreated.
func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string) error {
existingSecret, err := kc.GetSecret(ctx, kc.stateSecret)
switch {
case kubeclient.IsNotFoundErr(err):
// In the case that the Secret doesn't exist, we don't have any state to reset and can return early.
return nil
case err != nil:
return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err)
}
s := &kubeapi.Secret{
Data: map[string][]byte{
kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion),
},
}
if podUID != "" {
s.Data[kubetypes.KeyPodUID] = []byte(podUID)
}
toClear := set.SetOf([]string{
kubetypes.KeyDeviceID,
kubetypes.KeyDeviceFQDN,
kubetypes.KeyDeviceIPs,
kubetypes.KeyHTTPSEndpoint,
egressservices.KeyEgressServices,
ingressservices.IngressConfigKey,
})
for key := range existingSecret.Data {
if toClear.Contains(key) {
// It's fine to leave the key in place as a debugging breadcrumb,
// it should get a new value soon.
s.Data[key] = nil
}
}
func initKubeClient(root string) { return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
if root != "/" { }
// If we are running in a test, we need to set the root path to the fake
// service account directory. // waitForConsistentState waits for tailscaled to finish writing state if it
kubeclient.SetRootPathForTesting(root) // looks like it's started. It is designed to reduce the likelihood that
// tailscaled gets shut down in the window between authenticating to control
// and finishing writing state. However, it's not bullet proof because we can't
// atomically authenticate and write state.
func (kc *kubeClient) waitForConsistentState(ctx context.Context) error {
var logged bool
bo := backoff.NewBackoff("", logger.Discard, 2*time.Second)
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
secret, err := kc.GetSecret(ctx, kc.stateSecret)
if ctx.Err() != nil || kubeclient.IsNotFoundErr(err) {
return nil
} }
var err error
kc, err = kubeclient.New()
if err != nil { if err != nil {
log.Fatalf("Error creating kube client: %v", err) return fmt.Errorf("getting Secret %q: %v", kc.stateSecret, err)
}
if hasConsistentState(secret.Data) {
return nil
}
if !logged {
log.Printf("Waiting for tailscaled to finish writing state to Secret %q", kc.stateSecret)
logged = true
}
bo.BackOff(ctx, errors.New("")) // Fake error to trigger actual sleep.
} }
if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
// Derive the API server address from the environment variables
// Used to set http server in tests, or optionally enabled by flag
kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
} }
// hasConsistentState returns true is there is either no state or the full set
// of expected keys are present.
func hasConsistentState(d map[string][]byte) bool {
var (
_, hasCurrent = d[string(ipn.CurrentProfileStateKey)]
_, hasKnown = d[string(ipn.KnownProfilesStateKey)]
_, hasMachine = d[string(ipn.MachineKeyStateKey)]
hasProfile bool
)
for k := range d {
if strings.HasPrefix(k, "profile-") {
if hasProfile {
return false // We only expect one profile.
}
hasProfile = true
}
}
// Approximate check, we don't want to reimplement all of profileManager.
return (hasCurrent && hasKnown && hasMachine && hasProfile) ||
(!hasCurrent && !hasKnown && !hasMachine && !hasProfile)
} }

@ -8,11 +8,18 @@ package main
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"testing" "testing"
"time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"tailscale.com/ipn"
"tailscale.com/kube/egressservices"
"tailscale.com/kube/ingressservices"
"tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeapi"
"tailscale.com/kube/kubeclient" "tailscale.com/kube/kubeclient"
"tailscale.com/kube/kubetypes"
"tailscale.com/tailcfg"
) )
func TestSetupKube(t *testing.T) { func TestSetupKube(t *testing.T) {
@ -21,7 +28,7 @@ func TestSetupKube(t *testing.T) {
cfg *settings cfg *settings
wantErr bool wantErr bool
wantCfg *settings wantCfg *settings
kc kubeclient.Client kc *kubeClient
}{ }{
{ {
name: "TS_AUTHKEY set, state Secret exists", name: "TS_AUTHKEY set, state Secret exists",
@ -29,14 +36,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil return false, false, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, nil return nil, nil
}, },
}, }},
wantCfg: &settings{ wantCfg: &settings{
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
@ -48,14 +55,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, true, nil return false, true, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 404} return nil, &kubeapi.Status{Code: 404}
}, },
}, }},
wantCfg: &settings{ wantCfg: &settings{
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
@ -67,14 +74,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil return false, false, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 404} return nil, &kubeapi.Status{Code: 404}
}, },
}, }},
wantCfg: &settings{ wantCfg: &settings{
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
@ -87,14 +94,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil return false, false, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 403} return nil, &kubeapi.Status{Code: 403}
}, },
}, }},
wantCfg: &settings{ wantCfg: &settings{
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
@ -111,11 +118,11 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo", AuthKey: "foo",
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, errors.New("broken") return false, false, errors.New("broken")
}, },
}, }},
wantErr: true, wantErr: true,
}, },
{ {
@ -127,14 +134,14 @@ func TestSetupKube(t *testing.T) {
wantCfg: &settings{ wantCfg: &settings{
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, true, nil return false, true, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 404} return nil, &kubeapi.Status{Code: 404}
}, },
}, }},
}, },
{ {
// Interactive login using URL in Pod logs // Interactive login using URL in Pod logs
@ -145,28 +152,28 @@ func TestSetupKube(t *testing.T) {
wantCfg: &settings{ wantCfg: &settings{
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil return false, false, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{}, nil return &kubeapi.Secret{}, nil
}, },
}, }},
}, },
{ {
name: "TS_AUTHKEY not set, state Secret contains auth key, we do not have RBAC to patch it", name: "TS_AUTHKEY not set, state Secret contains auth key, we do not have RBAC to patch it",
cfg: &settings{ cfg: &settings{
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil return false, false, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil
}, },
}, }},
wantCfg: &settings{ wantCfg: &settings{
KubeSecret: "foo", KubeSecret: "foo",
}, },
@ -177,14 +184,14 @@ func TestSetupKube(t *testing.T) {
cfg: &settings{ cfg: &settings{
KubeSecret: "foo", KubeSecret: "foo",
}, },
kc: &kubeclient.FakeClient{ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return true, false, nil return true, false, nil
}, },
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil
}, },
}, }},
wantCfg: &settings{ wantCfg: &settings{
KubeSecret: "foo", KubeSecret: "foo",
AuthKey: "foo", AuthKey: "foo",
@ -194,9 +201,9 @@ func TestSetupKube(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
kc = tt.kc kc := tt.kc
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
if err := tt.cfg.setupKube(context.Background()); (err != nil) != tt.wantErr { if err := tt.cfg.setupKube(context.Background(), kc); (err != nil) != tt.wantErr {
t.Errorf("settings.setupKube() error = %v, wantErr %v", err, tt.wantErr) t.Errorf("settings.setupKube() error = %v, wantErr %v", err, tt.wantErr)
} }
if diff := cmp.Diff(*tt.cfg, *tt.wantCfg); diff != "" { if diff := cmp.Diff(*tt.cfg, *tt.wantCfg); diff != "" {
@ -205,3 +212,109 @@ func TestSetupKube(t *testing.T) {
}) })
} }
} }
func TestWaitForConsistentState(t *testing.T) {
data := map[string][]byte{
// Missing _current-profile.
string(ipn.KnownProfilesStateKey): []byte(""),
string(ipn.MachineKeyStateKey): []byte(""),
"profile-foo": []byte(""),
}
kc := &kubeClient{
Client: &kubeclient.FakeClient{
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{
Data: data,
}, nil
},
},
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := kc.waitForConsistentState(ctx); err != context.DeadlineExceeded {
t.Fatalf("expected DeadlineExceeded, got %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
defer cancel()
data[string(ipn.CurrentProfileStateKey)] = []byte("")
if err := kc.waitForConsistentState(ctx); err != nil {
t.Fatalf("expected nil, got %v", err)
}
}
func TestResetContainerbootState(t *testing.T) {
capver := fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion)
for name, tc := range map[string]struct {
podUID string
initial map[string][]byte
expected map[string][]byte
}{
"empty_initial": {
podUID: "1234",
initial: map[string][]byte{},
expected: map[string][]byte{
kubetypes.KeyCapVer: capver,
kubetypes.KeyPodUID: []byte("1234"),
},
},
"empty_initial_no_pod_uid": {
initial: map[string][]byte{},
expected: map[string][]byte{
kubetypes.KeyCapVer: capver,
},
},
"only_relevant_keys_updated": {
podUID: "1234",
initial: map[string][]byte{
kubetypes.KeyCapVer: []byte("1"),
kubetypes.KeyPodUID: []byte("5678"),
kubetypes.KeyDeviceID: []byte("device-id"),
kubetypes.KeyDeviceFQDN: []byte("device-fqdn"),
kubetypes.KeyDeviceIPs: []byte(`["192.0.2.1"]`),
kubetypes.KeyHTTPSEndpoint: []byte("https://example.com"),
egressservices.KeyEgressServices: []byte("egress-services"),
ingressservices.IngressConfigKey: []byte("ingress-config"),
"_current-profile": []byte("current-profile"),
"_machinekey": []byte("machine-key"),
"_profiles": []byte("profiles"),
"_serve_e0ce": []byte("serve-e0ce"),
"profile-e0ce": []byte("profile-e0ce"),
},
expected: map[string][]byte{
kubetypes.KeyCapVer: capver,
kubetypes.KeyPodUID: []byte("1234"),
// Cleared keys.
kubetypes.KeyDeviceID: nil,
kubetypes.KeyDeviceFQDN: nil,
kubetypes.KeyDeviceIPs: nil,
kubetypes.KeyHTTPSEndpoint: nil,
egressservices.KeyEgressServices: nil,
ingressservices.IngressConfigKey: nil,
// Tailscaled keys not included in patch.
},
},
} {
t.Run(name, func(t *testing.T) {
var actual map[string][]byte
kc := &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{
Data: tc.initial,
}, nil
},
StrategicMergePatchSecretImpl: func(ctx context.Context, name string, secret *kubeapi.Secret, _ string) error {
actual = secret.Data
return nil
},
}}
if err := kc.resetContainerbootState(context.Background(), tc.podUID); err != nil {
t.Fatalf("resetContainerbootState() error = %v", err)
}
if diff := cmp.Diff(tc.expected, actual); diff != "" {
t.Errorf("resetContainerbootState() mismatch (-want +got):\n%s", diff)
}
})
}
}

@ -52,11 +52,17 @@
// ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN. // ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN.
// It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes, // It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes,
// and will be re-applied when it changes. // and will be re-applied when it changes.
// - TS_HEALTHCHECK_ADDR_PORT: if specified, an HTTP health endpoint will be // - TS_HEALTHCHECK_ADDR_PORT: deprecated, use TS_ENABLE_HEALTH_CHECK instead and optionally
// served at /healthz at the provided address, which should be in form [<address>]:<port>. // set TS_LOCAL_ADDR_PORT. Will be removed in 1.82.0.
// If not set, no health check will be run. If set to :<port>, addr will default to 0.0.0.0 // - TS_LOCAL_ADDR_PORT: the address and port to serve local metrics and health
// The health endpoint will return 200 OK if this node has at least one tailnet IP address, // check endpoints if enabled via TS_ENABLE_METRICS and/or TS_ENABLE_HEALTH_CHECK.
// otherwise returns 503. // Defaults to [::]:9002, serving on all available interfaces.
// - TS_ENABLE_METRICS: if true, a metrics endpoint will be served at /metrics on
// the address specified by TS_LOCAL_ADDR_PORT. See https://tailscale.com/kb/1482/client-metrics
// for more information on the metrics exposed.
// - TS_ENABLE_HEALTH_CHECK: if true, a health check endpoint will be served at /healthz on
// the address specified by TS_LOCAL_ADDR_PORT. The health endpoint will return 200
// OK if this node has at least one tailnet IP address, otherwise returns 503.
// NB: the health criteria might change in the future. // NB: the health criteria might change in the future.
// - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a // - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a
// directory that containers tailscaled config in file. The config file needs to be // directory that containers tailscaled config in file. The config file needs to be
@ -99,10 +105,10 @@ import (
"log" "log"
"math" "math"
"net" "net"
"net/http"
"net/netip" "net/netip"
"os" "os"
"os/signal" "os/signal"
"path"
"path/filepath" "path/filepath"
"slices" "slices"
"strings" "strings"
@ -115,6 +121,10 @@ import (
"tailscale.com/client/tailscale" "tailscale.com/client/tailscale"
"tailscale.com/ipn" "tailscale.com/ipn"
kubeutils "tailscale.com/k8s-operator" kubeutils "tailscale.com/k8s-operator"
healthz "tailscale.com/kube/health"
"tailscale.com/kube/kubetypes"
"tailscale.com/kube/metrics"
"tailscale.com/kube/services"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/logger" "tailscale.com/types/logger"
"tailscale.com/types/ptr" "tailscale.com/types/ptr"
@ -130,82 +140,134 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) {
} }
func main() { func main() {
if err := run(); err != nil && !errors.Is(err, context.Canceled) {
log.Fatal(err)
}
}
func run() error {
log.SetPrefix("boot: ") log.SetPrefix("boot: ")
tailscale.I_Acknowledge_This_API_Is_Unstable = true tailscale.I_Acknowledge_This_API_Is_Unstable = true
cfg := &settings{
AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), cfg, err := configFromEnv()
Hostname: defaultEnv("TS_HOSTNAME", ""), if err != nil {
Routes: defaultEnvStringPointer("TS_ROUTES"), return fmt.Errorf("invalid configuration: %w", err)
ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""),
ProxyTargetIP: defaultEnv("TS_DEST_IP", ""),
ProxyTargetDNSName: defaultEnv("TS_EXPERIMENTAL_DEST_DNS_NAME", ""),
TailnetTargetIP: defaultEnv("TS_TAILNET_TARGET_IP", ""),
TailnetTargetFQDN: defaultEnv("TS_TAILNET_TARGET_FQDN", ""),
DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""),
ExtraArgs: defaultEnv("TS_EXTRA_ARGS", ""),
InKubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "",
UserspaceMode: defaultBool("TS_USERSPACE", true),
StateDir: defaultEnv("TS_STATE_DIR", ""),
AcceptDNS: defaultEnvBoolPointer("TS_ACCEPT_DNS"),
KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"),
SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""),
HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""),
Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"),
AuthOnce: defaultBool("TS_AUTH_ONCE", false),
Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"),
TailscaledConfigFilePath: tailscaledConfigFilePath(),
AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false),
PodIP: defaultEnv("POD_IP", ""),
EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false),
HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""),
EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""),
}
if err := cfg.validate(); err != nil {
log.Fatalf("invalid configuration: %v", err)
} }
if !cfg.UserspaceMode { if !cfg.UserspaceMode {
if err := ensureTunFile(cfg.Root); err != nil { if err := ensureTunFile(cfg.Root); err != nil {
log.Fatalf("Unable to create tuntap device file: %v", err) return fmt.Errorf("unable to create tuntap device file: %w", err)
} }
if cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.Routes != nil || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" { if cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.Routes != nil || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" {
if err := ensureIPForwarding(cfg.Root, cfg.ProxyTargetIP, cfg.TailnetTargetIP, cfg.TailnetTargetFQDN, cfg.Routes); err != nil { if err := ensureIPForwarding(cfg.Root, cfg.ProxyTargetIP, cfg.TailnetTargetIP, cfg.TailnetTargetFQDN, cfg.Routes); err != nil {
log.Printf("Failed to enable IP forwarding: %v", err) log.Printf("Failed to enable IP forwarding: %v", err)
log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.") log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.")
if cfg.InKubernetes { if cfg.InKubernetes {
log.Fatalf("You can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.") return fmt.Errorf("you can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.")
} else { } else {
log.Fatalf("You can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.") return fmt.Errorf("you can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.")
} }
} }
} }
} }
// Context is used for all setup stuff until we're in steady // Root context for the whole containerboot process, used to make sure
// shutdown signals are promptly and cleanly handled.
ctx, cancel := contextWithExitSignalWatch()
defer cancel()
// bootCtx is used for all setup stuff until we're in steady
// state, so that if something is hanging we eventually time out // state, so that if something is hanging we eventually time out
// and crashloop the container. // and crashloop the container.
bootCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) bootCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel() defer cancel()
var kc *kubeClient
if cfg.InKubernetes { if cfg.InKubernetes {
initKubeClient(cfg.Root) kc, err = newKubeClient(cfg.Root, cfg.KubeSecret)
if err := cfg.setupKube(bootCtx); err != nil { if err != nil {
log.Fatalf("error setting up for running on Kubernetes: %v", err) return fmt.Errorf("error initializing kube client: %w", err)
}
if err := cfg.setupKube(bootCtx, kc); err != nil {
return fmt.Errorf("error setting up for running on Kubernetes: %w", err)
}
// Clear out any state from previous runs of containerboot. Check
// hasKubeStateStore because although we know we're in kube, that
// doesn't guarantee the state store is properly configured.
if hasKubeStateStore(cfg) {
if err := kc.resetContainerbootState(bootCtx, cfg.PodUID); err != nil {
return fmt.Errorf("error clearing previous state from Secret: %w", err)
}
} }
} }
client, daemonProcess, err := startTailscaled(bootCtx, cfg) client, daemonProcess, err := startTailscaled(bootCtx, cfg)
if err != nil { if err != nil {
log.Fatalf("failed to bring up tailscale: %v", err) return fmt.Errorf("failed to bring up tailscale: %w", err)
} }
killTailscaled := func() { killTailscaled := func() {
// The default termination grace period for a Pod is 30s. We wait 25s at
// most so that we still reserve some of that budget for tailscaled
// to receive and react to a SIGTERM before the SIGKILL that k8s
// will send at the end of the grace period.
ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second)
defer cancel()
if err := services.EnsureServicesNotAdvertised(ctx, client, log.Printf); err != nil {
log.Printf("Error ensuring services are not advertised: %v", err)
}
if hasKubeStateStore(cfg) {
// Check we're not shutting tailscaled down while it's still writing
// state. If we authenticate and fail to write all the state, we'll
// never recover automatically.
log.Printf("Checking for consistent state")
err := kc.waitForConsistentState(ctx)
if err != nil {
log.Printf("Error waiting for consistent state on shutdown: %v", err)
}
}
log.Printf("Sending SIGTERM to tailscaled")
if err := daemonProcess.Signal(unix.SIGTERM); err != nil { if err := daemonProcess.Signal(unix.SIGTERM); err != nil {
log.Fatalf("error shutting tailscaled down: %v", err) log.Fatalf("error shutting tailscaled down: %v", err)
} }
} }
defer killTailscaled() defer killTailscaled()
var healthCheck *healthz.Healthz
ep := &egressProxy{}
if cfg.HealthCheckAddrPort != "" {
mux := http.NewServeMux()
log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort)
healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf)
close := runHTTPServer(mux, cfg.HealthCheckAddrPort)
defer close()
}
if cfg.localMetricsEnabled() || cfg.localHealthEnabled() || cfg.egressSvcsTerminateEPEnabled() {
mux := http.NewServeMux()
if cfg.localMetricsEnabled() {
log.Printf("Running metrics endpoint at %s/metrics", cfg.LocalAddrPort)
metrics.RegisterMetricsHandlers(mux, client, cfg.DebugAddrPort)
}
if cfg.localHealthEnabled() {
log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort)
healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf)
}
if cfg.egressSvcsTerminateEPEnabled() {
log.Printf("Running egress preshutdown hook at %s%s", cfg.LocalAddrPort, kubetypes.EgessServicesPreshutdownEP)
ep.registerHandlers(mux)
}
close := runHTTPServer(mux, cfg.LocalAddrPort)
defer close()
}
if cfg.EnableForwardingOptimizations { if cfg.EnableForwardingOptimizations {
if err := client.SetUDPGROForwarding(bootCtx); err != nil { if err := client.SetUDPGROForwarding(bootCtx); err != nil {
log.Printf("[unexpected] error enabling UDP GRO forwarding: %v", err) log.Printf("[unexpected] error enabling UDP GRO forwarding: %v", err)
@ -214,7 +276,7 @@ func main() {
w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState) w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState)
if err != nil { if err != nil {
log.Fatalf("failed to watch tailscaled for updates: %v", err) return fmt.Errorf("failed to watch tailscaled for updates: %w", err)
} }
// Now that we've started tailscaled, we can symlink the socket to the // Now that we've started tailscaled, we can symlink the socket to the
@ -250,18 +312,18 @@ func main() {
didLogin = true didLogin = true
w.Close() w.Close()
if err := tailscaleUp(bootCtx, cfg); err != nil { if err := tailscaleUp(bootCtx, cfg); err != nil {
return fmt.Errorf("failed to auth tailscale: %v", err) return fmt.Errorf("failed to auth tailscale: %w", err)
} }
w, err = client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState) w, err = client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
if err != nil { if err != nil {
return fmt.Errorf("rewatching tailscaled for updates after auth: %v", err) return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err)
} }
return nil return nil
} }
if isTwoStepConfigAlwaysAuth(cfg) { if isTwoStepConfigAlwaysAuth(cfg) {
if err := authTailscale(); err != nil { if err := authTailscale(); err != nil {
log.Fatalf("failed to auth tailscale: %v", err) return fmt.Errorf("failed to auth tailscale: %w", err)
} }
} }
@ -269,7 +331,7 @@ authLoop:
for { for {
n, err := w.Next() n, err := w.Next()
if err != nil { if err != nil {
log.Fatalf("failed to read from tailscaled: %v", err) return fmt.Errorf("failed to read from tailscaled: %w", err)
} }
if n.State != nil { if n.State != nil {
@ -278,10 +340,10 @@ authLoop:
if isOneStepConfig(cfg) { if isOneStepConfig(cfg) {
// This could happen if this is the first time tailscaled was run for this // This could happen if this is the first time tailscaled was run for this
// device and the auth key was not passed via the configfile. // device and the auth key was not passed via the configfile.
log.Fatalf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.") return fmt.Errorf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.")
} }
if err := authTailscale(); err != nil { if err := authTailscale(); err != nil {
log.Fatalf("failed to auth tailscale: %v", err) return fmt.Errorf("failed to auth tailscale: %w", err)
} }
case ipn.NeedsMachineAuth: case ipn.NeedsMachineAuth:
log.Printf("machine authorization required, please visit the admin panel") log.Printf("machine authorization required, please visit the admin panel")
@ -301,22 +363,20 @@ authLoop:
w.Close() w.Close()
ctx, cancel := contextWithExitSignalWatch()
defer cancel()
if isTwoStepConfigAuthOnce(cfg) { if isTwoStepConfigAuthOnce(cfg) {
// Now that we are authenticated, we can set/reset any of the // Now that we are authenticated, we can set/reset any of the
// settings that we need to. // settings that we need to.
if err := tailscaleSet(ctx, cfg); err != nil { if err := tailscaleSet(ctx, cfg); err != nil {
log.Fatalf("failed to auth tailscale: %v", err) return fmt.Errorf("failed to auth tailscale: %w", err)
} }
} }
if cfg.ServeConfigPath != "" { // Remove any serve config and advertised HTTPS endpoint that may have been set by a previous run of
// Remove any serve config that may have been set by a previous run of
// containerboot, but only if we're providing a new one. // containerboot, but only if we're providing a new one.
if cfg.ServeConfigPath != "" {
log.Printf("serve proxy: unsetting previous config")
if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil {
log.Fatalf("failed to unset serve config: %v", err) return fmt.Errorf("failed to unset serve config: %w", err)
} }
} }
@ -325,14 +385,20 @@ authLoop:
// authkey is no longer needed. We don't strictly need to // authkey is no longer needed. We don't strictly need to
// wipe it, but it's good hygiene. // wipe it, but it's good hygiene.
log.Printf("Deleting authkey from kube secret") log.Printf("Deleting authkey from kube secret")
if err := deleteAuthKey(ctx, cfg.KubeSecret); err != nil { if err := kc.deleteAuthKey(ctx); err != nil {
log.Fatalf("deleting authkey from kube secret: %v", err) return fmt.Errorf("deleting authkey from kube secret: %w", err)
} }
} }
w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState) w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
if err != nil { if err != nil {
log.Fatalf("rewatching tailscaled for updates after auth: %v", err) return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err)
}
// If tailscaled config was read from a mounted file, watch the file for updates and reload.
cfgWatchErrChan := make(chan error)
if cfg.TailscaledConfigFilePath != "" {
go watchTailscaledConfigChanges(ctx, cfg.TailscaledConfigFilePath, client, cfgWatchErrChan)
} }
var ( var (
@ -349,17 +415,14 @@ authLoop:
certDomain = new(atomic.Pointer[string]) certDomain = new(atomic.Pointer[string])
certDomainChanged = make(chan bool, 1) certDomainChanged = make(chan bool, 1)
h = &healthz{} // http server for the healthz endpoint triggerWatchServeConfigChanges sync.Once
healthzRunner = sync.OnceFunc(func() { runHealthz(cfg.HealthCheckAddrPort, h) })
) )
if cfg.ServeConfigPath != "" {
go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client)
}
var nfr linuxfw.NetfilterRunner var nfr linuxfw.NetfilterRunner
if isL3Proxy(cfg) { if isL3Proxy(cfg) {
nfr, err = newNetfilterRunner(log.Printf) nfr, err = newNetfilterRunner(log.Printf)
if err != nil { if err != nil {
log.Fatalf("error creating new netfilter runner: %v", err) return fmt.Errorf("error creating new netfilter runner: %w", err)
} }
} }
@ -377,7 +440,8 @@ authLoop:
) )
// egressSvcsErrorChan will get an error sent to it if this containerboot instance is configured to expose 1+ // egressSvcsErrorChan will get an error sent to it if this containerboot instance is configured to expose 1+
// egress services in HA mode and errored. // egress services in HA mode and errored.
var egressSvcsErrorChan = make(chan error) egressSvcsErrorChan := make(chan error)
ingressSvcsErrorChan := make(chan error)
defer t.Stop() defer t.Stop()
// resetTimer resets timer for when to next attempt to resolve the DNS // resetTimer resets timer for when to next attempt to resolve the DNS
// name for the proxy configured with TS_EXPERIMENTAL_DEST_DNS_NAME. The // name for the proxy configured with TS_EXPERIMENTAL_DEST_DNS_NAME. The
@ -430,7 +494,9 @@ runLoop:
killTailscaled() killTailscaled()
break runLoop break runLoop
case err := <-errChan: case err := <-errChan:
log.Fatalf("failed to read from tailscaled: %v", err) return fmt.Errorf("failed to read from tailscaled: %w", err)
case err := <-cfgWatchErrChan:
return fmt.Errorf("failed to watch tailscaled config: %w", err)
case n := <-notifyChan: case n := <-notifyChan:
if n.State != nil && *n.State != ipn.Running { if n.State != nil && *n.State != ipn.Running {
// Something's gone wrong and we've left the authenticated state. // Something's gone wrong and we've left the authenticated state.
@ -438,7 +504,7 @@ runLoop:
// control flow required to make it work now is hard. So, just crash // control flow required to make it work now is hard. So, just crash
// the container and rely on the container runtime to restart us, // the container and rely on the container runtime to restart us,
// whereupon we'll go through initial auth again. // whereupon we'll go through initial auth again.
log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State) return fmt.Errorf("tailscaled left running state (now in state %q), exiting", *n.State)
} }
if n.NetMap != nil { if n.NetMap != nil {
addrs = n.NetMap.SelfNode.Addresses().AsSlice() addrs = n.NetMap.SelfNode.Addresses().AsSlice()
@ -455,8 +521,8 @@ runLoop:
// fails. // fails.
deviceID := n.NetMap.SelfNode.StableID() deviceID := n.NetMap.SelfNode.StableID()
if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceID, &deviceID) { if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceID, &deviceID) {
if err := storeDeviceID(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID()); err != nil { if err := kc.storeDeviceID(ctx, n.NetMap.SelfNode.StableID()); err != nil {
log.Fatalf("storing device ID in Kubernetes Secret: %v", err) return fmt.Errorf("storing device ID in Kubernetes Secret: %w", err)
} }
} }
if cfg.TailnetTargetFQDN != "" { if cfg.TailnetTargetFQDN != "" {
@ -493,12 +559,12 @@ runLoop:
rulesInstalled = true rulesInstalled = true
log.Printf("Installing forwarding rules for destination %v", ea.String()) log.Printf("Installing forwarding rules for destination %v", ea.String())
if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil { if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil {
log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err) return fmt.Errorf("installing egress proxy rules for destination %s: %v", ea.String(), err)
} }
} }
} }
if !rulesInstalled { if !rulesInstalled {
log.Fatalf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT()) return fmt.Errorf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT())
} }
} }
currentEgressIPs = newCurentEgressIPs currentEgressIPs = newCurentEgressIPs
@ -506,7 +572,7 @@ runLoop:
if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged { if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged {
log.Printf("Installing proxy rules") log.Printf("Installing proxy rules")
if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil { if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil {
log.Fatalf("installing ingress proxy rules: %v", err) return fmt.Errorf("installing ingress proxy rules: %w", err)
} }
} }
if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged { if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged {
@ -522,14 +588,17 @@ runLoop:
if backendsHaveChanged { if backendsHaveChanged {
log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs) log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs)
if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil { if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
log.Fatalf("error installing ingress proxy rules: %v", err) return fmt.Errorf("error installing ingress proxy rules: %w", err)
} }
} }
resetTimer(false) resetTimer(false)
backendAddrs = newBackendAddrs backendAddrs = newBackendAddrs
} }
if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) != 0 { if cfg.ServeConfigPath != "" {
cd := n.NetMap.DNS.CertDomains[0] cd := certDomainFromNetmap(n.NetMap)
if cd == "" {
cd = kubetypes.ValueNoHTTPS
}
prev := certDomain.Swap(ptr.To(cd)) prev := certDomain.Swap(ptr.To(cd))
if prev == nil || *prev != cd { if prev == nil || *prev != cd {
select { select {
@ -541,7 +610,7 @@ runLoop:
if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 { if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 {
log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP) log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP)
if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil { if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil {
log.Fatalf("installing egress proxy rules: %v", err) return fmt.Errorf("installing egress proxy rules: %w", err)
} }
} }
// If this is a L7 cluster ingress proxy (set up // If this is a L7 cluster ingress proxy (set up
@ -553,7 +622,7 @@ runLoop:
if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 { if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 {
log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP) log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP)
if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil { if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil {
log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err) return fmt.Errorf("installing rules to forward traffic to node's tailnet IP: %w", err)
} }
} }
currentIPs = newCurrentIPs currentIPs = newCurrentIPs
@ -571,17 +640,21 @@ runLoop:
// TODO (irbekrm): instead of using the IP and FQDN, have some other mechanism for the proxy signal that it is 'Ready'. // TODO (irbekrm): instead of using the IP and FQDN, have some other mechanism for the proxy signal that it is 'Ready'.
deviceEndpoints := []any{n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses()} deviceEndpoints := []any{n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses()}
if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceEndpoints, &deviceEndpoints) { if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceEndpoints, &deviceEndpoints) {
if err := storeDeviceEndpoints(ctx, cfg.KubeSecret, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil { if err := kc.storeDeviceEndpoints(ctx, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err) return fmt.Errorf("storing device IPs and FQDN in Kubernetes Secret: %w", err)
} }
} }
if cfg.HealthCheckAddrPort != "" { if healthCheck != nil {
h.Lock() healthCheck.Update(len(addrs) != 0)
h.hasAddrs = len(addrs) != 0
h.Unlock()
healthzRunner()
} }
if cfg.ServeConfigPath != "" {
triggerWatchServeConfigChanges.Do(func() {
go watchServeConfigChanges(ctx, certDomainChanged, certDomain, client, kc, cfg)
})
}
if egressSvcsNotify != nil { if egressSvcsNotify != nil {
egressSvcsNotify <- n egressSvcsNotify <- n
} }
@ -603,24 +676,42 @@ runLoop:
// will then continuously monitor the config file and netmap updates and // will then continuously monitor the config file and netmap updates and
// reconfigure the firewall rules as needed. If any of its operations fail, it // reconfigure the firewall rules as needed. If any of its operations fail, it
// will crash this node. // will crash this node.
if cfg.EgressSvcsCfgPath != "" { if cfg.EgressProxiesCfgPath != "" {
log.Printf("configuring egress proxy using configuration file at %s", cfg.EgressSvcsCfgPath) log.Printf("configuring egress proxy using configuration file at %s", cfg.EgressProxiesCfgPath)
egressSvcsNotify = make(chan ipn.Notify) egressSvcsNotify = make(chan ipn.Notify)
ep := egressProxy{ opts := egressProxyRunOpts{
cfgPath: cfg.EgressSvcsCfgPath, cfgPath: cfg.EgressProxiesCfgPath,
nfr: nfr, nfr: nfr,
kc: kc, kc: kc,
tsClient: client,
stateSecret: cfg.KubeSecret, stateSecret: cfg.KubeSecret,
netmapChan: egressSvcsNotify, netmapChan: egressSvcsNotify,
podIP: cfg.PodIP, podIPv4: cfg.PodIPv4,
tailnetAddrs: addrs, tailnetAddrs: addrs,
} }
go func() { go func() {
if err := ep.run(ctx, n); err != nil { if err := ep.run(ctx, n, opts); err != nil {
egressSvcsErrorChan <- err egressSvcsErrorChan <- err
} }
}() }()
} }
ip := ingressProxy{}
if cfg.IngressProxiesCfgPath != "" {
log.Printf("configuring ingress proxy using configuration file at %s", cfg.IngressProxiesCfgPath)
opts := ingressProxyOpts{
cfgPath: cfg.IngressProxiesCfgPath,
nfr: nfr,
kc: kc,
stateSecret: cfg.KubeSecret,
podIPv4: cfg.PodIPv4,
podIPv6: cfg.PodIPv6,
}
go func() {
if err := ip.run(ctx, opts); err != nil {
ingressSvcsErrorChan <- err
}
}()
}
// Wait on tailscaled process. It won't be cleaned up by default when the // Wait on tailscaled process. It won't be cleaned up by default when the
// container exits as it is not PID1. TODO (irbekrm): perhaps we can replace the // container exits as it is not PID1. TODO (irbekrm): perhaps we can replace the
@ -658,16 +749,20 @@ runLoop:
if backendsHaveChanged && len(addrs) != 0 { if backendsHaveChanged && len(addrs) != 0 {
log.Printf("Backend address change detected, installing proxy rules for backends %v", newBackendAddrs) log.Printf("Backend address change detected, installing proxy rules for backends %v", newBackendAddrs)
if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil { if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
log.Fatalf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err) return fmt.Errorf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err)
} }
} }
backendAddrs = newBackendAddrs backendAddrs = newBackendAddrs
resetTimer(false) resetTimer(false)
case e := <-egressSvcsErrorChan: case e := <-egressSvcsErrorChan:
log.Fatalf("egress proxy failed: %v", e) return fmt.Errorf("egress proxy failed: %v", e)
case e := <-ingressSvcsErrorChan:
return fmt.Errorf("ingress proxy failed: %v", e)
} }
} }
wg.Wait() wg.Wait()
return nil
} }
// ensureTunFile checks that /dev/net/tun exists, creating it if // ensureTunFile checks that /dev/net/tun exists, creating it if
@ -696,13 +791,13 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) {
ip4s, err := net.DefaultResolver.LookupIP(ctx, "ip4", name) ip4s, err := net.DefaultResolver.LookupIP(ctx, "ip4", name)
if err != nil { if err != nil {
if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) { if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) {
return nil, fmt.Errorf("error looking up IPv4 addresses: %v", err) return nil, fmt.Errorf("error looking up IPv4 addresses: %w", err)
} }
} }
ip6s, err := net.DefaultResolver.LookupIP(ctx, "ip6", name) ip6s, err := net.DefaultResolver.LookupIP(ctx, "ip6", name)
if err != nil { if err != nil {
if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) { if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) {
return nil, fmt.Errorf("error looking up IPv6 addresses: %v", err) return nil, fmt.Errorf("error looking up IPv6 addresses: %w", err)
} }
} }
if len(ip4s) == 0 && len(ip6s) == 0 { if len(ip4s) == 0 && len(ip6s) == 0 {
@ -715,7 +810,7 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) {
// context that gets cancelled when a signal is received and a cancel function // context that gets cancelled when a signal is received and a cancel function
// that can be called to free the resources when the watch should be stopped. // that can be called to free the resources when the watch should be stopped.
func contextWithExitSignalWatch() (context.Context, func()) { func contextWithExitSignalWatch() (context.Context, func()) {
closeChan := make(chan string) closeChan := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
signalChan := make(chan os.Signal, 1) signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
@ -727,8 +822,11 @@ func contextWithExitSignalWatch() (context.Context, func()) {
return return
} }
}() }()
closeOnce := sync.Once{}
f := func() { f := func() {
closeChan <- "goodbye" closeOnce.Do(func() {
close(closeChan)
})
} }
return ctx, f return ctx, f
} }
@ -758,7 +856,6 @@ func tailscaledConfigFilePath() string {
} }
cv, err := kubeutils.CapVerFromFileName(e.Name()) cv, err := kubeutils.CapVerFromFileName(e.Name())
if err != nil { if err != nil {
log.Printf("skipping file %q in tailscaled config directory %q: %v", e.Name(), dir, err)
continue continue
} }
if cv > maxCompatVer && cv <= tailcfg.CurrentCapabilityVersion { if cv > maxCompatVer && cv <= tailcfg.CurrentCapabilityVersion {
@ -766,8 +863,32 @@ func tailscaledConfigFilePath() string {
} }
} }
if maxCompatVer == -1 { if maxCompatVer == -1 {
log.Fatalf("no tailscaled config file found in %q for current capability version %q", dir, tailcfg.CurrentCapabilityVersion) log.Fatalf("no tailscaled config file found in %q for current capability version %d", dir, tailcfg.CurrentCapabilityVersion)
}
filePath := filepath.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer))
log.Printf("Using tailscaled config file %q to match current capability version %d", filePath, tailcfg.CurrentCapabilityVersion)
return filePath
}
func runHTTPServer(mux *http.ServeMux, addr string) (close func() error) {
ln, err := net.Listen("tcp", addr)
if err != nil {
log.Fatalf("failed to listen on addr %q: %v", addr, err)
}
srv := &http.Server{Handler: mux}
go func() {
if err := srv.Serve(ln); err != nil {
if err != http.ErrServerClosed {
log.Fatalf("failed running server: %v", err)
} else {
log.Printf("HTTP server at %s closed", addr)
}
}
}()
return func() error {
err := srv.Shutdown(context.Background())
return errors.Join(err, ln.Close())
} }
log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion)
return path.Join(dir, kubeutils.TailscaledConfigFileNameForCap(maxCompatVer))
} }

File diff suppressed because it is too large Load Diff

@ -17,8 +17,12 @@ import (
"time" "time"
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
"tailscale.com/client/tailscale" "tailscale.com/client/local"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/kube/certs"
"tailscale.com/kube/kubetypes"
klc "tailscale.com/kube/localclient"
"tailscale.com/types/netmap"
) )
// watchServeConfigChanges watches path for changes, and when it sees one, reads // watchServeConfigChanges watches path for changes, and when it sees one, reads
@ -26,27 +30,34 @@ import (
// applies it to lc. It exits when ctx is canceled. cdChanged is a channel that // applies it to lc. It exits when ctx is canceled. cdChanged is a channel that
// is written to when the certDomain changes, causing the serve config to be // is written to when the certDomain changes, causing the serve config to be
// re-read and applied. // re-read and applied.
func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient) { func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient, cfg *settings) {
if certDomainAtomic == nil { if certDomainAtomic == nil {
panic("cd must not be nil") panic("certDomainAtomic must not be nil")
} }
var tickChan <-chan time.Time var tickChan <-chan time.Time
var eventChan <-chan fsnotify.Event var eventChan <-chan fsnotify.Event
if w, err := fsnotify.NewWatcher(); err != nil { if w, err := fsnotify.NewWatcher(); err != nil {
log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor.
// See https://github.com/tailscale/tailscale/issues/15081
log.Printf("serve proxy: failed to create fsnotify watcher, timer-only mode: %v", err)
ticker := time.NewTicker(5 * time.Second) ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop() defer ticker.Stop()
tickChan = ticker.C tickChan = ticker.C
} else { } else {
defer w.Close() defer w.Close()
if err := w.Add(filepath.Dir(path)); err != nil { if err := w.Add(filepath.Dir(cfg.ServeConfigPath)); err != nil {
log.Fatalf("failed to add fsnotify watch: %v", err) log.Fatalf("serve proxy: failed to add fsnotify watch: %v", err)
} }
eventChan = w.Events eventChan = w.Events
} }
var certDomain string var certDomain string
var prevServeConfig *ipn.ServeConfig var prevServeConfig *ipn.ServeConfig
var cm *certs.CertManager
if cfg.CertShareMode == "rw" {
cm = certs.NewCertManager(klc.New(lc), log.Printf)
}
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -59,22 +70,77 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan
// k8s handles these mounts. So just re-read the file and apply it // k8s handles these mounts. So just re-read the file and apply it
// if it's changed. // if it's changed.
} }
if certDomain == "" { sc, err := readServeConfig(cfg.ServeConfigPath, certDomain)
continue
}
sc, err := readServeConfig(path, certDomain)
if err != nil { if err != nil {
log.Fatalf("failed to read serve config: %v", err) log.Fatalf("serve proxy: failed to read serve config: %v", err)
}
if sc == nil {
log.Printf("serve proxy: no serve config at %q, skipping", cfg.ServeConfigPath)
continue
} }
if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) {
continue continue
} }
log.Printf("Applying serve config") if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil {
if err := lc.SetServeConfig(ctx, sc); err != nil { log.Fatalf("serve proxy: error updating serve config: %v", err)
log.Fatalf("failed to set serve config: %v", err) }
if kc != nil && kc.canPatch {
if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil {
log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err)
}
} }
prevServeConfig = sc prevServeConfig = sc
if cfg.CertShareMode != "rw" {
continue
}
if err := cm.EnsureCertLoops(ctx, sc); err != nil {
log.Fatalf("serve proxy: error ensuring cert loops: %v", err)
}
}
} }
func certDomainFromNetmap(nm *netmap.NetworkMap) string {
if len(nm.DNS.CertDomains) == 0 {
return ""
}
return nm.DNS.CertDomains[0]
}
// localClient is a subset of [local.Client] that can be mocked for testing.
type localClient interface {
SetServeConfig(context.Context, *ipn.ServeConfig) error
CertPair(context.Context, string) ([]byte, []byte, error)
}
func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc localClient) error {
if !isValidHTTPSConfig(certDomain, sc) {
return nil
}
log.Printf("serve proxy: applying serve config")
return lc.SetServeConfig(ctx, sc)
}
func isValidHTTPSConfig(certDomain string, sc *ipn.ServeConfig) bool {
if certDomain == kubetypes.ValueNoHTTPS && hasHTTPSEndpoint(sc) {
log.Printf(
`serve proxy: this node is configured as a proxy that exposes an HTTPS endpoint to tailnet,
(perhaps a Kubernetes operator Ingress proxy) but it is not able to issue TLS certs, so this will likely not work.
To make it work, ensure that HTTPS is enabled for your tailnet, see https://tailscale.com/kb/1153/enabling-https for more details.`)
return false
}
return true
}
func hasHTTPSEndpoint(cfg *ipn.ServeConfig) bool {
if cfg == nil {
return false
}
for _, tcpCfg := range cfg.TCP {
if tcpCfg.HTTPS {
return true
}
}
return false
} }
// readServeConfig reads the ipn.ServeConfig from path, replacing // readServeConfig reads the ipn.ServeConfig from path, replacing
@ -85,8 +151,17 @@ func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) {
} }
j, err := os.ReadFile(path) j, err := os.ReadFile(path)
if err != nil { if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err return nil, err
} }
// Serve config can be provided by users as well as the Kubernetes Operator (for its proxies). User-provided
// config could be empty for reasons.
if len(j) == 0 {
log.Printf("serve proxy: serve config file is empty, skipping")
return nil, nil
}
j = bytes.ReplaceAll(j, []byte("${TS_CERT_DOMAIN}"), []byte(certDomain)) j = bytes.ReplaceAll(j, []byte("${TS_CERT_DOMAIN}"), []byte(certDomain))
var sc ipn.ServeConfig var sc ipn.ServeConfig
if err := json.Unmarshal(j, &sc); err != nil { if err := json.Unmarshal(j, &sc); err != nil {

@ -0,0 +1,271 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package main
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"tailscale.com/client/local"
"tailscale.com/ipn"
"tailscale.com/kube/kubetypes"
)
func TestUpdateServeConfig(t *testing.T) {
tests := []struct {
name string
sc *ipn.ServeConfig
certDomain string
wantCall bool
}{
{
name: "no_https_no_cert_domain",
sc: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
80: {HTTP: true},
},
},
certDomain: kubetypes.ValueNoHTTPS, // tailnet has HTTPS disabled
wantCall: true, // should set serve config as it doesn't have HTTPS endpoints
},
{
name: "https_with_cert_domain",
sc: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true},
},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"${TS_CERT_DOMAIN}:443": {
Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://10.0.1.100:8080"},
},
},
},
},
certDomain: "test-node.tailnet.ts.net",
wantCall: true,
},
{
name: "https_without_cert_domain",
sc: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true},
},
},
certDomain: kubetypes.ValueNoHTTPS,
wantCall: false, // incorrect configuration- should not set serve config
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakeLC := &fakeLocalClient{}
err := updateServeConfig(context.Background(), tt.sc, tt.certDomain, fakeLC)
if err != nil {
t.Errorf("updateServeConfig() error = %v", err)
}
if fakeLC.setServeCalled != tt.wantCall {
t.Errorf("SetServeConfig() called = %v, want %v", fakeLC.setServeCalled, tt.wantCall)
}
})
}
}
func TestReadServeConfig(t *testing.T) {
tests := []struct {
name string
gotSC string
certDomain string
wantSC *ipn.ServeConfig
wantErr bool
}{
{
name: "empty_file",
},
{
name: "valid_config_with_cert_domain_placeholder",
gotSC: `{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"${TS_CERT_DOMAIN}:443": {
"Handlers": {
"/api": {
"Proxy": "https://10.2.3.4/api"
}}}}}`,
certDomain: "example.com",
wantSC: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
HTTPS: true,
},
},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
ipn.HostPort("example.com:443"): {
Handlers: map[string]*ipn.HTTPHandler{
"/api": {
Proxy: "https://10.2.3.4/api",
},
},
},
},
},
},
{
name: "valid_config_for_http_proxy",
gotSC: `{
"TCP": {
"80": {
"HTTP": true
}
}}`,
wantSC: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
80: {
HTTP: true,
},
},
},
},
{
name: "config_without_cert_domain",
gotSC: `{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"localhost:443": {
"Handlers": {
"/api": {
"Proxy": "https://10.2.3.4/api"
}}}}}`,
certDomain: "",
wantErr: false,
wantSC: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
HTTPS: true,
},
},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
ipn.HostPort("localhost:443"): {
Handlers: map[string]*ipn.HTTPHandler{
"/api": {
Proxy: "https://10.2.3.4/api",
},
},
},
},
},
},
{
name: "invalid_json",
gotSC: "invalid json",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dir := t.TempDir()
path := filepath.Join(dir, "serve-config.json")
if err := os.WriteFile(path, []byte(tt.gotSC), 0644); err != nil {
t.Fatal(err)
}
got, err := readServeConfig(path, tt.certDomain)
if (err != nil) != tt.wantErr {
t.Errorf("readServeConfig() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(got, tt.wantSC) {
t.Errorf("readServeConfig() diff (-got +want):\n%s", cmp.Diff(got, tt.wantSC))
}
})
}
}
type fakeLocalClient struct {
*local.Client
setServeCalled bool
}
func (m *fakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConfig) error {
m.setServeCalled = true
return nil
}
func (m *fakeLocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
return nil, nil, nil
}
func TestHasHTTPSEndpoint(t *testing.T) {
tests := []struct {
name string
cfg *ipn.ServeConfig
want bool
}{
{
name: "nil_config",
cfg: nil,
want: false,
},
{
name: "empty_config",
cfg: &ipn.ServeConfig{},
want: false,
},
{
name: "no_https_endpoints",
cfg: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
80: {
HTTPS: false,
},
},
},
want: false,
},
{
name: "has_https_endpoint",
cfg: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
HTTPS: true,
},
},
},
want: true,
},
{
name: "mixed_endpoints",
cfg: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
80: {HTTPS: false},
443: {HTTPS: true},
},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := hasHTTPSEndpoint(tt.cfg)
if got != tt.want {
t.Errorf("hasHTTPSEndpoint() = %v, want %v", got, tt.want)
}
})
}
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save