Compare commits

..

10 Commits

Author SHA1 Message Date
Nick Khyl c50fe71822
VERSION.txt: this is v1.90.3
Signed-off-by: Nick Khyl <nickk@tailscale.com>
1 month ago
M. J. Fromberger 597acd8663
logtail: avoid racing eventbus subscriptions with Shutdown (#17639)
When the eventbus is enabled, set up the subscription for change deltas at the
beginning when the client is created, rather than waiting for the first
awaitInternetUp check.

Otherwise, it is possible for a check to race with the client close in
Shutdown, which triggers a panic.

Updates #17638

Change-Id: I461c07939eca46699072b14b1814ecf28eec750c
Signed-off-by: M. J. Fromberger <fromberger@tailscale.com>
(cherry picked from commit 4346615d77)
1 month ago
Claus Lensbøl e6a3669277
net/tsdial: do not panic if setting the same eventbus twice (#17640)
Updates #17638

Signed-off-by: Claus Lensbøl <claus@tailscale.com>
(cherry picked from commit fd0e541e5d)
1 month ago
Nick Khyl 8bcd44ecf0
VERSION.txt: this is v1.90.2
Signed-off-by: Nick Khyl <nickk@tailscale.com>
1 month ago
Claus Lensbøl b0f0bce928 health: compare warnable codes to avoid errors on release branch (#17637)
This compares the warnings we actually care about and skips the unstable
warnings and the changes with no warnings.

Fixes #17635

Signed-off-by: Claus Lensbøl <claus@tailscale.com>
(cherry picked from commit 7418583e47)
1 month ago
Brad Fitzpatrick c81ef9055b util/linuxfw: fix 32-bit arm regression with iptables
This fixes a regression from dd615c8fdd that moved the
newIPTablesRunner constructor from a any-Linux-GOARCH file to one that
was only amd64 and arm64, thus breaking iptables on other platforms
(notably 32-bit "arm", as seen on older Pis running Buster with
iptables)

Tested by hand on a Raspberry Pi 2 w/ Buster + iptables for now, for
lack of automated 32-bit arm tests at the moment. But filed #17629.

Fixes #17623
Updates #17629

Change-Id: Iac1a3d78f35d8428821b46f0fed3f3717891c1bd
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
(cherry picked from commit 8576a802ca)
1 month ago
Patrick O'Doherty 9fe44b3718 feature/tpm: use withSRK to probe TPM availability (#17627)
On some platforms e.g. ChromeOS the owner hierarchy might not always be
available to us. To avoid stale sealing exceptions later we probe to
confirm it's working rather than rely solely on family indicator status.

Updates #17622

Signed-off-by: Patrick O'Doherty <patrick@tailscale.com>
(cherry picked from commit 672b1f0e76)
1 month ago
Patrick O'Doherty a8ae316858 feature/tpm: check TPM family data for compatibility (#17624)
Check that the TPM we have opened is advertised as a 2.0 family device
before using it for state sealing / hardware attestation.

Updates #17622

Signed-off-by: Patrick O'Doherty <patrick@tailscale.com>
(cherry picked from commit 36ad24b20f)
1 month ago
Nick Khyl 75b0c6f164 VERSION.txt: this is v1.90.1
Signed-off-by: Nick Khyl <nickk@tailscale.com>
1 month ago
Nick Khyl 3c78146ece VERSION.txt: this is v1.90.0
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2 months ago

@ -1,78 +0,0 @@
#!/usr/bin/env bash
#
# This script sets up cigocacher, but should never fail the build if unsuccessful.
# It expects to run on a GitHub-hosted runner, and connects to cigocached over a
# private Azure network that is configured at the runner group level in GitHub.
#
# Usage: ./action.sh
# Inputs:
# URL: The cigocached server URL.
# Outputs:
# success: Whether cigocacher was set up successfully.
set -euo pipefail
if [ -z "${GITHUB_ACTIONS:-}" ]; then
echo "This script is intended to run within GitHub Actions"
exit 1
fi
if [ -z "${URL:-}" ]; then
echo "No cigocached URL is set, skipping cigocacher setup"
exit 0
fi
curl_and_parse() {
local jq_filter="$1"
local step="$2"
shift 2
local response
local curl_exit
response="$(curl -sSL "$@" 2>&1)" || curl_exit="$?"
if [ "${curl_exit:-0}" -ne "0" ]; then
echo "${step}: ${response}" >&2
return 1
fi
local parsed
local jq_exit
parsed=$(echo "${response}" | jq -e -r "${jq_filter}" 2>&1) || jq_exit=$?
if [ "${jq_exit:-0}" -ne "0" ]; then
echo "${step}: Failed to parse JSON response:" >&2
echo "${response}" >&2
return 1
fi
echo "${parsed}"
return 0
}
JWT="$(curl_and_parse ".value" "Fetching GitHub identity JWT" \
-H "Authorization: Bearer ${ACTIONS_ID_TOKEN_REQUEST_TOKEN}" \
"${ACTIONS_ID_TOKEN_REQUEST_URL}&audience=gocached")" || exit 0
# cigocached serves a TLS cert with an FQDN, but DNS is based on VM name.
HOST_AND_PORT="${URL#http*://}"
FIRST_LABEL="${HOST_AND_PORT/.*/}"
# Save CONNECT_TO for later steps to use.
echo "CONNECT_TO=${HOST_AND_PORT}:${FIRST_LABEL}:" >> "${GITHUB_ENV}"
BODY="$(jq -n --arg jwt "$JWT" '{"jwt": $jwt}')"
CIGOCACHER_TOKEN="$(curl_and_parse ".access_token" "Exchanging token with cigocached" \
--connect-to "${HOST_AND_PORT}:${FIRST_LABEL}:" \
-H "Content-Type: application/json" \
"$URL/auth/exchange-token" \
-d "$BODY")" || exit 0
# Wait until we successfully auth before building cigocacher to ensure we know
# it's worth building.
# TODO(tomhjp): bake cigocacher into runner image and use it for auth.
echo "Fetched cigocacher token successfully"
echo "::add-mask::${CIGOCACHER_TOKEN}"
echo "CIGOCACHER_TOKEN=${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}"
BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(go env GOEXE)"
go build -o "${BIN_PATH}" ./cmd/cigocacher
echo "GOCACHEPROG=${BIN_PATH} --cache-dir ${CACHE_DIR} --cigocached-url ${URL} --token ${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}"
echo "success=true" >> "${GITHUB_OUTPUT}"

@ -1,30 +0,0 @@
name: go-cache
description: Set up build to use cigocacher
inputs:
cigocached-url:
description: URL of the cigocached server
required: true
checkout-path:
description: Path to cloned repository
required: true
cache-dir:
description: Directory to use for caching
required: true
outputs:
success:
description: Whether cigocacher was set up successfully
value: ${{ steps.setup.outputs.success }}
runs:
using: composite
steps:
- name: Setup cigocacher
id: setup
shell: bash
env:
URL: ${{ inputs.cigocached-url }}
CACHE_DIR: ${{ inputs.cache-dir }}
working-directory: ${{ inputs.checkout-path }}
run: .github/actions/go-cache/action.sh

@ -4,6 +4,8 @@ on:
branches: branches:
- main - main
pull_request: pull_request:
branches:
- "*"
jobs: jobs:
deploy: deploy:
runs-on: ubuntu-latest runs-on: ubuntu-latest

@ -2,11 +2,7 @@ name: golangci-lint
on: on:
# For now, only lint pull requests, not the main branches. # For now, only lint pull requests, not the main branches.
pull_request: pull_request:
paths:
- ".github/workflows/golangci-lint.yml"
- "**.go"
- "go.mod"
- "go.sum"
# TODO(andrew): enable for main branch after an initial waiting period. # TODO(andrew): enable for main branch after an initial waiting period.
#push: #push:
# branches: # branches:

@ -10,6 +10,8 @@ on:
- scripts/installer.sh - scripts/installer.sh
- .github/workflows/installer.yml - .github/workflows/installer.yml
pull_request: pull_request:
branches:
- "*"
paths: paths:
- scripts/installer.sh - scripts/installer.sh
- .github/workflows/installer.yml - .github/workflows/installer.yml
@ -58,14 +60,6 @@ jobs:
# Check a few images with wget rather than curl. # Check a few images with wget rather than curl.
- { image: "debian:oldstable-slim", deps: "wget" } - { image: "debian:oldstable-slim", deps: "wget" }
- { image: "debian:sid-slim", deps: "wget" } - { image: "debian:sid-slim", deps: "wget" }
- { image: "debian:stable-slim", deps: "curl" }
- { image: "ubuntu:24.04", deps: "curl" }
- { image: "fedora:latest", deps: "curl" }
# Test TAILSCALE_VERSION pinning on a subset of distros.
# Skip Alpine as community repos don't reliably keep old versions.
- { image: "debian:stable-slim", deps: "curl", version: "1.80.0" }
- { image: "ubuntu:24.04", deps: "curl", version: "1.80.0" }
- { image: "fedora:latest", deps: "curl", version: "1.80.0" }
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: ${{ matrix.image }} image: ${{ matrix.image }}
@ -102,18 +96,12 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: run installer - name: run installer
run: scripts/installer.sh run: scripts/installer.sh
env:
TAILSCALE_VERSION: ${{ matrix.version }}
# Package installation can fail in docker because systemd is not running # Package installation can fail in docker because systemd is not running
# as PID 1, so ignore errors at this step. The real check is the # as PID 1, so ignore errors at this step. The real check is the
# `tailscale --version` command below. # `tailscale --version` command below.
continue-on-error: true continue-on-error: true
- name: check tailscale version - name: check tailscale version
run: | run: tailscale --version
tailscale --version
if [ -n "${{ matrix.version }}" ]; then
tailscale --version | grep -q "^${{ matrix.version }}" || { echo "Version mismatch!"; exit 1; }
fi
notify-slack: notify-slack:
needs: test needs: test
runs-on: ubuntu-latest runs-on: ubuntu-latest

@ -2,7 +2,8 @@ name: request-dataplane-review
on: on:
pull_request: pull_request:
types: [ opened, synchronize, reopened, ready_for_review ] branches:
- "*"
paths: paths:
- ".github/workflows/request-dataplane-review.yml" - ".github/workflows/request-dataplane-review.yml"
- "**/*derp*" - "**/*derp*"
@ -11,7 +12,6 @@ on:
jobs: jobs:
request-dataplane-review: request-dataplane-review:
if: github.event.pull_request.draft == false
name: Request Dataplane Review name: Request Dataplane Review
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:

@ -136,20 +136,21 @@ jobs:
key: ${{ needs.gomod-cache.outputs.cache-key }} key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true enableCrossOsArchive: true
- name: Restore Cache - name: Restore Cache
id: restore-cache uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: this is only restoring the build cache. Mod cache is shared amongst # Note: unlike the other setups, this is only grabbing the mod download
# all jobs in the workflow. # cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~\AppData\Local\go-build ~\AppData\Local\go-build
key: ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: | restore-keys: |
${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}- ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}
${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}- ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-
${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-
${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-
- name: build all - name: build all
if: matrix.buildflags == '' # skip on race builder if: matrix.buildflags == '' # skip on race builder
working-directory: src working-directory: src
@ -205,26 +206,12 @@ jobs:
shell: bash shell: bash
run: | run: |
find $(go env GOCACHE) -type f -mmin +90 -delete find $(go env GOCACHE) -type f -mmin +90 -delete
- name: Save Cache
# Save cache even on failure, but only on cache miss and main branch to avoid thrashing.
if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main'
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
# Note: this is only saving the build cache. Mod cache is shared amongst
# all jobs in the workflow.
path: |
~/.cache/go-build
~\AppData\Local\go-build
key: ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }}
windows: windows:
permissions: # windows-8vpu is a 2022 GitHub-managed runner in our
id-token: write # This is required for requesting the GitHub action identity JWT that can auth to cigocached # org with 8 cores and 32 GB of RAM:
contents: read # This is required for actions/checkout # https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/1
# ci-windows-github-1 is a 2022 GitHub-managed runner in our org with 8 cores runs-on: windows-8vcpu
# and 32 GB of RAM. It is connected to a private Azure VNet that hosts cigocached.
# https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/5
runs-on: ci-windows-github-1
needs: gomod-cache needs: gomod-cache
name: Windows (${{ matrix.name || matrix.shard}}) name: Windows (${{ matrix.name || matrix.shard}})
strategy: strategy:
@ -233,6 +220,8 @@ jobs:
include: include:
- key: "win-bench" - key: "win-bench"
name: "benchmarks" name: "benchmarks"
- key: "win-tool-go"
name: "./tool/go"
- key: "win-shard-1-2" - key: "win-shard-1-2"
shard: "1/2" shard: "1/2"
- key: "win-shard-2-2" - key: "win-shard-2-2"
@ -241,31 +230,44 @@ jobs:
- name: checkout - name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with: with:
path: ${{ github.workspace }}/src path: src
- name: Install Go - name: Install Go
if: matrix.key != 'win-tool-go'
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with: with:
go-version-file: ${{ github.workspace }}/src/go.mod go-version-file: src/go.mod
cache: false cache: false
- name: Restore Go module cache - name: Restore Go module cache
if: matrix.key != 'win-tool-go'
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
path: gomodcache path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }} key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true enableCrossOsArchive: true
- name: Set up cigocacher - name: Restore Cache
id: cigocacher-setup if: matrix.key != 'win-tool-go'
uses: ./src/.github/actions/go-cache uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
checkout-path: ${{ github.workspace }}/src path: |
cache-dir: ${{ github.workspace }}/cigocacher ~/.cache/go-build
cigocached-url: ${{ vars.CIGOCACHED_AZURE_URL }} ~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ matrix.key }}-go-2-
- name: test-tool-go
if: matrix.key == 'win-tool-go'
working-directory: src
run: ./tool/go version
- name: test - name: test
if: matrix.key != 'win-bench' # skip on bench builder if: matrix.key != 'win-bench' && matrix.key != 'win-tool-go' # skip on bench builder
working-directory: src working-directory: src
run: go run ./cmd/testwrapper sharded:${{ matrix.shard }} run: go run ./cmd/testwrapper sharded:${{ matrix.shard }}
@ -277,24 +279,12 @@ jobs:
# the equals signs cause great confusion. # the equals signs cause great confusion.
run: go test ./... -bench . -benchtime 1x -run "^$" run: go test ./... -bench . -benchtime 1x -run "^$"
- name: Print stats - name: Tidy cache
if: matrix.key != 'win-tool-go'
working-directory: src
shell: bash shell: bash
if: steps.cigocacher-setup.outputs.success == 'true'
run: | run: |
curl -sSL --connect-to "${CONNECT_TO}" -H "Authorization: Bearer ${CIGOCACHER_TOKEN}" "${{ vars.CIGOCACHED_AZURE_URL }}/session/stats" | jq . find $(go env GOCACHE) -type f -mmin +90 -delete
win-tool-go:
runs-on: windows-latest
needs: gomod-cache
name: Windows (win-tool-go)
steps:
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: test-tool-go
working-directory: src
run: ./tool/go version
privileged: privileged:
needs: gomod-cache needs: gomod-cache
@ -386,26 +376,28 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with: with:
path: src path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: Restore Cache - name: Restore Cache
id: restore-cache uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: this is only restoring the build cache. Mod cache is shared amongst # Note: unlike the other setups, this is only grabbing the mod download
# all jobs in the workflow. # cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~\AppData\Local\go-build ~\AppData\Local\go-build
key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: | restore-keys: |
${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}- ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go- - name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build all - name: build all
working-directory: src working-directory: src
run: ./tool/go build ./cmd/... run: ./tool/go build ./cmd/...
@ -426,17 +418,6 @@ jobs:
shell: bash shell: bash
run: | run: |
find $(go env GOCACHE) -type f -mmin +90 -delete find $(go env GOCACHE) -type f -mmin +90 -delete
- name: Save Cache
# Save cache even on failure, but only on cache miss and main branch to avoid thrashing.
if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main'
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
# Note: this is only saving the build cache. Mod cache is shared amongst
# all jobs in the workflow.
path: |
~/.cache/go-build
~\AppData\Local\go-build
key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }}
ios: # similar to cross above, but iOS can't build most of the repo. So, just ios: # similar to cross above, but iOS can't build most of the repo. So, just
# make it build a few smoke packages. # make it build a few smoke packages.
@ -485,26 +466,28 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with: with:
path: src path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: Restore Cache - name: Restore Cache
id: restore-cache uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: this is only restoring the build cache. Mod cache is shared amongst # Note: unlike the other setups, this is only grabbing the mod download
# all jobs in the workflow. # cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~\AppData\Local\go-build ~\AppData\Local\go-build
key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: | restore-keys: |
${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}- ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go- - name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build core - name: build core
working-directory: src working-directory: src
run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled
@ -518,17 +501,6 @@ jobs:
shell: bash shell: bash
run: | run: |
find $(go env GOCACHE) -type f -mmin +90 -delete find $(go env GOCACHE) -type f -mmin +90 -delete
- name: Save Cache
# Save cache even on failure, but only on cache miss and main branch to avoid thrashing.
if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main'
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
# Note: this is only saving the build cache. Mod cache is shared amongst
# all jobs in the workflow.
path: |
~/.cache/go-build
~\AppData\Local\go-build
key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }}
android: android:
# similar to cross above, but android fails to build a few pieces of the # similar to cross above, but android fails to build a few pieces of the
@ -566,26 +538,28 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with: with:
path: src path: src
- name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: Restore Cache - name: Restore Cache
id: restore-cache uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with: with:
# Note: this is only restoring the build cache. Mod cache is shared amongst # Note: unlike the other setups, this is only grabbing the mod download
# all jobs in the workflow. # cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: | path: |
~/.cache/go-build ~/.cache/go-build
~\AppData\Local\go-build ~\AppData\Local\go-build
key: ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} # The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: | restore-keys: |
${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}- ${{ github.job }}-${{ runner.os }}-go-2-
${{ runner.os }}-js-wasm-go- - name: Restore Go module cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: gomodcache
key: ${{ needs.gomod-cache.outputs.cache-key }}
enableCrossOsArchive: true
- name: build tsconnect client - name: build tsconnect client
working-directory: src working-directory: src
run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli
@ -604,17 +578,6 @@ jobs:
shell: bash shell: bash
run: | run: |
find $(go env GOCACHE) -type f -mmin +90 -delete find $(go env GOCACHE) -type f -mmin +90 -delete
- name: Save Cache
# Save cache even on failure, but only on cache miss and main branch to avoid thrashing.
if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main'
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
# Note: this is only saving the build cache. Mod cache is shared amongst
# all jobs in the workflow.
path: |
~/.cache/go-build
~\AppData\Local\go-build
key: ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }}
tailscale_go: # Subset of tests that depend on our custom Go toolchain. tailscale_go: # Subset of tests that depend on our custom Go toolchain.
runs-on: ubuntu-24.04 runs-on: ubuntu-24.04
@ -650,9 +613,7 @@ jobs:
steps: steps:
- name: build fuzzers - name: build fuzzers
id: build id: build
# As of 21 October 2025, this repo doesn't tag releases, so this commit uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
# hash is just the tip of master.
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264
# continue-on-error makes steps.build.conclusion be 'success' even if # continue-on-error makes steps.build.conclusion be 'success' even if
# steps.build.outcome is 'failure'. This means this step does not # steps.build.outcome is 'failure'. This means this step does not
# contribute to the job's overall pass/fail evaluation. # contribute to the job's overall pass/fail evaluation.
@ -682,9 +643,7 @@ jobs:
# report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong # report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong
# value. # value.
if: steps.build.outcome == 'success' if: steps.build.outcome == 'success'
# As of 21 October 2025, this repo doesn't tag releases, so this commit uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
# hash is just the tip of master.
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264
with: with:
oss-fuzz-project-name: 'tailscale' oss-fuzz-project-name: 'tailscale'
fuzz-seconds: 150 fuzz-seconds: 150
@ -740,7 +699,6 @@ jobs:
run: | run: |
pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp') pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp')
./tool/go generate $pkgs ./tool/go generate $pkgs
git add -N . # ensure untracked files are noticed
echo echo
echo echo
git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1) git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1)

@ -1,38 +0,0 @@
name: tailscale.com/cmd/vet
env:
HOME: ${{ github.workspace }}
# GOMODCACHE is the same definition on all OSes. Within the workspace, we use
# toplevel directories "src" (for the checked out source code), and "gomodcache"
# and other caches as siblings to follow.
GOMODCACHE: ${{ github.workspace }}/gomodcache
on:
push:
branches:
- main
- "release-branch/*"
paths:
- "**.go"
pull_request:
paths:
- "**.go"
jobs:
vet:
runs-on: [ self-hosted, linux ]
timeout-minutes: 5
steps:
- name: Check out code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
- name: Build 'go vet' tool
working-directory: src
run: ./tool/go build -o /tmp/vettool tailscale.com/cmd/vet
- name: Run 'go vet'
working-directory: src
run: ./tool/go vet -vettool=/tmp/vettool tailscale.com/...

@ -3,6 +3,8 @@ on:
workflow_dispatch: workflow_dispatch:
# For now, only run on requests, not the main branches. # For now, only run on requests, not the main branches.
pull_request: pull_request:
branches:
- "*"
paths: paths:
- "client/web/**" - "client/web/**"
- ".github/workflows/webclient.yml" - ".github/workflows/webclient.yml"

@ -1,103 +1,147 @@
# Tailscale Community Code of Conduct # Contributor Covenant Code of Conduct
## Our Pledge ## Our Pledge
We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community. We are committed to creating an open, welcoming, diverse, inclusive,
Unacceptable, harmful and inappropriate behavior will not be tolerated. healthy and respectful community.
## Our Standards ## Our Standards
Examples of behavior that contributes to a positive environment for our community include: Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people. * Demonstrating empathy and kindness toward other people.
- Being respectful of differing opinions, viewpoints, and experiences. * Being respectful of differing opinions, viewpoints, and experiences.
- Giving and gracefully accepting constructive feedback. * Giving and gracefully accepting constructive feedback.
- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience. * Accepting responsibility and apologizing to those affected by our
- Focusing on what is best not just for us as individuals, but for the overall community. mistakes, and learning from the experience.
* Focusing on what is best not just for us as individuals, but for the
overall community.
Examples of unacceptable behavior include without limitation: Examples of unacceptable behavior include without limitation:
* The use of sexualized language or imagery, and sexual attention or
- The use of language, imagery or emojis (collectively "content") that is racist, sexist, homophobic, transphobic, or otherwise harassing or discriminatory based on any protected characteristic. advances of any kind.
- The use of sexualized content and sexual attention or advances of any kind. * The use of violent, intimidating or bullying language or imagery.
- The use of violent, intimidating or bullying content. * Trolling, insulting or derogatory comments, and personal or
- Trolling, concern trolling, insulting or derogatory comments, and personal or political attacks. political attacks.
- Public or private harassment. * Public or private harassment.
- Publishing others' personal information, such as a photo, physical address, email address, online profile information, or other personal information, without their explicit permission or with the intent to bully or harass the other person. * Publishing others' private information, such as a physical or email
- Posting deep fake or other AI generated content about or involving another person without the explicit permission. address, without their explicit permission.
- Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages. * Spamming community channels and members, such as sending repeat messages,
- Phishing or any similar activity. low-effort content, or automated messages.
- Distributing or promoting malware. * Phishing or any similar activity;
- The use of any coded or suggestive content to hide or provoke otherwise unacceptable behavior. * Distributing or promoting malware;
- Other conduct which could reasonably be considered harmful, illegal, or inappropriate in a professional setting. * Other conduct which could reasonably be considered inappropriate in a
professional setting.
Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
Please also see the Tailscale Acceptable Use Policy, available at
## Reporting Incidents [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to Tailscale directly via <info@tailscale.com>, or to the community leaders or moderators via DM or similar. # Reporting Incidents
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported to Tailscale directly via info@tailscale.com, or to
the community leaders or moderators via DM or similar.
All complaints will be reviewed and investigated promptly and fairly. All complaints will be reviewed and investigated promptly and fairly.
We will respect the privacy and safety of the reporter of any issues. We will respect the privacy and safety of the reporter of any issues.
Please note that this community is not moderated by staff 24/7, and we do not have, and do not undertake, any obligation to prescreen, monitor, edit, or remove any content or data, or to actively seek facts or circumstances indicating illegal activity. Please note that this community is not moderated by staff 24/7, and we
While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours. do not have, and do not undertake, any obligation to prescreen, monitor,
edit, or remove any content or data, or to actively seek facts or
circumstances indicating illegal activity. While we strive to keep the
community safe and welcoming, moderation may not be immediate at all hours.
If you encounter any issues, report them using the appropriate channels. If you encounter any issues, report them using the appropriate channels.
## Enforcement Guidelines ## Enforcement
Community leaders and moderators are responsible for clarifying and
enforcing our standards of acceptable behavior and will take appropriate
and fair corrective action in response to any behavior that they deem
inappropriate, threatening, offensive, or harmful.
Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders and moderators have the right and responsibility to remove,
edit, or reject comments, commits, code, wiki edits, issues, and other
contributions that are not aligned to this Community Code of Conduct.
Tailscale retains full discretion to take action (or not) in response
to a violation of these guidelines with or without notice or liability
to you. We will interpret our policies and resolve disputes in favor of
protecting users, customers, the public, our community and our company,
as a whole.
Community leaders and moderators have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Community Code of Conduct. ## Enforcement Guidelines
Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you.
We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole.
Community leaders will follow these community enforcement guidelines in determining the consequences for any action they deem in violation of this Code of Conduct, Community leaders will follow these Community Impact Guidelines in
and retain full discretion to apply the enforcement guidelines as necessary depending on the circumstances: determining the consequences for any action they deem in violation of
this Code of Conduct:
### 1. Correction ### 1. Correction
Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. Community Impact: Use of inappropriate language or other behavior
deemed unprofessional or unwelcome in the community.
Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. Consequence: A private, written warning from community leaders,
A public apology may be requested. providing clarity around the nature of the violation and an
explanation of why the behavior was inappropriate. A public apology
may be requested.
### 2. Warning ### 2. Warning
Community Impact: A violation through a single incident or series of actions. Community Impact: A violation through a single incident or series
of actions.
Consequence: A warning with consequences for continued behavior. Consequence: A warning with consequences for continued
No interaction with the people involved, including unsolicited interaction with those enforcing this Community Code of Conduct, for a specified period of time. behavior. No interaction with the people involved, including
This includes avoiding interactions in community spaces as well as external channels like social media. unsolicited interaction with those enforcing this Community Code of Conduct,
Violating these terms may lead to a temporary or permanent ban. for a specified period of time. This includes avoiding interactions in
community spaces as well as external channels like social
media. Violating these terms may lead to a temporary or permanent ban.
### 3. Temporary Ban ### 3. Temporary Ban
Community Impact: A serious violation of community standards, including sustained inappropriate behavior. Community Impact: A serious violation of community standards,
including sustained inappropriate behavior.
Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. Consequence: A temporary ban from any sort of interaction or
No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. public communication with the community for a specified period of
time. No public or private interaction with the people involved,
including unsolicited interaction with those enforcing the Code of Conduct,
is allowed during this period. Violating these terms may lead to a permanent ban.
### 4. Permanent Ban ### 4. Permanent Ban
Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. Community Impact: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of
an individual, or aggression toward or disparagement of
classes of individuals.
Consequence: A permanent ban from any sort of public interaction within the community. Consequence: A permanent ban from any sort of public interaction
within the community.
## Acceptable Use Policy ## Acceptable Use Policy
Violation of this Community Code of Conduct may also violate the Tailscale Acceptable Use Policy, which may result in suspension or termination of your Tailscale account. Violation of this Community Code of Conduct may also violate the
For more information, please see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). Tailscale Acceptable Use Policy, which may result in suspension or
termination of your Tailscale account. For more information, please
see the Tailscale Acceptable Use Policy, available at
[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
## Privacy ## Privacy
Please see the Tailscale [Privacy Policy](https://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy)
for more information about how Tailscale collects, uses, discloses and protects
information.
## Attribution ## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at <https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>. This Code of Conduct is adapted from the [Contributor
Covenant][homepage], version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). Community Impact Guidelines were inspired by [Mozilla's code of
conduct enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org [homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at <https://www.contributor-covenant.org/faq>. For answers to common questions about this code of conduct, see the
Translations are available at <https://www.contributor-covenant.org/translations>. FAQ at https://www.contributor-covenant.org/faq. Translations are
available at https://www.contributor-covenant.org/translations.

@ -1 +1 @@
1.93.0 1.90.3

@ -16,9 +16,9 @@ import (
"net/netip" "net/netip"
"slices" "slices"
"strings" "strings"
"sync"
"time" "time"
"tailscale.com/syncs"
"tailscale.com/types/appctype" "tailscale.com/types/appctype"
"tailscale.com/types/logger" "tailscale.com/types/logger"
"tailscale.com/types/views" "tailscale.com/types/views"
@ -139,7 +139,7 @@ type AppConnector struct {
hasStoredRoutes bool hasStoredRoutes bool
// mu guards the fields that follow // mu guards the fields that follow
mu syncs.Mutex mu sync.Mutex
// domains is a map of lower case domain names with no trailing dot, to an // domains is a map of lower case domain names with no trailing dot, to an
// ordered list of resolved IP addresses. // ordered list of resolved IP addresses.
@ -203,12 +203,12 @@ func NewAppConnector(c Config) *AppConnector {
ac.wildcards = c.RouteInfo.Wildcards ac.wildcards = c.RouteInfo.Wildcards
ac.controlRoutes = c.RouteInfo.Control ac.controlRoutes = c.RouteInfo.Control
} }
ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, ln int64) { ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) {
ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, ln) ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l)
metricStoreRoutes(c, ln) metricStoreRoutes(c, l)
}) })
ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, ln int64) { ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) {
ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, ln) ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l)
}) })
return ac return ac
} }
@ -510,8 +510,8 @@ func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) {
slices.SortFunc(e.domains[domain], compareAddr) slices.SortFunc(e.domains[domain], compareAddr)
} }
func compareAddr(a, b netip.Addr) int { func compareAddr(l, r netip.Addr) int {
return a.Compare(b) return l.Compare(r)
} }
// routesWithout returns a without b where a and b // routesWithout returns a without b where a and b

@ -1,61 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package appc
import (
"errors"
"net/netip"
"go4.org/netipx"
)
// errPoolExhausted is returned when there are no more addresses to iterate over.
var errPoolExhausted = errors.New("ip pool exhausted")

// ippool allows for iteration over all the addresses within a netipx.IPSet.
//
// netipx.IPSet has a Ranges call that returns the "minimum and sorted set of
// IP ranges that covers [the set]". netipx.IPRange is "an inclusive range of
// IP addresses from the same address family.". So we can iterate over all the
// addresses in the set by keeping track of the last address we returned,
// calling Next on the last address to get the new one, and if we run off the
// edge of the current range, starting on the next one.
//
// The zero value is an empty pool; next on it reports errPoolExhausted.
// ippool is not safe for concurrent use.
type ippool struct {
	// ranges defines the addresses in the pool, sorted and non-overlapping
	// as produced by netipx.IPSet.Ranges.
	ranges []netipx.IPRange
	// last is internal tracking of which the last address provided was.
	// It is the zero (invalid) netip.Addr until the first next call.
	last netip.Addr
	// rangeIdx is internal tracking of which netipx.IPRange from the IPSet
	// we are currently on. Once it reaches len(ranges), the pool is exhausted.
	rangeIdx int
}
// newIPPool returns an ippool covering every address in ipset.
// A nil ipset produces an empty pool, whose next call reports
// errPoolExhausted immediately.
func newIPPool(ipset *netipx.IPSet) *ippool {
	pool := &ippool{}
	if ipset != nil {
		pool.ranges = ipset.Ranges()
	}
	return pool
}
// next returns the next address from the set, or errPoolExhausted if we have
// iterated over the whole set. Once exhausted, every subsequent call keeps
// returning errPoolExhausted.
func (ipp *ippool) next() (netip.Addr, error) {
	if ipp.rangeIdx >= len(ipp.ranges) {
		// Empty pool, or we previously ran off the end.
		return netip.Addr{}, errPoolExhausted
	}
	switch {
	case !ipp.last.IsValid():
		// First call: start at the beginning of the first range.
		ipp.last = ipp.ranges[0].From()
	case ipp.last == ipp.ranges[ipp.rangeIdx].To():
		// Current range is used up; move on to the next one, if any.
		ipp.rangeIdx++
		if ipp.rangeIdx >= len(ipp.ranges) {
			return netip.Addr{}, errPoolExhausted
		}
		ipp.last = ipp.ranges[ipp.rangeIdx].From()
	default:
		// Still inside the current range: step forward one address.
		ipp.last = ipp.last.Next()
	}
	return ipp.last, nil
}

@ -1,60 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package appc
import (
"errors"
"net/netip"
"testing"
"go4.org/netipx"
"tailscale.com/util/must"
)
// TestNext exercises ippool iteration: the zero pool, a pool over an empty
// IPSet, ordered traversal across multiple ranges, and sticky exhaustion.
func TestNext(t *testing.T) {
	// A zero-value pool is immediately exhausted.
	var zero ippool
	if _, err := zero.next(); !errors.Is(err, errPoolExhausted) {
		t.Fatalf("expected errPoolExhausted, got %v", err)
	}

	// A pool built from an empty IPSet behaves the same way.
	var isb netipx.IPSetBuilder
	if _, err := newIPPool(must.Get(isb.IPSet())).next(); !errors.Is(err, errPoolExhausted) {
		t.Fatalf("expected errPoolExhausted, got %v", err)
	}

	// Three ranges: a multi-address range, a single-address range, and a
	// two-address range. Iteration must visit each address in order.
	isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("192.168.0.0"), netip.MustParseAddr("192.168.0.2")))
	isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("200.0.0.0"), netip.MustParseAddr("200.0.0.0")))
	isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("201.0.0.0"), netip.MustParseAddr("201.0.0.1")))
	pool := newIPPool(must.Get(isb.IPSet()))
	for i, want := range []string{
		"192.168.0.0",
		"192.168.0.1",
		"192.168.0.2",
		"200.0.0.0",
		"201.0.0.0",
		"201.0.0.1",
	} {
		addr, err := pool.next()
		if err != nil {
			t.Fatal(err)
		}
		if addr != netip.MustParseAddr(want) {
			t.Fatalf("next call %d want: %s, got: %v", i, want, addr)
		}
	}

	// Exhaustion is sticky: repeated calls keep failing.
	for i := 0; i < 2; i++ {
		if _, err := pool.next(); !errors.Is(err, errPoolExhausted) {
			t.Fatalf("expected errPoolExhausted, got %v", err)
		}
	}
}

@ -31,11 +31,11 @@ func TestDoesNotOverwriteIrregularFiles(t *testing.T) {
// The least troublesome thing to make that is not a file is a unix socket. // The least troublesome thing to make that is not a file is a unix socket.
// Making a null device sadly requires root. // Making a null device sadly requires root.
ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ln.Close() defer l.Close()
err = WriteFile(path, []byte("hello"), 0644) err = WriteFile(path, []byte("hello"), 0644)
if err == nil { if err == nil {

@ -44,7 +44,7 @@ var (
) )
func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) { func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) {
r1, _, e1 := syscall.SyscallN(procReplaceFileW.Addr(), uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) r1, _, e1 := syscall.Syscall6(procReplaceFileW.Addr(), 6, uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved))
if int32(r1) == 0 { if int32(r1) == 0 {
err = errnoErr(e1) err = errnoErr(e1)
} }

@ -24,7 +24,7 @@ type fakeBIRD struct {
func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
sock := filepath.Join(t.TempDir(), "sock") sock := filepath.Join(t.TempDir(), "sock")
ln, err := net.Listen("unix", sock) l, err := net.Listen("unix", sock)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -33,7 +33,7 @@ func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
pe[p] = false pe[p] = false
} }
return &fakeBIRD{ return &fakeBIRD{
Listener: ln, Listener: l,
protocolsEnabled: pe, protocolsEnabled: pe,
sock: sock, sock: sock,
} }
@ -123,12 +123,12 @@ type hangingListener struct {
func newHangingListener(t *testing.T) *hangingListener { func newHangingListener(t *testing.T) *hangingListener {
sock := filepath.Join(t.TempDir(), "sock") sock := filepath.Join(t.TempDir(), "sock")
ln, err := net.Listen("unix", sock) l, err := net.Listen("unix", sock)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
return &hangingListener{ return &hangingListener{
Listener: ln, Listener: l,
t: t, t: t,
done: make(chan struct{}), done: make(chan struct{}),
sock: sock, sock: sock,

@ -38,7 +38,6 @@ import (
"tailscale.com/net/udprelay/status" "tailscale.com/net/udprelay/status"
"tailscale.com/paths" "tailscale.com/paths"
"tailscale.com/safesocket" "tailscale.com/safesocket"
"tailscale.com/syncs"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/appctype" "tailscale.com/types/appctype"
"tailscale.com/types/dnstype" "tailscale.com/types/dnstype"
@ -597,19 +596,6 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro
return x, nil return x, nil
} }
// QueryOptionalFeatures queries the optional features supported by the Tailscale daemon.
func (lc *Client) QueryOptionalFeatures(ctx context.Context) (*apitype.OptionalFeatures, error) {
body, err := lc.send(ctx, "POST", "/localapi/v0/debug-optional-features", 200, nil)
if err != nil {
return nil, fmt.Errorf("error %w: %s", err, body)
}
var x apitype.OptionalFeatures
if err := json.Unmarshal(body, &x); err != nil {
return nil, err
}
return &x, nil
}
// SetDevStoreKeyValue set a statestore key/value. It's only meant for development. // SetDevStoreKeyValue set a statestore key/value. It's only meant for development.
// The schema (including when keys are re-read) is not a stable interface. // The schema (including when keys are re-read) is not a stable interface.
func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error { func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error {
@ -1364,7 +1350,7 @@ type IPNBusWatcher struct {
httpRes *http.Response httpRes *http.Response
dec *json.Decoder dec *json.Decoder
mu syncs.Mutex mu sync.Mutex
closed bool closed bool
} }
@ -1401,23 +1387,6 @@ func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggesti
return decodeJSON[apitype.ExitNodeSuggestionResponse](body) return decodeJSON[apitype.ExitNodeSuggestionResponse](body)
} }
// CheckSOMarkInUse reports whether the socket mark option is in use. This will only
// be true if tailscale is running on Linux and tailscaled uses SO_MARK.
func (lc *Client) CheckSOMarkInUse(ctx context.Context) (bool, error) {
body, err := lc.get200(ctx, "/localapi/v0/check-so-mark-in-use")
if err != nil {
return false, err
}
var res struct {
UseSOMark bool `json:"useSoMark"`
}
if err := json.Unmarshal(body, &res); err != nil {
return false, fmt.Errorf("invalid JSON from check-so-mark-in-use: %w", err)
}
return res.UseSOMark, nil
}
// ShutdownTailscaled requests a graceful shutdown of tailscaled. // ShutdownTailscaled requests a graceful shutdown of tailscaled.
func (lc *Client) ShutdownTailscaled(ctx context.Context) error { func (lc *Client) ShutdownTailscaled(ctx context.Context) error {
_, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil)

@ -158,18 +158,6 @@ func init() {
// onReady is called by the systray package when the menu is ready to be built. // onReady is called by the systray package when the menu is ready to be built.
func (menu *Menu) onReady() { func (menu *Menu) onReady() {
log.Printf("starting") log.Printf("starting")
if os.Getuid() == 0 || os.Getuid() != os.Geteuid() || os.Getenv("SUDO_USER") != "" || os.Getenv("DOAS_USER") != "" {
fmt.Fprintln(os.Stderr, `
It appears that you might be running the systray with sudo/doas.
This can lead to issues with D-Bus, and should be avoided.
The systray application should be run with the same user as your desktop session.
This usually means that you should run the application like:
tailscale systray
See https://tailscale.com/kb/1597/linux-systray for more information.`)
}
setAppIcon(disconnected) setAppIcon(disconnected)
menu.rebuild() menu.rebuild()
@ -512,7 +500,7 @@ func (menu *Menu) watchIPNBus() {
} }
func (menu *Menu) watchIPNBusInner() error { func (menu *Menu) watchIPNBusInner() error {
watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, 0) watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, ipn.NotifyNoPrivateKeys)
if err != nil { if err != nil {
return fmt.Errorf("watching ipn bus: %w", err) return fmt.Errorf("watching ipn bus: %w", err)
} }

@ -94,13 +94,3 @@ type DNSQueryResponse struct {
// Resolvers is the list of resolvers that the forwarder deemed able to resolve the query. // Resolvers is the list of resolvers that the forwarder deemed able to resolve the query.
Resolvers []*dnstype.Resolver Resolvers []*dnstype.Resolver
} }
// OptionalFeatures describes which optional features are enabled in the build.
type OptionalFeatures struct {
// Features is the map of optional feature names to whether they are
// enabled.
//
// Disabled features may be absent from the map. (That is, false values
// are not guaranteed to be present.)
Features map[string]bool
}

@ -34,10 +34,10 @@
"prettier-plugin-organize-imports": "^3.2.2", "prettier-plugin-organize-imports": "^3.2.2",
"tailwindcss": "^3.3.3", "tailwindcss": "^3.3.3",
"typescript": "^5.3.3", "typescript": "^5.3.3",
"vite": "^5.4.21", "vite": "^5.1.7",
"vite-plugin-svgr": "^4.2.0", "vite-plugin-svgr": "^4.2.0",
"vite-tsconfig-paths": "^3.5.0", "vite-tsconfig-paths": "^3.5.0",
"vitest": "^1.6.1" "vitest": "^1.3.1"
}, },
"resolutions": { "resolutions": {
"@typescript-eslint/eslint-plugin": "^6.2.1", "@typescript-eslint/eslint-plugin": "^6.2.1",

@ -66,7 +66,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
// match from a list of exit node `options` to `nodes`. // match from a list of exit node `options` to `nodes`.
const addBestMatchNode = ( const addBestMatchNode = (
options: ExitNode[], options: ExitNode[],
name: (loc: ExitNodeLocation) => string name: (l: ExitNodeLocation) => string
) => { ) => {
const bestNode = highestPriorityNode(options) const bestNode = highestPriorityNode(options)
if (!bestNode || !bestNode.Location) { if (!bestNode || !bestNode.Location) {
@ -86,7 +86,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
locationNodesMap.forEach( locationNodesMap.forEach(
// add one node per country // add one node per country
(countryNodes) => (countryNodes) =>
addBestMatchNode(flattenMap(countryNodes), (loc) => loc.Country) addBestMatchNode(flattenMap(countryNodes), (l) => l.Country)
) )
} else { } else {
// Otherwise, show the best match on a city-level, // Otherwise, show the best match on a city-level,
@ -97,12 +97,12 @@ export default function useExitNodes(node: NodeData, filter?: string) {
countryNodes.forEach( countryNodes.forEach(
// add one node per city // add one node per city
(cityNodes) => (cityNodes) =>
addBestMatchNode(cityNodes, (loc) => `${loc.Country}: ${loc.City}`) addBestMatchNode(cityNodes, (l) => `${l.Country}: ${l.City}`)
) )
// add the "Country: Best Match" node // add the "Country: Best Match" node
addBestMatchNode( addBestMatchNode(
flattenMap(countryNodes), flattenMap(countryNodes),
(loc) => `${loc.Country}: Best Match` (l) => `${l.Country}: Best Match`
) )
}) })
} }

@ -1130,120 +1130,120 @@
resolved "https://registry.yarnpkg.com/@cush/relative/-/relative-1.0.0.tgz#8cd1769bf9bde3bb27dac356b1bc94af40f6cc16" resolved "https://registry.yarnpkg.com/@cush/relative/-/relative-1.0.0.tgz#8cd1769bf9bde3bb27dac356b1bc94af40f6cc16"
integrity sha512-RpfLEtTlyIxeNPGKcokS+p3BZII/Q3bYxryFRglh5H3A3T8q9fsLYm72VYAMEOOIBLEa8o93kFLiBDUWKrwXZA== integrity sha512-RpfLEtTlyIxeNPGKcokS+p3BZII/Q3bYxryFRglh5H3A3T8q9fsLYm72VYAMEOOIBLEa8o93kFLiBDUWKrwXZA==
"@esbuild/aix-ppc64@0.21.5": "@esbuild/aix-ppc64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz#c7184a326533fcdf1b8ee0733e21c713b975575f" resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz#d1bc06aedb6936b3b6d313bf809a5a40387d2b7f"
integrity sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ== integrity sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==
"@esbuild/android-arm64@0.21.5": "@esbuild/android-arm64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz#09d9b4357780da9ea3a7dfb833a1f1ff439b4052" resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz#7ad65a36cfdb7e0d429c353e00f680d737c2aed4"
integrity sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A== integrity sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==
"@esbuild/android-arm@0.21.5": "@esbuild/android-arm@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.21.5.tgz#9b04384fb771926dfa6d7ad04324ecb2ab9b2e28" resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.19.12.tgz#b0c26536f37776162ca8bde25e42040c203f2824"
integrity sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg== integrity sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==
"@esbuild/android-x64@0.21.5": "@esbuild/android-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.21.5.tgz#29918ec2db754cedcb6c1b04de8cd6547af6461e" resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.19.12.tgz#cb13e2211282012194d89bf3bfe7721273473b3d"
integrity sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA== integrity sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==
"@esbuild/darwin-arm64@0.21.5": "@esbuild/darwin-arm64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz#e495b539660e51690f3928af50a76fb0a6ccff2a" resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz#cbee41e988020d4b516e9d9e44dd29200996275e"
integrity sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ== integrity sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==
"@esbuild/darwin-x64@0.21.5": "@esbuild/darwin-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz#c13838fa57372839abdddc91d71542ceea2e1e22" resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz#e37d9633246d52aecf491ee916ece709f9d5f4cd"
integrity sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw== integrity sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==
"@esbuild/freebsd-arm64@0.21.5": "@esbuild/freebsd-arm64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz#646b989aa20bf89fd071dd5dbfad69a3542e550e" resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz#1ee4d8b682ed363b08af74d1ea2b2b4dbba76487"
integrity sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g== integrity sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==
"@esbuild/freebsd-x64@0.21.5": "@esbuild/freebsd-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz#aa615cfc80af954d3458906e38ca22c18cf5c261" resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz#37a693553d42ff77cd7126764b535fb6cc28a11c"
integrity sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ== integrity sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==
"@esbuild/linux-arm64@0.21.5": "@esbuild/linux-arm64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz#70ac6fa14f5cb7e1f7f887bcffb680ad09922b5b" resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz#be9b145985ec6c57470e0e051d887b09dddb2d4b"
integrity sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q== integrity sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==
"@esbuild/linux-arm@0.21.5": "@esbuild/linux-arm@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz#fc6fd11a8aca56c1f6f3894f2bea0479f8f626b9" resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz#207ecd982a8db95f7b5279207d0ff2331acf5eef"
integrity sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA== integrity sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==
"@esbuild/linux-ia32@0.21.5": "@esbuild/linux-ia32@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz#3271f53b3f93e3d093d518d1649d6d68d346ede2" resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz#d0d86b5ca1562523dc284a6723293a52d5860601"
integrity sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg== integrity sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==
"@esbuild/linux-loong64@0.21.5": "@esbuild/linux-loong64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz#ed62e04238c57026aea831c5a130b73c0f9f26df" resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz#9a37f87fec4b8408e682b528391fa22afd952299"
integrity sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg== integrity sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==
"@esbuild/linux-mips64el@0.21.5": "@esbuild/linux-mips64el@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz#e79b8eb48bf3b106fadec1ac8240fb97b4e64cbe" resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz#4ddebd4e6eeba20b509d8e74c8e30d8ace0b89ec"
integrity sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg== integrity sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==
"@esbuild/linux-ppc64@0.21.5": "@esbuild/linux-ppc64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz#5f2203860a143b9919d383ef7573521fb154c3e4" resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz#adb67dadb73656849f63cd522f5ecb351dd8dee8"
integrity sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w== integrity sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==
"@esbuild/linux-riscv64@0.21.5": "@esbuild/linux-riscv64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz#07bcafd99322d5af62f618cb9e6a9b7f4bb825dc" resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz#11bc0698bf0a2abf8727f1c7ace2112612c15adf"
integrity sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA== integrity sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==
"@esbuild/linux-s390x@0.21.5": "@esbuild/linux-s390x@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz#b7ccf686751d6a3e44b8627ababc8be3ef62d8de" resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz#e86fb8ffba7c5c92ba91fc3b27ed5a70196c3cc8"
integrity sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A== integrity sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==
"@esbuild/linux-x64@0.21.5": "@esbuild/linux-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz#6d8f0c768e070e64309af8004bb94e68ab2bb3b0" resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz#5f37cfdc705aea687dfe5dfbec086a05acfe9c78"
integrity sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ== integrity sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==
"@esbuild/netbsd-x64@0.21.5": "@esbuild/netbsd-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz#bbe430f60d378ecb88decb219c602667387a6047" resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz#29da566a75324e0d0dd7e47519ba2f7ef168657b"
integrity sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg== integrity sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==
"@esbuild/openbsd-x64@0.21.5": "@esbuild/openbsd-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz#99d1cf2937279560d2104821f5ccce220cb2af70" resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz#306c0acbdb5a99c95be98bdd1d47c916e7dc3ff0"
integrity sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow== integrity sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==
"@esbuild/sunos-x64@0.21.5": "@esbuild/sunos-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz#08741512c10d529566baba837b4fe052c8f3487b" resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz#0933eaab9af8b9b2c930236f62aae3fc593faf30"
integrity sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg== integrity sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==
"@esbuild/win32-arm64@0.21.5": "@esbuild/win32-arm64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz#675b7385398411240735016144ab2e99a60fc75d" resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz#773bdbaa1971b36db2f6560088639ccd1e6773ae"
integrity sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A== integrity sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==
"@esbuild/win32-ia32@0.21.5": "@esbuild/win32-ia32@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz#1bfc3ce98aa6ca9a0969e4d2af72144c59c1193b" resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz#000516cad06354cc84a73f0943a4aa690ef6fd67"
integrity sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA== integrity sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==
"@esbuild/win32-x64@0.21.5": "@esbuild/win32-x64@0.19.12":
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz#acad351d582d157bb145535db2a6ff53dd514b5c" resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz#c57c8afbb4054a3ab8317591a0b7320360b444ae"
integrity sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw== integrity sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==
"@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0": "@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0":
version "4.4.0" version "4.4.0"
@ -1626,115 +1626,70 @@
estree-walker "^2.0.2" estree-walker "^2.0.2"
picomatch "^2.3.1" picomatch "^2.3.1"
"@rollup/rollup-android-arm-eabi@4.52.5": "@rollup/rollup-android-arm-eabi@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.5.tgz#0f44a2f8668ed87b040b6fe659358ac9239da4db" resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz#38c3abd1955a3c21d492af6b1a1dca4bb1d894d6"
integrity sha512-8c1vW4ocv3UOMp9K+gToY5zL2XiiVw3k7f1ksf4yO1FlDFQ1C2u72iACFnSOceJFsWskc2WZNqeRhFRPzv+wtQ== integrity sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w==
"@rollup/rollup-android-arm64@4.52.5": "@rollup/rollup-android-arm64@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.5.tgz#25b9a01deef6518a948431564c987bcb205274f5" resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz#3822e929f415627609e53b11cec9a4be806de0e2"
integrity sha512-mQGfsIEFcu21mvqkEKKu2dYmtuSZOBMmAl5CFlPGLY94Vlcm+zWApK7F/eocsNzp8tKmbeBP8yXyAbx0XHsFNA== integrity sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ==
"@rollup/rollup-darwin-arm64@4.52.5": "@rollup/rollup-darwin-arm64@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.5.tgz#8a102869c88f3780c7d5e6776afd3f19084ecd7f" resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz#6c082de71f481f57df6cfa3701ab2a7afde96f69"
integrity sha512-takF3CR71mCAGA+v794QUZ0b6ZSrgJkArC+gUiG6LB6TQty9T0Mqh3m2ImRBOxS2IeYBo4lKWIieSvnEk2OQWA== integrity sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==
"@rollup/rollup-darwin-x64@4.52.5": "@rollup/rollup-darwin-x64@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.5.tgz#8e526417cd6f54daf1d0c04cf361160216581956" resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz#c34ca0d31f3c46a22c9afa0e944403eea0edcfd8"
integrity sha512-W901Pla8Ya95WpxDn//VF9K9u2JbocwV/v75TE0YIHNTbhqUTv9w4VuQ9MaWlNOkkEfFwkdNhXgcLqPSmHy0fA== integrity sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg==
"@rollup/rollup-freebsd-arm64@4.52.5": "@rollup/rollup-linux-arm-gnueabihf@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.5.tgz#0e7027054493f3409b1f219a3eac5efd128ef899" resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz#48e899c1e438629c072889b824a98787a7c2362d"
integrity sha512-QofO7i7JycsYOWxe0GFqhLmF6l1TqBswJMvICnRUjqCx8b47MTo46W8AoeQwiokAx3zVryVnxtBMcGcnX12LvA== integrity sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA==
"@rollup/rollup-freebsd-x64@4.52.5": "@rollup/rollup-linux-arm64-gnu@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.5.tgz#72b204a920139e9ec3d331bd9cfd9a0c248ccb10" resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz#788c2698a119dc229062d40da6ada8a090a73a68"
integrity sha512-jr21b/99ew8ujZubPo9skbrItHEIE50WdV86cdSoRkKtmWa+DDr6fu2c/xyRT0F/WazZpam6kk7IHBerSL7LDQ== integrity sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA==
"@rollup/rollup-linux-arm-gnueabihf@4.52.5": "@rollup/rollup-linux-arm64-musl@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.5.tgz#ab1b522ebe5b7e06c99504cc38f6cd8b808ba41c" resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz#3882a4e3a564af9e55804beeb67076857b035ab7"
integrity sha512-PsNAbcyv9CcecAUagQefwX8fQn9LQ4nZkpDboBOttmyffnInRy8R8dSg6hxxl2Re5QhHBf6FYIDhIj5v982ATQ== integrity sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ==
"@rollup/rollup-linux-arm-musleabihf@4.52.5": "@rollup/rollup-linux-riscv64-gnu@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.5.tgz#f8cc30b638f1ee7e3d18eac24af47ea29d9beb00" resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz#0c6ad792e1195c12bfae634425a3d2aa0fe93ab7"
integrity sha512-Fw4tysRutyQc/wwkmcyoqFtJhh0u31K+Q6jYjeicsGJJ7bbEq8LwPWV/w0cnzOqR2m694/Af6hpFayLJZkG2VQ== integrity sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw==
"@rollup/rollup-linux-arm64-gnu@4.52.5": "@rollup/rollup-linux-x64-gnu@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.5.tgz#7af37a9e85f25db59dc8214172907b7e146c12cc" resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz#9d62485ea0f18d8674033b57aa14fb758f6ec6e3"
integrity sha512-a+3wVnAYdQClOTlyapKmyI6BLPAFYs0JM8HRpgYZQO02rMR09ZcV9LbQB+NL6sljzG38869YqThrRnfPMCDtZg== integrity sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA==
"@rollup/rollup-linux-arm64-musl@4.52.5": "@rollup/rollup-linux-x64-musl@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.5.tgz#a623eb0d3617c03b7a73716eb85c6e37b776f7e0" resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz#50e8167e28b33c977c1f813def2b2074d1435e05"
integrity sha512-AvttBOMwO9Pcuuf7m9PkC1PUIKsfaAJ4AYhy944qeTJgQOqJYJ9oVl2nYgY7Rk0mkbsuOpCAYSs6wLYB2Xiw0Q== integrity sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw==
"@rollup/rollup-linux-loong64-gnu@4.52.5": "@rollup/rollup-win32-arm64-msvc@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.5.tgz#76ea038b549c5c6c5f0d062942627c4066642ee2" resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz#68d233272a2004429124494121a42c4aebdc5b8e"
integrity sha512-DkDk8pmXQV2wVrF6oq5tONK6UHLz/XcEVow4JTTerdeV1uqPeHxwcg7aFsfnSm9L+OO8WJsWotKM2JJPMWrQtA== integrity sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw==
"@rollup/rollup-linux-ppc64-gnu@4.52.5": "@rollup/rollup-win32-ia32-msvc@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.5.tgz#d9a4c3f0a3492bc78f6fdfe8131ac61c7359ccd5" resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz#366ca62221d1689e3b55a03f4ae12ae9ba595d40"
integrity sha512-W/b9ZN/U9+hPQVvlGwjzi+Wy4xdoH2I8EjaCkMvzpI7wJUs8sWJ03Rq96jRnHkSrcHTpQe8h5Tg3ZzUPGauvAw== integrity sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA==
"@rollup/rollup-linux-riscv64-gnu@4.52.5": "@rollup/rollup-win32-x64-msvc@4.12.0":
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.5.tgz#87ab033eebd1a9a1dd7b60509f6333ec1f82d994" resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz#9ffdf9ed133a7464f4ae187eb9e1294413fab235"
integrity sha512-sjQLr9BW7R/ZiXnQiWPkErNfLMkkWIoCz7YMn27HldKsADEKa5WYdobaa1hmN6slu9oWQbB6/jFpJ+P2IkVrmw== integrity sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg==
"@rollup/rollup-linux-riscv64-musl@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.5.tgz#bda3eb67e1c993c1ba12bc9c2f694e7703958d9f"
integrity sha512-hq3jU/kGyjXWTvAh2awn8oHroCbrPm8JqM7RUpKjalIRWWXE01CQOf/tUNWNHjmbMHg/hmNCwc/Pz3k1T/j/Lg==
"@rollup/rollup-linux-s390x-gnu@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.5.tgz#f7bc10fbe096ab44694233dc42a2291ed5453d4b"
integrity sha512-gn8kHOrku8D4NGHMK1Y7NA7INQTRdVOntt1OCYypZPRt6skGbddska44K8iocdpxHTMMNui5oH4elPH4QOLrFQ==
"@rollup/rollup-linux-x64-gnu@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.5.tgz#a151cb1234cc9b2cf5e8cfc02aa91436b8f9e278"
integrity sha512-hXGLYpdhiNElzN770+H2nlx+jRog8TyynpTVzdlc6bndktjKWyZyiCsuDAlpd+j+W+WNqfcyAWz9HxxIGfZm1Q==
"@rollup/rollup-linux-x64-musl@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.5.tgz#7859e196501cc3b3062d45d2776cfb4d2f3a9350"
integrity sha512-arCGIcuNKjBoKAXD+y7XomR9gY6Mw7HnFBv5Rw7wQRvwYLR7gBAgV7Mb2QTyjXfTveBNFAtPt46/36vV9STLNg==
"@rollup/rollup-openharmony-arm64@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.5.tgz#85d0df7233734df31e547c1e647d2a5300b3bf30"
integrity sha512-QoFqB6+/9Rly/RiPjaomPLmR/13cgkIGfA40LHly9zcH1S0bN2HVFYk3a1eAyHQyjs3ZJYlXvIGtcCs5tko9Cw==
"@rollup/rollup-win32-arm64-msvc@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.5.tgz#e62357d00458db17277b88adbf690bb855cac937"
integrity sha512-w0cDWVR6MlTstla1cIfOGyl8+qb93FlAVutcor14Gf5Md5ap5ySfQ7R9S/NjNaMLSFdUnKGEasmVnu3lCMqB7w==
"@rollup/rollup-win32-ia32-msvc@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.5.tgz#fc7cd40f44834a703c1f1c3fe8bcc27ce476cd50"
integrity sha512-Aufdpzp7DpOTULJCuvzqcItSGDH73pF3ko/f+ckJhxQyHtp67rHw3HMNxoIdDMUITJESNE6a8uh4Lo4SLouOUg==
"@rollup/rollup-win32-x64-gnu@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.5.tgz#1a22acfc93c64a64a48c42672e857ee51774d0d3"
integrity sha512-UGBUGPFp1vkj6p8wCRraqNhqwX/4kNQPS57BCFc8wYh0g94iVIW33wJtQAx3G7vrjjNtRaxiMUylM0ktp/TRSQ==
"@rollup/rollup-win32-x64-msvc@4.52.5":
version "4.52.5"
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.5.tgz#1657f56326bbe0ac80eedc9f9c18fc1ddd24e107"
integrity sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg==
"@rushstack/eslint-patch@^1.1.0": "@rushstack/eslint-patch@^1.1.0":
version "1.6.0" version "1.6.0"
@ -1908,12 +1863,7 @@
resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.5.tgz#043b731d4f56a79b4897a3de1af35e75d56bc63a" resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.5.tgz#043b731d4f56a79b4897a3de1af35e75d56bc63a"
integrity sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw== integrity sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw==
"@types/estree@1.0.8": "@types/estree@1.0.5", "@types/estree@^1.0.0":
version "1.0.8"
resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.8.tgz#958b91c991b1867ced318bedea0e215ee050726e"
integrity sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==
"@types/estree@^1.0.0":
version "1.0.5" version "1.0.5"
resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4" resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4"
integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw== integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==
@ -2124,44 +2074,44 @@
dependencies: dependencies:
"@swc/core" "^1.3.107" "@swc/core" "^1.3.107"
"@vitest/expect@1.6.1": "@vitest/expect@1.3.1":
version "1.6.1" version "1.3.1"
resolved "https://registry.yarnpkg.com/@vitest/expect/-/expect-1.6.1.tgz#b90c213f587514a99ac0bf84f88cff9042b0f14d" resolved "https://registry.yarnpkg.com/@vitest/expect/-/expect-1.3.1.tgz#d4c14b89c43a25fd400a6b941f51ba27fe0cb918"
integrity sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog== integrity sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw==
dependencies: dependencies:
"@vitest/spy" "1.6.1" "@vitest/spy" "1.3.1"
"@vitest/utils" "1.6.1" "@vitest/utils" "1.3.1"
chai "^4.3.10" chai "^4.3.10"
"@vitest/runner@1.6.1": "@vitest/runner@1.3.1":
version "1.6.1" version "1.3.1"
resolved "https://registry.yarnpkg.com/@vitest/runner/-/runner-1.6.1.tgz#10f5857c3e376218d58c2bfacfea1161e27e117f" resolved "https://registry.yarnpkg.com/@vitest/runner/-/runner-1.3.1.tgz#e7f96cdf74842934782bfd310eef4b8695bbfa30"
integrity sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA== integrity sha512-5FzF9c3jG/z5bgCnjr8j9LNq/9OxV2uEBAITOXfoe3rdZJTdO7jzThth7FXv/6b+kdY65tpRQB7WaKhNZwX+Kg==
dependencies: dependencies:
"@vitest/utils" "1.6.1" "@vitest/utils" "1.3.1"
p-limit "^5.0.0" p-limit "^5.0.0"
pathe "^1.1.1" pathe "^1.1.1"
"@vitest/snapshot@1.6.1": "@vitest/snapshot@1.3.1":
version "1.6.1" version "1.3.1"
resolved "https://registry.yarnpkg.com/@vitest/snapshot/-/snapshot-1.6.1.tgz#90414451a634bb36cd539ccb29ae0d048a8c0479" resolved "https://registry.yarnpkg.com/@vitest/snapshot/-/snapshot-1.3.1.tgz#193a5d7febf6ec5d22b3f8c5a093f9e4322e7a88"
integrity sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ== integrity sha512-EF++BZbt6RZmOlE3SuTPu/NfwBF6q4ABS37HHXzs2LUVPBLx2QoY/K0fKpRChSo8eLiuxcbCVfqKgx/dplCDuQ==
dependencies: dependencies:
magic-string "^0.30.5" magic-string "^0.30.5"
pathe "^1.1.1" pathe "^1.1.1"
pretty-format "^29.7.0" pretty-format "^29.7.0"
"@vitest/spy@1.6.1": "@vitest/spy@1.3.1":
version "1.6.1" version "1.3.1"
resolved "https://registry.yarnpkg.com/@vitest/spy/-/spy-1.6.1.tgz#33376be38a5ed1ecd829eb986edaecc3e798c95d" resolved "https://registry.yarnpkg.com/@vitest/spy/-/spy-1.3.1.tgz#814245d46d011b99edd1c7528f5725c64e85a88b"
integrity sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw== integrity sha512-xAcW+S099ylC9VLU7eZfdT9myV67Nor9w9zhf0mGCYJSO+zM2839tOeROTdikOi/8Qeusffvxb/MyBSOja1Uig==
dependencies: dependencies:
tinyspy "^2.2.0" tinyspy "^2.2.0"
"@vitest/utils@1.6.1": "@vitest/utils@1.3.1":
version "1.6.1" version "1.3.1"
resolved "https://registry.yarnpkg.com/@vitest/utils/-/utils-1.6.1.tgz#6d2f36cb6d866f2bbf59da854a324d6bf8040f17" resolved "https://registry.yarnpkg.com/@vitest/utils/-/utils-1.3.1.tgz#7b05838654557544f694a372de767fcc9594d61a"
integrity sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g== integrity sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ==
dependencies: dependencies:
diff-sequences "^29.6.3" diff-sequences "^29.6.3"
estree-walker "^3.0.3" estree-walker "^3.0.3"
@ -2477,11 +2427,11 @@ brace-expansion@^2.0.1:
balanced-match "^1.0.0" balanced-match "^1.0.0"
braces@^3.0.2, braces@~3.0.2: braces@^3.0.2, braces@~3.0.2:
version "3.0.3" version "3.0.2"
resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107"
integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==
dependencies: dependencies:
fill-range "^7.1.1" fill-range "^7.0.1"
browserslist@^4.21.10, browserslist@^4.21.9, browserslist@^4.22.1: browserslist@^4.21.10, browserslist@^4.21.9, browserslist@^4.22.1:
version "4.22.1" version "4.22.1"
@ -2677,9 +2627,9 @@ cosmiconfig@^8.1.3:
path-type "^4.0.0" path-type "^4.0.0"
cross-spawn@^7.0.2, cross-spawn@^7.0.3: cross-spawn@^7.0.2, cross-spawn@^7.0.3:
version "7.0.6" version "7.0.3"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
dependencies: dependencies:
path-key "^3.1.0" path-key "^3.1.0"
shebang-command "^2.0.0" shebang-command "^2.0.0"
@ -2971,34 +2921,34 @@ es-to-primitive@^1.2.1:
is-date-object "^1.0.1" is-date-object "^1.0.1"
is-symbol "^1.0.2" is-symbol "^1.0.2"
esbuild@^0.21.3: esbuild@^0.19.3:
version "0.21.5" version "0.19.12"
resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.21.5.tgz#9ca301b120922959b766360d8ac830da0d02997d" resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.19.12.tgz#dc82ee5dc79e82f5a5c3b4323a2a641827db3e04"
integrity sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw== integrity sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==
optionalDependencies: optionalDependencies:
"@esbuild/aix-ppc64" "0.21.5" "@esbuild/aix-ppc64" "0.19.12"
"@esbuild/android-arm" "0.21.5" "@esbuild/android-arm" "0.19.12"
"@esbuild/android-arm64" "0.21.5" "@esbuild/android-arm64" "0.19.12"
"@esbuild/android-x64" "0.21.5" "@esbuild/android-x64" "0.19.12"
"@esbuild/darwin-arm64" "0.21.5" "@esbuild/darwin-arm64" "0.19.12"
"@esbuild/darwin-x64" "0.21.5" "@esbuild/darwin-x64" "0.19.12"
"@esbuild/freebsd-arm64" "0.21.5" "@esbuild/freebsd-arm64" "0.19.12"
"@esbuild/freebsd-x64" "0.21.5" "@esbuild/freebsd-x64" "0.19.12"
"@esbuild/linux-arm" "0.21.5" "@esbuild/linux-arm" "0.19.12"
"@esbuild/linux-arm64" "0.21.5" "@esbuild/linux-arm64" "0.19.12"
"@esbuild/linux-ia32" "0.21.5" "@esbuild/linux-ia32" "0.19.12"
"@esbuild/linux-loong64" "0.21.5" "@esbuild/linux-loong64" "0.19.12"
"@esbuild/linux-mips64el" "0.21.5" "@esbuild/linux-mips64el" "0.19.12"
"@esbuild/linux-ppc64" "0.21.5" "@esbuild/linux-ppc64" "0.19.12"
"@esbuild/linux-riscv64" "0.21.5" "@esbuild/linux-riscv64" "0.19.12"
"@esbuild/linux-s390x" "0.21.5" "@esbuild/linux-s390x" "0.19.12"
"@esbuild/linux-x64" "0.21.5" "@esbuild/linux-x64" "0.19.12"
"@esbuild/netbsd-x64" "0.21.5" "@esbuild/netbsd-x64" "0.19.12"
"@esbuild/openbsd-x64" "0.21.5" "@esbuild/openbsd-x64" "0.19.12"
"@esbuild/sunos-x64" "0.21.5" "@esbuild/sunos-x64" "0.19.12"
"@esbuild/win32-arm64" "0.21.5" "@esbuild/win32-arm64" "0.19.12"
"@esbuild/win32-ia32" "0.21.5" "@esbuild/win32-ia32" "0.19.12"
"@esbuild/win32-x64" "0.21.5" "@esbuild/win32-x64" "0.19.12"
escalade@^3.1.1: escalade@^3.1.1:
version "3.1.1" version "3.1.1"
@ -3325,10 +3275,10 @@ file-entry-cache@^6.0.1:
dependencies: dependencies:
flat-cache "^3.0.4" flat-cache "^3.0.4"
fill-range@^7.1.1: fill-range@^7.0.1:
version "7.1.1" version "7.0.1"
resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40"
integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==
dependencies: dependencies:
to-regex-range "^5.0.1" to-regex-range "^5.0.1"
@ -3935,9 +3885,9 @@ js-tokens@^8.0.2:
integrity sha512-UfJMcSJc+SEXEl9lH/VLHSZbThQyLpw1vLO1Lb+j4RWDvG3N2f7yj3PVQA3cmkTBNldJ9eFnM+xEXxHIXrYiJw== integrity sha512-UfJMcSJc+SEXEl9lH/VLHSZbThQyLpw1vLO1Lb+j4RWDvG3N2f7yj3PVQA3cmkTBNldJ9eFnM+xEXxHIXrYiJw==
js-yaml@^4.1.0: js-yaml@^4.1.0:
version "4.1.1" version "4.1.0"
resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602"
integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==
dependencies: dependencies:
argparse "^2.0.1" argparse "^2.0.1"
@ -4222,10 +4172,10 @@ mz@^2.7.0:
object-assign "^4.0.1" object-assign "^4.0.1"
thenify-all "^1.0.0" thenify-all "^1.0.0"
nanoid@^3.3.11: nanoid@^3.3.7:
version "3.3.11" version "3.3.7"
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b" resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8"
integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w== integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==
natural-compare@^1.4.0: natural-compare@^1.4.0:
version "1.4.0" version "1.4.0"
@ -4453,10 +4403,10 @@ pathval@^1.1.1:
resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d" resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d"
integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ== integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==
picocolors@^1.0.0, picocolors@^1.1.1: picocolors@^1.0.0:
version "1.1.1" version "1.0.0"
resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c"
integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==
picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1:
version "2.3.1" version "2.3.1"
@ -4526,14 +4476,14 @@ postcss-value-parser@^4.0.0, postcss-value-parser@^4.2.0:
resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514"
integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==
postcss@^8.4.23, postcss@^8.4.31, postcss@^8.4.43: postcss@^8.4.23, postcss@^8.4.31, postcss@^8.4.35:
version "8.5.6" version "8.4.35"
resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.6.tgz#2825006615a619b4f62a9e7426cc120b349a8f3c" resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.35.tgz#60997775689ce09011edf083a549cea44aabe2f7"
integrity sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg== integrity sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA==
dependencies: dependencies:
nanoid "^3.3.11" nanoid "^3.3.7"
picocolors "^1.1.1" picocolors "^1.0.0"
source-map-js "^1.2.1" source-map-js "^1.0.2"
prelude-ls@^1.2.1: prelude-ls@^1.2.1:
version "1.2.1" version "1.2.1"
@ -4765,35 +4715,26 @@ rimraf@^3.0.2:
dependencies: dependencies:
glob "^7.1.3" glob "^7.1.3"
rollup@^4.20.0: rollup@^4.2.0:
version "4.52.5" version "4.12.0"
resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.52.5.tgz#96982cdcaedcdd51b12359981f240f94304ec235" resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.12.0.tgz#0b6d1e5f3d46bbcf244deec41a7421dc54cc45b5"
integrity sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw== integrity sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==
dependencies: dependencies:
"@types/estree" "1.0.8" "@types/estree" "1.0.5"
optionalDependencies: optionalDependencies:
"@rollup/rollup-android-arm-eabi" "4.52.5" "@rollup/rollup-android-arm-eabi" "4.12.0"
"@rollup/rollup-android-arm64" "4.52.5" "@rollup/rollup-android-arm64" "4.12.0"
"@rollup/rollup-darwin-arm64" "4.52.5" "@rollup/rollup-darwin-arm64" "4.12.0"
"@rollup/rollup-darwin-x64" "4.52.5" "@rollup/rollup-darwin-x64" "4.12.0"
"@rollup/rollup-freebsd-arm64" "4.52.5" "@rollup/rollup-linux-arm-gnueabihf" "4.12.0"
"@rollup/rollup-freebsd-x64" "4.52.5" "@rollup/rollup-linux-arm64-gnu" "4.12.0"
"@rollup/rollup-linux-arm-gnueabihf" "4.52.5" "@rollup/rollup-linux-arm64-musl" "4.12.0"
"@rollup/rollup-linux-arm-musleabihf" "4.52.5" "@rollup/rollup-linux-riscv64-gnu" "4.12.0"
"@rollup/rollup-linux-arm64-gnu" "4.52.5" "@rollup/rollup-linux-x64-gnu" "4.12.0"
"@rollup/rollup-linux-arm64-musl" "4.52.5" "@rollup/rollup-linux-x64-musl" "4.12.0"
"@rollup/rollup-linux-loong64-gnu" "4.52.5" "@rollup/rollup-win32-arm64-msvc" "4.12.0"
"@rollup/rollup-linux-ppc64-gnu" "4.52.5" "@rollup/rollup-win32-ia32-msvc" "4.12.0"
"@rollup/rollup-linux-riscv64-gnu" "4.52.5" "@rollup/rollup-win32-x64-msvc" "4.12.0"
"@rollup/rollup-linux-riscv64-musl" "4.52.5"
"@rollup/rollup-linux-s390x-gnu" "4.52.5"
"@rollup/rollup-linux-x64-gnu" "4.52.5"
"@rollup/rollup-linux-x64-musl" "4.52.5"
"@rollup/rollup-openharmony-arm64" "4.52.5"
"@rollup/rollup-win32-arm64-msvc" "4.52.5"
"@rollup/rollup-win32-ia32-msvc" "4.52.5"
"@rollup/rollup-win32-x64-gnu" "4.52.5"
"@rollup/rollup-win32-x64-msvc" "4.52.5"
fsevents "~2.3.2" fsevents "~2.3.2"
rrweb-cssom@^0.6.0: rrweb-cssom@^0.6.0:
@ -4921,10 +4862,10 @@ snake-case@^3.0.4:
dot-case "^3.0.4" dot-case "^3.0.4"
tslib "^2.0.3" tslib "^2.0.3"
source-map-js@^1.2.1: source-map-js@^1.0.2:
version "1.2.1" version "1.0.2"
resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c"
integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==
stackback@0.0.2: stackback@0.0.2:
version "0.0.2" version "0.0.2"
@ -5114,10 +5055,10 @@ tinybench@^2.5.1:
resolved "https://registry.yarnpkg.com/tinybench/-/tinybench-2.6.0.tgz#1423284ee22de07c91b3752c048d2764714b341b" resolved "https://registry.yarnpkg.com/tinybench/-/tinybench-2.6.0.tgz#1423284ee22de07c91b3752c048d2764714b341b"
integrity sha512-N8hW3PG/3aOoZAN5V/NSAEDz0ZixDSSt5b/a05iqtpgfLWMSVuCo7w0k2vVvEjdrIoeGqZzweX2WlyioNIHchA== integrity sha512-N8hW3PG/3aOoZAN5V/NSAEDz0ZixDSSt5b/a05iqtpgfLWMSVuCo7w0k2vVvEjdrIoeGqZzweX2WlyioNIHchA==
tinypool@^0.8.3: tinypool@^0.8.2:
version "0.8.4" version "0.8.2"
resolved "https://registry.yarnpkg.com/tinypool/-/tinypool-0.8.4.tgz#e217fe1270d941b39e98c625dcecebb1408c9aa8" resolved "https://registry.yarnpkg.com/tinypool/-/tinypool-0.8.2.tgz#84013b03dc69dacb322563a475d4c0a9be00f82a"
integrity sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ== integrity sha512-SUszKYe5wgsxnNOVlBYO6IC+8VGWdVGZWAqUxp3UErNBtptZvWbwyUOyzNL59zigz2rCA92QiL3wvG+JDSdJdQ==
tinyspy@^2.2.0: tinyspy@^2.2.0:
version "2.2.1" version "2.2.1"
@ -5356,10 +5297,10 @@ util-deprecate@^1.0.2:
resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==
vite-node@1.6.1: vite-node@1.3.1:
version "1.6.1" version "1.3.1"
resolved "https://registry.yarnpkg.com/vite-node/-/vite-node-1.6.1.tgz#fff3ef309296ea03ceaa6ca4bb660922f5416c57" resolved "https://registry.yarnpkg.com/vite-node/-/vite-node-1.3.1.tgz#a93f7372212f5d5df38e945046b945ac3f4855d2"
integrity sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA== integrity sha512-azbRrqRxlWTJEVbzInZCTchx0X69M/XPTCz4H+TLvlTcR/xH/3hkRqhOakT41fMJCMzXTu4UvegkZiEoJAWvng==
dependencies: dependencies:
cac "^6.7.14" cac "^6.7.14"
debug "^4.3.4" debug "^4.3.4"
@ -5386,27 +5327,27 @@ vite-tsconfig-paths@^3.5.0:
recrawl-sync "^2.0.3" recrawl-sync "^2.0.3"
tsconfig-paths "^4.0.0" tsconfig-paths "^4.0.0"
vite@^5.0.0, vite@^5.4.21: vite@^5.0.0, vite@^5.1.7:
version "5.4.21" version "5.1.7"
resolved "https://registry.yarnpkg.com/vite/-/vite-5.4.21.tgz#84a4f7c5d860b071676d39ba513c0d598fdc7027" resolved "https://registry.yarnpkg.com/vite/-/vite-5.1.7.tgz#9f685a2c4c70707fef6d37341b0e809c366da619"
integrity sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw== integrity sha512-sgnEEFTZYMui/sTlH1/XEnVNHMujOahPLGMxn1+5sIT45Xjng1Ec1K78jRP15dSmVgg5WBin9yO81j3o9OxofA==
dependencies: dependencies:
esbuild "^0.21.3" esbuild "^0.19.3"
postcss "^8.4.43" postcss "^8.4.35"
rollup "^4.20.0" rollup "^4.2.0"
optionalDependencies: optionalDependencies:
fsevents "~2.3.3" fsevents "~2.3.3"
vitest@^1.6.1: vitest@^1.3.1:
version "1.6.1" version "1.3.1"
resolved "https://registry.yarnpkg.com/vitest/-/vitest-1.6.1.tgz#b4a3097adf8f79ac18bc2e2e0024c534a7a78d2f" resolved "https://registry.yarnpkg.com/vitest/-/vitest-1.3.1.tgz#2d7e9861f030d88a4669392a4aecb40569d90937"
integrity sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag== integrity sha512-/1QJqXs8YbCrfv/GPQ05wAZf2eakUPLPa18vkJAKE7RXOKfVHqMZZ1WlTjiwl6Gcn65M5vpNUB6EFLnEdRdEXQ==
dependencies: dependencies:
"@vitest/expect" "1.6.1" "@vitest/expect" "1.3.1"
"@vitest/runner" "1.6.1" "@vitest/runner" "1.3.1"
"@vitest/snapshot" "1.6.1" "@vitest/snapshot" "1.3.1"
"@vitest/spy" "1.6.1" "@vitest/spy" "1.3.1"
"@vitest/utils" "1.6.1" "@vitest/utils" "1.3.1"
acorn-walk "^8.3.2" acorn-walk "^8.3.2"
chai "^4.3.10" chai "^4.3.10"
debug "^4.3.4" debug "^4.3.4"
@ -5418,9 +5359,9 @@ vitest@^1.6.1:
std-env "^3.5.0" std-env "^3.5.0"
strip-literal "^2.0.0" strip-literal "^2.0.0"
tinybench "^2.5.1" tinybench "^2.5.1"
tinypool "^0.8.3" tinypool "^0.8.2"
vite "^5.0.0" vite "^5.0.0"
vite-node "1.6.1" vite-node "1.3.1"
why-is-node-running "^2.2.2" why-is-node-running "^2.2.2"
w3c-xmlserializer@^5.0.0: w3c-xmlserializer@^5.0.0:

@ -418,13 +418,13 @@ func parseSynoinfo(path string) (string, error) {
// Extract the CPU in the middle (88f6282 in the above example). // Extract the CPU in the middle (88f6282 in the above example).
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
line := s.Text() l := s.Text()
if !strings.HasPrefix(line, "unique=") { if !strings.HasPrefix(l, "unique=") {
continue continue
} }
parts := strings.SplitN(line, "_", 3) parts := strings.SplitN(l, "_", 3)
if len(parts) != 3 { if len(parts) != 3 {
return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, line) return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, l)
} }
return parts[1], nil return parts[1], nil
} }

@ -1,311 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// cigocacher is an opinionated-to-Tailscale client for gocached. It connects
// at a URL like "https://ci-gocached-azure-1.corp.ts.net:31364", but that is
// stored in a GitHub actions variable so that its hostname can be updated for
// all branches at the same time in sync with the actual infrastructure.
//
// It authenticates using GitHub OIDC tokens, and all HTTP errors are ignored
// so that its failure mode is just that builds get slower and fall back to
// disk-only cache.
package main
import (
"bytes"
"context"
jsonv1 "encoding/json"
"errors"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync/atomic"
"time"
"github.com/bradfitz/go-tool-cache/cacheproc"
"github.com/bradfitz/go-tool-cache/cachers"
)
// main runs cigocacher as a GOCACHEPROG subprocess for the Go toolchain.
//
// In --auth mode it exchanges a GitHub Actions OIDC token for a cigocached
// access token, prints it, and exits. Otherwise it serves the Go build-cache
// protocol over stdin/stdout, backed by a disk cache and, optionally, a
// remote cigocached server.
func main() {
	var (
		auth          = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output")
		token         = flag.String("token", "", "the cigocached access token to use, as created using --auth")
		cigocachedURL = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). empty means to not use one.")
		dir           = flag.String("cache-dir", "", "cache directory; empty means automatic")
		verbose       = flag.Bool("verbose", false, "enable verbose logging")
	)
	flag.Parse()
	if *auth {
		// Auth mode: every failure is logged and swallowed so CI falls back
		// to the disk-only cache instead of failing the build.
		if *cigocachedURL == "" {
			log.Print("--cigocached-url is empty, skipping auth")
			return
		}
		tk, err := fetchAccessToken(httpClient(), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL"), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN"), *cigocachedURL)
		if err != nil {
			log.Printf("error fetching access token, skipping auth: %v", err)
			return
		}
		fmt.Println(tk)
		return
	}
	if *dir == "" {
		// Default to a "go-cacher" directory under the user cache dir.
		d, err := os.UserCacheDir()
		if err != nil {
			log.Fatal(err)
		}
		*dir = filepath.Join(d, "go-cacher")
		log.Printf("Defaulting to cache dir %v ...", *dir)
	}
	if err := os.MkdirAll(*dir, 0750); err != nil {
		log.Fatal(err)
	}
	c := &cigocacher{
		disk: &cachers.DiskCache{
			Dir:     *dir,
			Verbose: *verbose,
		},
		verbose: *verbose,
	}
	if *cigocachedURL != "" {
		if *verbose {
			log.Printf("Using cigocached at %s", *cigocachedURL)
		}
		c.gocached = &gocachedClient{
			baseURL:     *cigocachedURL,
			cl:          httpClient(),
			accessToken: *token,
			verbose:     *verbose,
		}
	}
	// p is declared before assignment so the Close callback below can read
	// p's own counters when it logs.
	var p *cacheproc.Process
	p = &cacheproc.Process{
		Close: func() error {
			if c.verbose {
				log.Printf("gocacheprog: closing; %d gets (%d hits, %d misses, %d errors); %d puts (%d errors)",
					p.Gets.Load(), p.GetHits.Load(), p.GetMisses.Load(), p.GetErrors.Load(), p.Puts.Load(), p.PutErrors.Load())
			}
			return c.close()
		},
		Get: c.get,
		Put: c.put,
	}
	if err := p.Run(); err != nil {
		log.Fatal(err)
	}
}
func httpClient() *http.Client {
return &http.Client{
Transport: &http.Transport{
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
host, port, err := net.SplitHostPort(addr)
if err == nil {
// This does not run in a tailnet. We serve corp.ts.net
// TLS certs, and override DNS resolution to lookup the
// private IP for the VM by its hostname.
if vm, ok := strings.CutSuffix(host, ".corp.ts.net"); ok {
addr = net.JoinHostPort(vm, port)
}
}
var d net.Dialer
return d.DialContext(ctx, network, addr)
},
},
}
}
// cigocacher is a two-tier Go build cache: an authoritative local disk
// cache, optionally backed by a remote gocached server reached over HTTP on
// a best-effort basis. The atomic counters feed the stats line logged at
// close.
type cigocacher struct {
	disk     *cachers.DiskCache
	gocached *gocachedClient // nil when no remote server is configured
	verbose  bool
	getNanos atomic.Int64 // total nanoseconds spent in gets
	putNanos atomic.Int64 // total nanoseconds spent in puts
	getHTTP       atomic.Int64 // HTTP get requests made
	getHTTPBytes  atomic.Int64 // HTTP get bytes transferred
	getHTTPHits   atomic.Int64 // HTTP get hits
	getHTTPMisses atomic.Int64 // HTTP get misses
	getHTTPErrors atomic.Int64 // HTTP get errors ignored on best-effort basis
	getHTTPNanos  atomic.Int64 // total nanoseconds spent in HTTP gets
	putHTTP       atomic.Int64 // HTTP put requests made
	putHTTPBytes  atomic.Int64 // HTTP put bytes transferred
	putHTTPErrors atomic.Int64 // HTTP put errors ignored on best-effort basis
	putHTTPNanos  atomic.Int64 // total nanoseconds spent in HTTP puts
}
// get looks up actionID, first in the disk cache, then (on a miss) in the
// gocached server. A remote hit is written through to the disk cache so the
// returned diskPath is always a local file. Remote errors and misses are
// counted and reported as a plain miss ("", "", nil) rather than an error,
// so a flaky server only slows builds down, never fails them.
func (c *cigocacher) get(ctx context.Context, actionID string) (outputID, diskPath string, err error) {
	t0 := time.Now()
	defer func() {
		c.getNanos.Add(time.Since(t0).Nanoseconds())
	}()
	if c.gocached == nil {
		return c.disk.Get(ctx, actionID)
	}
	outputID, diskPath, err = c.disk.Get(ctx, actionID)
	if err == nil && outputID != "" {
		// Disk hit; no need to talk to the server.
		return outputID, diskPath, nil
	}
	c.getHTTP.Add(1)
	t0HTTP := time.Now()
	defer func() {
		c.getHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds())
	}()
	outputID, res, err := c.gocached.get(ctx, actionID)
	if err != nil {
		// Best-effort: a server error is deliberately reported as a miss.
		c.getHTTPErrors.Add(1)
		return "", "", nil
	}
	if outputID == "" || res == nil {
		c.getHTTPMisses.Add(1)
		return "", "", nil
	}
	defer res.Body.Close()
	// Fill the disk cache from the HTTP body so the toolchain reads a local
	// file; a failure here is a real error, since we cannot return diskPath.
	diskPath, err = put(c.disk, actionID, outputID, res.ContentLength, res.Body)
	if err != nil {
		return "", "", fmt.Errorf("error filling disk cache from HTTP: %w", err)
	}
	c.getHTTPHits.Add(1)
	c.getHTTPBytes.Add(res.ContentLength)
	return outputID, diskPath, nil
}
// put stores an action/output pair. The disk cache write is authoritative;
// when a gocached server is configured the same bytes are also streamed to
// it, but any HTTP failure is merely counted and never surfaced to the
// caller. Returns the local path of the cached output file.
func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size int64, r io.Reader) (diskPath string, err error) {
	t0 := time.Now()
	defer func() {
		c.putNanos.Add(time.Since(t0).Nanoseconds())
	}()
	if c.gocached == nil {
		return put(c.disk, actionID, outputID, size, r)
	}
	c.putHTTP.Add(1)
	var diskReader, httpReader io.Reader
	tee := &bestEffortTeeReader{r: r}
	if size == 0 {
		// Special case the empty file so NewRequest sets "Content-Length: 0",
		// as opposed to thinking we didn't set it and not being able to sniff
		// its size from the type.
		diskReader, httpReader = bytes.NewReader(nil), bytes.NewReader(nil)
	} else {
		pr, pw := io.Pipe()
		defer pw.Close()
		// The diskReader is in the driving seat. We will try to forward data
		// to httpReader as well, but only best-effort.
		diskReader = tee
		tee.w = pw
		httpReader = pr
	}
	// Buffered (capacity 1) so the uploader goroutine can always complete
	// its send: on the early-return paths below (disk write failure, or ctx
	// done) nothing ever receives from the channel, and an unbuffered send
	// would leak the goroutine forever.
	httpErrCh := make(chan error, 1)
	go func() {
		t0HTTP := time.Now()
		defer func() {
			c.putHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds())
		}()
		httpErrCh <- c.gocached.put(ctx, actionID, outputID, size, httpReader)
	}()
	diskPath, err = put(c.disk, actionID, outputID, size, diskReader)
	if err != nil {
		return "", fmt.Errorf("error writing to disk cache: %w", errors.Join(err, tee.err))
	}
	select {
	case err := <-httpErrCh:
		if err != nil {
			c.putHTTPErrors.Add(1)
		} else {
			c.putHTTPBytes.Add(size)
		}
	case <-ctx.Done():
		// Don't block shutdown on the remote write; the disk copy succeeded.
	}
	return diskPath, nil
}
// close logs HTTP-level stats and the server's per-session stats when
// verbose logging is enabled and a gocached server is configured; otherwise
// it is a no-op. It always returns nil.
func (c *cigocacher) close() error {
	if !c.verbose || c.gocached == nil {
		return nil
	}
	getMiB := float64(c.getHTTPBytes.Load()) / float64(1<<20)
	putMiB := float64(c.putHTTPBytes.Load()) / float64(1<<20)
	getSecs := float64(c.getHTTPNanos.Load()) / float64(time.Second)
	putSecs := float64(c.putHTTPNanos.Load()) / float64(time.Second)
	log.Printf("cigocacher HTTP stats: %d gets (%.1fMiB, %.2fs, %d hits, %d misses, %d errors ignored); %d puts (%.1fMiB, %.2fs, %d errors ignored)",
		c.getHTTP.Load(), getMiB, getSecs, c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(),
		c.putHTTP.Load(), putMiB, putSecs, c.putHTTPErrors.Load())
	if stats, err := c.gocached.fetchStats(); err != nil {
		log.Printf("error fetching gocached stats: %v", err)
	} else {
		log.Printf("gocached session stats: %s", stats)
	}
	return nil
}
func fetchAccessToken(cl *http.Client, idTokenURL, idTokenRequestToken, gocachedURL string) (string, error) {
req, err := http.NewRequest("GET", idTokenURL+"&audience=gocached", nil)
if err != nil {
return "", err
}
req.Header.Set("Authorization", "Bearer "+idTokenRequestToken)
resp, err := cl.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
type idTokenResp struct {
Value string `json:"value"`
}
var idToken idTokenResp
if err := jsonv1.NewDecoder(resp.Body).Decode(&idToken); err != nil {
return "", err
}
req, _ = http.NewRequest("POST", gocachedURL+"/auth/exchange-token", strings.NewReader(`{"jwt":"`+idToken.Value+`"}`))
req.Header.Set("Content-Type", "application/json")
resp, err = cl.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
type accessTokenResp struct {
AccessToken string `json:"access_token"`
}
var accessToken accessTokenResp
if err := jsonv1.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
return "", err
}
return accessToken.AccessToken, nil
}
type bestEffortTeeReader struct {
r io.Reader
w io.WriteCloser
err error
}
func (t *bestEffortTeeReader) Read(p []byte) (int, error) {
n, err := t.r.Read(p)
if n > 0 && t.w != nil {
if _, err := t.w.Write(p[:n]); err != nil {
t.err = errors.Join(err, t.w.Close())
t.w = nil
}
}
return n, err
}

@ -1,88 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"time"
"github.com/bradfitz/go-tool-cache/cachers"
)
// indexEntry is the metadata that DiskCache stores on disk for an ActionID.
// indexEntry is the metadata that DiskCache stores on disk for an ActionID.
type indexEntry struct {
	Version   int    `json:"v"` // index format version; written as 1
	OutputID  string `json:"o"` // hex ID of the output blob (sha256 of its content)
	Size      int64  `json:"n"` // size of the output blob in bytes
	TimeNanos int64  `json:"t"` // write time, Unix nanoseconds
}
func validHex(x string) bool {
if len(x) < 4 || len(x) > 100 {
return false
}
for _, b := range x {
if b >= '0' && b <= '9' || b >= 'a' && b <= 'f' {
continue
}
return false
}
return true
}
// put is like dc.Put but refactored to support safe concurrent writes on Windows.
// TODO(tomhjp): upstream these changes to go-tool-cache once they look stable.
//
// It validates both IDs, streams body into the content-addressed output
// file, then records an indexEntry for the action. Returns the on-disk path
// of the output file.
func put(dc *cachers.DiskCache, actionID, outputID string, size int64, body io.Reader) (diskPath string, _ error) {
	if len(actionID) < 4 || len(outputID) < 4 {
		return "", fmt.Errorf("actionID and outputID must be at least 4 characters long")
	}
	// IDs become filenames below, so reject anything that isn't plain hex.
	if !validHex(actionID) {
		log.Printf("diskcache: got invalid actionID %q", actionID)
		return "", errors.New("actionID must be hex")
	}
	if !validHex(outputID) {
		log.Printf("diskcache: got invalid outputID %q", outputID)
		return "", errors.New("outputID must be hex")
	}
	actionFile := dc.ActionFilename(actionID)
	outputFile := dc.OutputFilename(outputID)
	actionDir := filepath.Dir(actionFile)
	outputDir := filepath.Dir(outputFile)
	if err := os.MkdirAll(actionDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create action directory: %w", err)
	}
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create output directory: %w", err)
	}
	// Write the output blob first; the index entry must never reference a
	// blob that isn't fully on disk.
	wrote, err := writeOutputFile(outputFile, body, size, outputID)
	if err != nil {
		return "", err
	}
	if wrote != size {
		return "", fmt.Errorf("wrote %d bytes, expected %d", wrote, size)
	}
	ij, err := json.Marshal(indexEntry{
		Version:   1,
		OutputID:  outputID,
		Size:      size,
		TimeNanos: time.Now().UnixNano(),
	})
	if err != nil {
		return "", err
	}
	if err := writeActionFile(dc.ActionFilename(actionID), ij); err != nil {
		return "", fmt.Errorf("atomic write failed: %w", err)
	}
	return outputFile, nil
}

@ -1,44 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package main
import (
"bytes"
"io"
"os"
"path/filepath"
)
// writeActionFile atomically writes the serialized index entry b to dest
// (non-Windows implementation).
func writeActionFile(dest string, b []byte) error {
	if _, err := writeAtomic(dest, bytes.NewReader(b)); err != nil {
		return err
	}
	return nil
}
// writeOutputFile atomically writes the cached object bytes from r to dest,
// returning the byte count. The size and outputID arguments are unused on
// non-Windows platforms.
func writeOutputFile(dest string, r io.Reader, _ int64, _ string) (int64, error) {
	n, err := writeAtomic(dest, r)
	return n, err
}
func writeAtomic(dest string, r io.Reader) (int64, error) {
tf, err := os.CreateTemp(filepath.Dir(dest), filepath.Base(dest)+".*")
if err != nil {
return 0, err
}
size, err := io.Copy(tf, r)
if err != nil {
tf.Close()
os.Remove(tf.Name())
return 0, err
}
if err := tf.Close(); err != nil {
os.Remove(tf.Name())
return 0, err
}
if err := os.Rename(tf.Name(), dest); err != nil {
os.Remove(tf.Name())
return 0, err
}
return size, nil
}

@ -1,102 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"crypto/sha256"
"errors"
"fmt"
"io"
"os"
)
// The functions in this file are based on go's own cache in
// cmd/go/internal/cache/cache.go, particularly putIndexEntry and copyFile.
// writeActionFile writes the indexEntry metadata for an ActionID to disk. It
// may be called for the same actionID concurrently from multiple processes,
// and the outputID for a specific actionID may change from time to time due
// to non-deterministic builds. It makes a best-effort to delete the file if
// anything goes wrong.
func writeActionFile(dest string, b []byte) (retErr error) {
	// Deliberately no os.O_TRUNC here; see the comment before Truncate below.
	f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0o666)
	if err != nil {
		return err
	}
	defer func() {
		// Best-effort cleanup: if the write or the close failed, remove the
		// possibly-corrupt file rather than leave a bad index entry behind.
		cerr := f.Close()
		if retErr != nil || cerr != nil {
			retErr = errors.Join(retErr, cerr, os.Remove(dest))
		}
	}()
	_, err = f.Write(b)
	if err != nil {
		return err
	}
	// Truncate the file only *after* writing it.
	// (This should be a no-op, but truncate just in case of previous corruption.)
	//
	// This differs from os.WriteFile, which truncates to 0 *before* writing
	// via os.O_TRUNC. Truncating only after writing ensures that a second write
	// of the same content to the same file is idempotent, and does not - even
	// temporarily! - undo the effect of the first write.
	return f.Truncate(int64(len(b)))
}
// writeOutputFile writes content to be cached to disk. The outputID is the
// sha256 hash of the content, and each file should only be written ~once,
// assuming no sha256 hash collisions. It may be written multiple times if
// concurrent processes are both populating the same output. The file is opened
// with FILE_SHARE_READ|FILE_SHARE_WRITE, which means both processes can write
// the same contents concurrently without conflict.
//
// It makes a best effort to clean up if anything goes wrong, but the file may
// be left in an inconsistent state in the event of disk-related errors such as
// another process taking file locks, or power loss etc.
func writeOutputFile(dest string, r io.Reader, size int64, outputID string) (_ int64, retErr error) {
	info, err := os.Stat(dest)
	if err == nil && info.Size() == size {
		// Already exists, check the hash.
		if f, err := os.Open(dest); err == nil {
			h := sha256.New()
			// A read error here is deliberately ignored: a partial read just
			// produces a non-matching hash and we fall through to rewriting.
			io.Copy(h, f)
			f.Close()
			if fmt.Sprintf("%x", h.Sum(nil)) == outputID {
				// Still drain the reader to ensure associated resources are released.
				return io.Copy(io.Discard, r)
			}
		}
	}
	// Didn't successfully find the pre-existing file, write it.
	mode := os.O_WRONLY | os.O_CREATE
	if err == nil && info.Size() > size {
		mode |= os.O_TRUNC // Should never happen, but self-heal.
	}
	f, err := os.OpenFile(dest, mode, 0644)
	if err != nil {
		return 0, fmt.Errorf("failed to open output file %q: %w", dest, err)
	}
	defer func() {
		// Best-effort cleanup: remove the file when the write or close
		// failed, so a later call re-populates it from scratch.
		cerr := f.Close()
		if retErr != nil || cerr != nil {
			retErr = errors.Join(retErr, cerr, os.Remove(dest))
		}
	}()
	// Copy file to f, but also into h to double-check hash.
	h := sha256.New()
	w := io.MultiWriter(f, h)
	n, err := io.Copy(w, r)
	if err != nil {
		return 0, err
	}
	if fmt.Sprintf("%x", h.Sum(nil)) != outputID {
		return 0, errors.New("file content changed underfoot")
	}
	return n, nil
}

@ -1,115 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"context"
"fmt"
"io"
"log"
"net/http"
)
// gocachedClient is a thin HTTP client for a gocached cache server.
type gocachedClient struct {
	baseURL     string       // base URL of the cacher server, like "http://localhost:31364".
	cl          *http.Client // http.Client to use.
	accessToken string       // Bearer token to use in the Authorization header.
	verbose     bool         // log per-request errors when true
}
// drainAndClose reads and throws away a small bounded amount of data. This is a
// best-effort attempt to allow connection reuse; Go's HTTP/1 Transport won't
// reuse a TCP connection unless you fully consume HTTP responses.
func drainAndClose(body io.ReadCloser) {
io.CopyN(io.Discard, body, 4<<10)
body.Close()
}
func tryReadErrorMessage(res *http.Response) []byte {
msg, _ := io.ReadAll(io.LimitReader(res.Body, 4<<10))
return msg
}
// get fetches the outputID and object body for actionID from the gocached
// server, using the single-roundtrip "Want-Object" protocol.
//
// It returns ("", nil, nil) on a cache miss. On success the caller owns
// res.Body and must close it. The deferred drainAndClose fires only on paths
// where the named result resp is still nil - i.e. every early return; on the
// success path the return statement assigns resp before the defer runs, so
// the body is handed to the caller unread.
func (c *gocachedClient) get(ctx context.Context, actionID string) (outputID string, resp *http.Response, err error) {
	// TODO(tomhjp): make sure we timeout if cigocached disappears, but for some
	// reason, this seemed to tank network performance.
	// // Set a generous upper limit on the time we'll wait for a response. We'll
	// // shorten this deadline later once we know the content length.
	// ctx, cancel := context.WithTimeout(ctx, time.Minute)
	// defer cancel()
	req, _ := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/action/"+actionID, nil)
	req.Header.Set("Want-Object", "1") // opt in to single roundtrip protocol
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}
	res, err := c.cl.Do(req)
	if err != nil {
		return "", nil, err
	}
	defer func() {
		// Drain and close the body on every path that doesn't hand it to the
		// caller, so the transport can reuse the connection.
		if resp == nil {
			drainAndClose(res.Body)
		}
	}()
	if res.StatusCode == http.StatusNotFound {
		// Cache miss: not an error.
		return "", nil, nil
	}
	if res.StatusCode != http.StatusOK {
		msg := tryReadErrorMessage(res)
		if c.verbose {
			log.Printf("error GET /action/%s: %v, %s", actionID, res.Status, msg)
		}
		return "", nil, fmt.Errorf("unexpected GET /action/%s status %v", actionID, res.Status)
	}
	outputID = res.Header.Get("Go-Output-Id")
	if outputID == "" {
		return "", nil, fmt.Errorf("missing Go-Output-Id header in response")
	}
	// The caller sizes the disk write from ContentLength, so it must be known.
	if res.ContentLength == -1 {
		return "", nil, fmt.Errorf("no Content-Length from server")
	}
	return outputID, res, nil
}
// put uploads an object to the cacher server under the given action and
// output IDs. size is the exact length of body in bytes; the server signals
// success with 204 No Content.
func (c *gocachedClient) put(ctx context.Context, actionID, outputID string, size int64, body io.Reader) error {
	url := c.baseURL + "/" + actionID + "/" + outputID
	req, _ := http.NewRequestWithContext(ctx, "PUT", url, body)
	// Set the length explicitly so the request is not sent chunked.
	req.ContentLength = size
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}
	res, err := c.cl.Do(req)
	if err != nil {
		if c.verbose {
			log.Printf("error PUT /%s/%s: %v", actionID, outputID, err)
		}
		return err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNoContent {
		return nil
	}
	msg := tryReadErrorMessage(res)
	if c.verbose {
		log.Printf("error PUT /%s/%s: %v, %s", actionID, outputID, res.Status, msg)
	}
	return fmt.Errorf("unexpected PUT /%s/%s status %v", actionID, outputID, res.Status)
}
// fetchStats fetches the cacher server's session stats as an opaque string
// for diagnostics.
//
// It returns an error if the request fails or the server responds with a
// non-200 status, so callers never mistake an error page for stats output.
func (c *gocachedClient) fetchStats() (string, error) {
	req, _ := http.NewRequest("GET", c.baseURL+"/session/stats", nil)
	// Match get/put: only send the Authorization header when a token is
	// actually configured, rather than sending a bare "Bearer " value.
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}
	resp, err := c.cl.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected GET /session/stats status %v", resp.Status)
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

@ -192,34 +192,45 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) {
writef("\t\tdst.%s[k] = append([]%s{}, src.%s[k]...)", fname, n, fname) writef("\t\tdst.%s[k] = append([]%s{}, src.%s[k]...)", fname, n, fname)
writef("\t}") writef("\t}")
writef("}") writef("}")
} else if codegen.IsViewType(elem) || !codegen.ContainsPointers(elem) { } else if codegen.ContainsPointers(elem) {
// If the map values are view types (which are
// immutable and don't need cloning) or don't
// themselves contain pointers, we can just
// clone the map itself.
it.Import("", "maps")
writef("\tdst.%s = maps.Clone(src.%s)", fname, fname)
} else {
// Otherwise we need to clone each element of
// the map using our recursive helper.
writef("if dst.%s != nil {", fname) writef("if dst.%s != nil {", fname)
writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem)) writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem))
writef("\tfor k, v := range src.%s {", fname) writef("\tfor k, v := range src.%s {", fname)
// Use a recursive helper here; this handles switch elem := elem.Underlying().(type) {
// arbitrarily nested maps in addition to case *types.Pointer:
// simpler types. writef("\t\tif v == nil { dst.%s[k] = nil } else {", fname)
writeMapValueClone(mapValueCloneParams{ if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) {
Buf: buf, if _, isIface := base.(*types.Interface); isIface {
It: it, it.Import("", "tailscale.com/types/ptr")
Elem: elem, writef("\t\t\tdst.%s[k] = ptr.To((*v).Clone())", fname)
SrcExpr: "v", } else {
DstExpr: fmt.Sprintf("dst.%s[k]", fname), writef("\t\t\tdst.%s[k] = v.Clone()", fname)
BaseIndent: "\t", }
Depth: 1, } else {
}) it.Import("", "tailscale.com/types/ptr")
writef("\t\t\tdst.%s[k] = ptr.To(*v)", fname)
}
writef("}")
case *types.Interface:
if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil {
if _, isPtr := cloneResultType.(*types.Pointer); isPtr {
writef("\t\tdst.%s[k] = *(v.Clone())", fname)
} else {
writef("\t\tdst.%s[k] = v.Clone()", fname)
}
} else {
writef(`panic("%s (%v) does not have a Clone method")`, fname, elem)
}
default:
writef("\t\tdst.%s[k] = *(v.Clone())", fname)
}
writef("\t}") writef("\t}")
writef("}") writef("}")
} else {
it.Import("", "maps")
writef("\tdst.%s = maps.Clone(src.%s)", fname, fname)
} }
case *types.Interface: case *types.Interface:
// If ft is an interface with a "Clone() ft" method, it can be used to clone the field. // If ft is an interface with a "Clone() ft" method, it can be used to clone the field.
@ -260,99 +271,3 @@ func methodResultType(typ types.Type, method string) types.Type {
} }
return sig.Results().At(0).Type() return sig.Results().At(0).Type()
} }
type mapValueCloneParams struct {
// Buf is the buffer to write generated code to
Buf *bytes.Buffer
// It is the import tracker for managing imports.
It *codegen.ImportTracker
// Elem is the type of the map value to clone
Elem types.Type
// SrcExpr is the expression for the source value (e.g., "v", "v2", "v3")
SrcExpr string
// DstExpr is the expression for the destination (e.g., "dst.Field[k]", "dst.Field[k][k2]")
DstExpr string
// BaseIndent is the "base" indentation string for the generated code
// (i.e. 1 or more tabs). Additional indentation will be added based on
// the Depth parameter.
BaseIndent string
// Depth is the current nesting depth (1 for first level, 2 for second, etc.)
Depth int
}
// writeMapValueClone generates code to clone a map value recursively.
// It handles arbitrary nesting of maps, pointers, and interfaces.
func writeMapValueClone(params mapValueCloneParams) {
indent := params.BaseIndent + strings.Repeat("\t", params.Depth)
writef := func(format string, args ...any) {
fmt.Fprintf(params.Buf, indent+format+"\n", args...)
}
switch elem := params.Elem.Underlying().(type) {
case *types.Pointer:
writef("if %s == nil { %s = nil } else {", params.SrcExpr, params.DstExpr)
if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) {
if _, isIface := base.(*types.Interface); isIface {
params.It.Import("", "tailscale.com/types/ptr")
writef("\t%s = ptr.To((*%s).Clone())", params.DstExpr, params.SrcExpr)
} else {
writef("\t%s = %s.Clone()", params.DstExpr, params.SrcExpr)
}
} else {
params.It.Import("", "tailscale.com/types/ptr")
writef("\t%s = ptr.To(*%s)", params.DstExpr, params.SrcExpr)
}
writef("}")
case *types.Map:
// Recursively handle nested maps
innerElem := elem.Elem()
if codegen.IsViewType(innerElem) || !codegen.ContainsPointers(innerElem) {
// Inner map values don't need deep cloning
params.It.Import("", "maps")
writef("%s = maps.Clone(%s)", params.DstExpr, params.SrcExpr)
} else {
// Inner map values need cloning
keyType := params.It.QualifiedName(elem.Key())
valueType := params.It.QualifiedName(innerElem)
// Generate unique variable names for nested loops based on depth
keyVar := fmt.Sprintf("k%d", params.Depth+1)
valVar := fmt.Sprintf("v%d", params.Depth+1)
writef("if %s == nil {", params.SrcExpr)
writef("\t%s = nil", params.DstExpr)
writef("\tcontinue")
writef("}")
writef("%s = map[%s]%s{}", params.DstExpr, keyType, valueType)
writef("for %s, %s := range %s {", keyVar, valVar, params.SrcExpr)
// Recursively generate cloning code for the nested map value
nestedDstExpr := fmt.Sprintf("%s[%s]", params.DstExpr, keyVar)
writeMapValueClone(mapValueCloneParams{
Buf: params.Buf,
It: params.It,
Elem: innerElem,
SrcExpr: valVar,
DstExpr: nestedDstExpr,
BaseIndent: params.BaseIndent,
Depth: params.Depth + 1,
})
writef("}")
}
case *types.Interface:
if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil {
if _, isPtr := cloneResultType.(*types.Pointer); isPtr {
writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr)
} else {
writef("%s = %s.Clone()", params.DstExpr, params.SrcExpr)
}
} else {
writef(`panic("map value (%%v) does not have a Clone method")`, elem)
}
default:
writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr)
}
}

@ -108,109 +108,3 @@ func TestInterfaceContainer(t *testing.T) {
}) })
} }
} }
func TestMapWithPointers(t *testing.T) {
num1, num2 := 42, 100
orig := &clonerex.MapWithPointers{
Nested: map[string]*int{
"foo": &num1,
"bar": &num2,
},
WithCloneMethod: map[string]*clonerex.SliceContainer{
"container1": {Slice: []*int{&num1, &num2}},
"container2": {Slice: []*int{&num1}},
},
CloneInterface: map[string]clonerex.Cloneable{
"impl1": &clonerex.CloneableImpl{Value: 123},
"impl2": &clonerex.CloneableImpl{Value: 456},
},
}
cloned := orig.Clone()
if !reflect.DeepEqual(orig, cloned) {
t.Errorf("Clone() = %v, want %v", cloned, orig)
}
// Mutate cloned.Nested pointer values
*cloned.Nested["foo"] = 999
if *orig.Nested["foo"] == 999 {
t.Errorf("Clone() aliased memory in Nested: original was modified")
}
// Mutate cloned.WithCloneMethod slice values
*cloned.WithCloneMethod["container1"].Slice[0] = 888
if *orig.WithCloneMethod["container1"].Slice[0] == 888 {
t.Errorf("Clone() aliased memory in WithCloneMethod: original was modified")
}
// Mutate cloned.CloneInterface values
if impl, ok := cloned.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok {
impl.Value = 777
if origImpl, ok := orig.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok {
if origImpl.Value == 777 {
t.Errorf("Clone() aliased memory in CloneInterface: original was modified")
}
}
}
}
func TestDeeplyNestedMap(t *testing.T) {
num := 123
orig := &clonerex.DeeplyNestedMap{
ThreeLevels: map[string]map[string]map[string]int{
"a": {
"b": {"c": 1, "d": 2},
"e": {"f": 3},
},
"g": {
"h": {"i": 4},
},
},
FourLevels: map[string]map[string]map[string]map[string]*clonerex.SliceContainer{
"l1a": {
"l2a": {
"l3a": {
"l4a": {Slice: []*int{&num}},
"l4b": {Slice: []*int{&num, &num}},
},
},
},
},
}
cloned := orig.Clone()
if !reflect.DeepEqual(orig, cloned) {
t.Errorf("Clone() = %v, want %v", cloned, orig)
}
// Mutate the clone's ThreeLevels map
cloned.ThreeLevels["a"]["b"]["c"] = 777
if orig.ThreeLevels["a"]["b"]["c"] == 777 {
t.Errorf("Clone() aliased memory in ThreeLevels: original was modified")
}
// Mutate the clone's FourLevels map at the deepest pointer level
*cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] = 666
if *orig.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] == 666 {
t.Errorf("Clone() aliased memory in FourLevels: original was modified")
}
// Add a new top-level key to the clone's FourLevels map
newNum := 999
cloned.FourLevels["l1b"] = map[string]map[string]map[string]*clonerex.SliceContainer{
"l2b": {
"l3b": {
"l4c": {Slice: []*int{&newNum}},
},
},
}
if _, exists := orig.FourLevels["l1b"]; exists {
t.Errorf("Clone() aliased FourLevels map: new top-level key appeared in original")
}
// Add a new nested key to the clone's FourLevels map
cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4c"] = &clonerex.SliceContainer{Slice: []*int{&newNum}}
if _, exists := orig.FourLevels["l1a"]["l2a"]["l3a"]["l4c"]; exists {
t.Errorf("Clone() aliased FourLevels map: new nested key appeared in original")
}
}

@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS // Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause // SPDX-License-Identifier: BSD-3-Clause
//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap //go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer
// Package clonerex is an example package for the cloner tool. // Package clonerex is an example package for the cloner tool.
package clonerex package clonerex
@ -32,15 +32,3 @@ func (c *CloneableImpl) Clone() Cloneable {
type InterfaceContainer struct { type InterfaceContainer struct {
Interface Cloneable Interface Cloneable
} }
type MapWithPointers struct {
Nested map[string]*int
WithCloneMethod map[string]*SliceContainer
CloneInterface map[string]Cloneable
}
// DeeplyNestedMap tests arbitrary depth of map nesting (3+ levels)
type DeeplyNestedMap struct {
ThreeLevels map[string]map[string]map[string]int
FourLevels map[string]map[string]map[string]map[string]*SliceContainer
}

@ -6,8 +6,6 @@
package clonerex package clonerex
import ( import (
"maps"
"tailscale.com/types/ptr" "tailscale.com/types/ptr"
) )
@ -56,114 +54,9 @@ var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct {
Interface Cloneable Interface Cloneable
}{}) }{})
// Clone makes a deep copy of MapWithPointers.
// The result aliases no memory with the original.
func (src *MapWithPointers) Clone() *MapWithPointers {
if src == nil {
return nil
}
dst := new(MapWithPointers)
*dst = *src
if dst.Nested != nil {
dst.Nested = map[string]*int{}
for k, v := range src.Nested {
if v == nil {
dst.Nested[k] = nil
} else {
dst.Nested[k] = ptr.To(*v)
}
}
}
if dst.WithCloneMethod != nil {
dst.WithCloneMethod = map[string]*SliceContainer{}
for k, v := range src.WithCloneMethod {
if v == nil {
dst.WithCloneMethod[k] = nil
} else {
dst.WithCloneMethod[k] = v.Clone()
}
}
}
if dst.CloneInterface != nil {
dst.CloneInterface = map[string]Cloneable{}
for k, v := range src.CloneInterface {
dst.CloneInterface[k] = v.Clone()
}
}
return dst
}
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _MapWithPointersCloneNeedsRegeneration = MapWithPointers(struct {
Nested map[string]*int
WithCloneMethod map[string]*SliceContainer
CloneInterface map[string]Cloneable
}{})
// Clone makes a deep copy of DeeplyNestedMap.
// The result aliases no memory with the original.
func (src *DeeplyNestedMap) Clone() *DeeplyNestedMap {
if src == nil {
return nil
}
dst := new(DeeplyNestedMap)
*dst = *src
if dst.ThreeLevels != nil {
dst.ThreeLevels = map[string]map[string]map[string]int{}
for k, v := range src.ThreeLevels {
if v == nil {
dst.ThreeLevels[k] = nil
continue
}
dst.ThreeLevels[k] = map[string]map[string]int{}
for k2, v2 := range v {
dst.ThreeLevels[k][k2] = maps.Clone(v2)
}
}
}
if dst.FourLevels != nil {
dst.FourLevels = map[string]map[string]map[string]map[string]*SliceContainer{}
for k, v := range src.FourLevels {
if v == nil {
dst.FourLevels[k] = nil
continue
}
dst.FourLevels[k] = map[string]map[string]map[string]*SliceContainer{}
for k2, v2 := range v {
if v2 == nil {
dst.FourLevels[k][k2] = nil
continue
}
dst.FourLevels[k][k2] = map[string]map[string]*SliceContainer{}
for k3, v3 := range v2 {
if v3 == nil {
dst.FourLevels[k][k2][k3] = nil
continue
}
dst.FourLevels[k][k2][k3] = map[string]*SliceContainer{}
for k4, v4 := range v3 {
if v4 == nil {
dst.FourLevels[k][k2][k3][k4] = nil
} else {
dst.FourLevels[k][k2][k3][k4] = v4.Clone()
}
}
}
}
}
}
return dst
}
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _DeeplyNestedMapCloneNeedsRegeneration = DeeplyNestedMap(struct {
ThreeLevels map[string]map[string]map[string]int
FourLevels map[string]map[string]map[string]map[string]*SliceContainer
}{})
// Clone duplicates src into dst and reports whether it succeeded. // Clone duplicates src into dst and reports whether it succeeded.
// To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>, // To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>,
// where T is one of SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap. // where T is one of SliceContainer,InterfaceContainer.
func Clone(dst, src any) bool { func Clone(dst, src any) bool {
switch src := src.(type) { switch src := src.(type) {
case *SliceContainer: case *SliceContainer:
@ -184,24 +77,6 @@ func Clone(dst, src any) bool {
*dst = src.Clone() *dst = src.Clone()
return true return true
} }
case *MapWithPointers:
switch dst := dst.(type) {
case *MapWithPointers:
*dst = *src.Clone()
return true
case **MapWithPointers:
*dst = src.Clone()
return true
}
case *DeeplyNestedMap:
switch dst := dst.(type) {
case *DeeplyNestedMap:
*dst = *src.Clone()
return true
case **DeeplyNestedMap:
*dst = src.Clone()
return true
}
} }
return false return false
} }

@ -1287,8 +1287,8 @@ type localAPI struct {
notify *ipn.Notify notify *ipn.Notify
} }
func (lc *localAPI) Start() error { func (l *localAPI) Start() error {
path := filepath.Join(lc.FSRoot, "tmp/tailscaled.sock.fake") path := filepath.Join(l.FSRoot, "tmp/tailscaled.sock.fake")
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
return err return err
} }
@ -1298,30 +1298,30 @@ func (lc *localAPI) Start() error {
return err return err
} }
lc.srv = &http.Server{ l.srv = &http.Server{
Handler: lc, Handler: l,
} }
lc.Path = path l.Path = path
lc.cond = sync.NewCond(&lc.Mutex) l.cond = sync.NewCond(&l.Mutex)
go lc.srv.Serve(ln) go l.srv.Serve(ln)
return nil return nil
} }
func (lc *localAPI) Close() { func (l *localAPI) Close() {
lc.srv.Close() l.srv.Close()
} }
func (lc *localAPI) Notify(n *ipn.Notify) { func (l *localAPI) Notify(n *ipn.Notify) {
if n == nil { if n == nil {
return return
} }
lc.Lock() l.Lock()
defer lc.Unlock() defer l.Unlock()
lc.notify = n l.notify = n
lc.cond.Broadcast() l.cond.Broadcast()
} }
func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path { switch r.URL.Path {
case "/localapi/v0/serve-config": case "/localapi/v0/serve-config":
if r.Method != "POST" { if r.Method != "POST" {
@ -1348,11 +1348,11 @@ func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
f.Flush() f.Flush()
} }
enc := json.NewEncoder(w) enc := json.NewEncoder(w)
lc.Lock() l.Lock()
defer lc.Unlock() defer l.Unlock()
for { for {
if lc.notify != nil { if l.notify != nil {
if err := enc.Encode(lc.notify); err != nil { if err := enc.Encode(l.notify); err != nil {
// Usually broken pipe as the test client disconnects. // Usually broken pipe as the test client disconnects.
return return
} }
@ -1360,7 +1360,7 @@ func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
f.Flush() f.Flush()
} }
} }
lc.cond.Wait() l.cond.Wait()
} }
} }

@ -2,16 +2,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus
filippo.io/edwards25519/field from filippo.io/edwards25519 filippo.io/edwards25519/field from filippo.io/edwards25519
github.com/axiomhq/hyperloglog from tailscale.com/derp/derpserver
github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus
💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus
github.com/coder/websocket from tailscale.com/cmd/derper+ github.com/coder/websocket from tailscale.com/cmd/derper+
github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/errd from github.com/coder/websocket
github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket
github.com/coder/websocket/internal/xsync from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket
github.com/creachadair/msync/throttle from github.com/tailscale/setec/client/setec
W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil
github.com/dgryski/go-metro from github.com/axiomhq/hyperloglog
github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/fxamacker/cbor/v2 from tailscale.com/tka
github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json from tailscale.com/types/opt+
github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+
@ -33,9 +30,9 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+
L github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus
L github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs
L github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs
W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket
W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio
W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio
@ -75,7 +72,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+
google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+
💣 google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
tailscale.com from tailscale.com/version tailscale.com from tailscale.com/version
💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+
tailscale.com/client/local from tailscale.com/derp/derpserver tailscale.com/client/local from tailscale.com/derp/derpserver
@ -142,7 +139,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/structs from tailscale.com/ipn+
tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/tkatype from tailscale.com/client/local+
tailscale.com/types/views from tailscale.com/ipn+ tailscale.com/types/views from tailscale.com/ipn+
tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/cibuild from tailscale.com/health
tailscale.com/util/clientmetric from tailscale.com/net/netmon tailscale.com/util/clientmetric from tailscale.com/net/netmon
tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+
tailscale.com/util/ctxkey from tailscale.com/tsweb+ tailscale.com/util/ctxkey from tailscale.com/tsweb+
@ -191,6 +188,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
golang.org/x/net/proxy from tailscale.com/net/netns golang.org/x/net/proxy from tailscale.com/net/netns
D golang.org/x/net/route from tailscale.com/net/netmon+ D golang.org/x/net/route from tailscale.com/net/netmon+
golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+
golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec
golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ golang.org/x/sys/cpu from golang.org/x/crypto/argon2+
LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+
W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+

@ -481,32 +481,32 @@ func newRateLimitedListener(ln net.Listener, limit rate.Limit, burst int) *rateL
return &rateLimitedListener{Listener: ln, lim: rate.NewLimiter(limit, burst)} return &rateLimitedListener{Listener: ln, lim: rate.NewLimiter(limit, burst)}
} }
func (ln *rateLimitedListener) ExpVar() expvar.Var { func (l *rateLimitedListener) ExpVar() expvar.Var {
m := new(metrics.Set) m := new(metrics.Set)
m.Set("counter_accepted_connections", &ln.numAccepts) m.Set("counter_accepted_connections", &l.numAccepts)
m.Set("counter_rejected_connections", &ln.numRejects) m.Set("counter_rejected_connections", &l.numRejects)
return m return m
} }
var errLimitedConn = errors.New("cannot accept connection; rate limited") var errLimitedConn = errors.New("cannot accept connection; rate limited")
func (ln *rateLimitedListener) Accept() (net.Conn, error) { func (l *rateLimitedListener) Accept() (net.Conn, error) {
// Even under a rate limited situation, we accept the connection immediately // Even under a rate limited situation, we accept the connection immediately
// and close it, rather than being slow at accepting new connections. // and close it, rather than being slow at accepting new connections.
// This provides two benefits: 1) it signals to the client that something // This provides two benefits: 1) it signals to the client that something
// is going on on the server, and 2) it prevents new connections from // is going on on the server, and 2) it prevents new connections from
// piling up and occupying resources in the OS kernel. // piling up and occupying resources in the OS kernel.
// The client will retry as needing (with backoffs in place). // The client will retry as needing (with backoffs in place).
cn, err := ln.Listener.Accept() cn, err := l.Listener.Accept()
if err != nil { if err != nil {
return nil, err return nil, err
} }
if !ln.lim.Allow() { if !l.lim.Allow() {
ln.numRejects.Add(1) l.numRejects.Add(1)
cn.Close() cn.Close()
return nil, errLimitedConn return nil, errLimitedConn
} }
ln.numAccepts.Add(1) l.numAccepts.Add(1)
return cn, nil return cn, nil
} }

@ -1,175 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"bytes"
"go/ast"
"go/format"
"go/parser"
"go/token"
"go/types"
"path"
"slices"
"strconv"
"strings"
"tailscale.com/util/must"
)
// mustFormatFile formats a Go source file and adjust "json" imports.
// It panics if there are any parsing errors.
//
// - "encoding/json" is imported under the name "jsonv1" or "jsonv1std"
// - "encoding/json/v2" is rewritten to import "github.com/go-json-experiment/json" instead
// - "encoding/json/jsontext" is rewritten to import "github.com/go-json-experiment/json/jsontext" instead
// - "github.com/go-json-experiment/json" is imported under the name "jsonv2"
// - "github.com/go-json-experiment/json/v1" is imported under the name "jsonv1"
//
// If no changes to the file is made, it returns input.
func mustFormatFile(in []byte) (out []byte) {
fset := token.NewFileSet()
f := must.Get(parser.ParseFile(fset, "", in, parser.ParseComments))
// Check for the existence of "json" imports.
jsonImports := make(map[string][]*ast.ImportSpec)
for _, imp := range f.Imports {
switch pkgPath := must.Get(strconv.Unquote(imp.Path.Value)); pkgPath {
case
"encoding/json",
"encoding/json/v2",
"encoding/json/jsontext",
"github.com/go-json-experiment/json",
"github.com/go-json-experiment/json/v1",
"github.com/go-json-experiment/json/jsontext":
jsonImports[pkgPath] = append(jsonImports[pkgPath], imp)
}
}
if len(jsonImports) == 0 {
return in
}
// Best-effort local type-check of the file
// to resolve local declarations to detect shadowed variables.
typeInfo := &types.Info{Uses: make(map[*ast.Ident]types.Object)}
(&types.Config{
Error: func(err error) {},
}).Check("", fset, []*ast.File{f}, typeInfo)
// Rewrite imports to instead use "github.com/go-json-experiment/json".
// This ensures that code continues to build even if
// goexperiment.jsonv2 is *not* specified.
// As of https://github.com/go-json-experiment/json/pull/186,
// imports to "github.com/go-json-experiment/json" are identical
// to the standard library if built with goexperiment.jsonv2.
for fromPath, toPath := range map[string]string{
"encoding/json/v2": "github.com/go-json-experiment/json",
"encoding/json/jsontext": "github.com/go-json-experiment/json/jsontext",
} {
for _, imp := range jsonImports[fromPath] {
imp.Path.Value = strconv.Quote(toPath)
jsonImports[toPath] = append(jsonImports[toPath], imp)
}
delete(jsonImports, fromPath)
}
// While in a transitory state, where both v1 and v2 json imports
// may exist in our codebase, always explicitly import with
// either jsonv1 or jsonv2 in the package name to avoid ambiguities
// when looking at a particular Marshal or Unmarshal call site.
renames := make(map[string]string) // mapping of old names to new names
deletes := make(map[*ast.ImportSpec]bool) // set of imports to delete
for pkgPath, imps := range jsonImports {
var newName string
switch pkgPath {
case "encoding/json":
newName = "jsonv1"
// If "github.com/go-json-experiment/json/v1" is also imported,
// then use jsonv1std for "encoding/json" to avoid a conflict.
if len(jsonImports["github.com/go-json-experiment/json/v1"]) > 0 {
newName += "std"
}
case "github.com/go-json-experiment/json":
newName = "jsonv2"
case "github.com/go-json-experiment/json/v1":
newName = "jsonv1"
}
// Rename the import if different than expected.
if oldName := importName(imps[0]); oldName != newName && newName != "" {
renames[oldName] = newName
pos := imps[0].Pos() // preserve original positioning
imps[0].Name = ast.NewIdent(newName)
imps[0].Name.NamePos = pos
}
// For all redundant imports, use the first imported name.
for _, imp := range imps[1:] {
renames[importName(imp)] = importName(imps[0])
deletes[imp] = true
}
}
if len(deletes) > 0 {
f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool {
return deletes[imp]
})
for _, decl := range f.Decls {
if genDecl, ok := decl.(*ast.GenDecl); ok && genDecl.Tok == token.IMPORT {
genDecl.Specs = slices.DeleteFunc(genDecl.Specs, func(spec ast.Spec) bool {
return deletes[spec.(*ast.ImportSpec)]
})
}
}
}
if len(renames) > 0 {
ast.Walk(astVisitor(func(n ast.Node) bool {
if sel, ok := n.(*ast.SelectorExpr); ok {
if id, ok := sel.X.(*ast.Ident); ok {
// Just because the selector looks like "json.Marshal"
// does not mean that it is referencing the "json" package.
// There could be a local "json" declaration that shadows
// the package import. Check partial type information
// to see if there was a local declaration.
if obj, ok := typeInfo.Uses[id]; ok {
if _, ok := obj.(*types.PkgName); !ok {
return true
}
}
if newName, ok := renames[id.String()]; ok {
id.Name = newName
}
}
}
return true
}), f)
}
bb := new(bytes.Buffer)
must.Do(format.Node(bb, fset, f))
return must.Get(format.Source(bb.Bytes()))
}
// importName is the local package name used for an import.
// If no explicit local name is used, then it uses string parsing
// to derive the package name from the path, relying on the convention
// that the package name is the base name of the package path.
func importName(imp *ast.ImportSpec) string {
if imp.Name != nil {
return imp.Name.String()
}
pkgPath, _ := strconv.Unquote(imp.Path.Value)
pkgPath = strings.TrimRight(pkgPath, "/v0123456789") // exclude version directories
return path.Base(pkgPath)
}
// astVisitor is a function that implements [ast.Visitor].
type astVisitor func(ast.Node) bool
func (f astVisitor) Visit(node ast.Node) ast.Visitor {
if !f(node) {
return nil
}
return f
}

@ -1,162 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"go/format"
"testing"
"tailscale.com/util/must"
"tailscale.com/util/safediff"
)
// TestFormatFile verifies that mustFormatFile rewrites "json"-related
// imports to the canonical local names (jsonv1, jsonv2, jsonv1std) and
// redirects experimental stdlib paths to their go-json-experiment
// equivalents. Inputs and outputs are normalized with format.Source
// before comparison so only semantic differences are reported.
//
// A previously duplicated table entry (the jsonv1std case appeared
// twice verbatim) has been removed.
func TestFormatFile(t *testing.T) {
	tests := []struct{ in, want string }{{
		// Renames both v1 and v2 imports; a local declaration that
		// shadows the "json" package name must not be rewritten.
		in: `package foobar
import (
	"encoding/json"
	jsonv2exp "github.com/go-json-experiment/json"
)
func main() {
	json.Marshal()
	jsonv2exp.Marshal()
	{
		var json T // deliberately shadow "json" package name
		json.Marshal() // should not be re-written
	}
}
`,
		want: `package foobar
import (
	jsonv1 "encoding/json"
	jsonv2 "github.com/go-json-experiment/json"
)
func main() {
	jsonv1.Marshal()
	jsonv2.Marshal()
	{
		var json T // deliberately shadow "json" package name
		json.Marshal() // should not be re-written
	}
}
`,
	}, {
		// Duplicate imports of the same package collapse into one.
		in: `package foobar
import (
	"github.com/go-json-experiment/json"
	jsonv2exp "github.com/go-json-experiment/json"
)
func main() {
	json.Marshal()
	jsonv2exp.Marshal()
}
`,
		want: `package foobar
import (
	jsonv2 "github.com/go-json-experiment/json"
)
func main() {
	jsonv2.Marshal()
	jsonv2.Marshal()
}
`,
	}, {
		// A lone v1 import gains the explicit jsonv1 local name.
		in: `package foobar
import "github.com/go-json-experiment/json/v1"
func main() {
	json.Marshal()
}
`,
		want: `package foobar
import jsonv1 "github.com/go-json-experiment/json/v1"
func main() {
	jsonv1.Marshal()
}
`,
	}, {
		// When both v1 packages are imported, the stdlib one becomes
		// jsonv1std and the experiment one becomes jsonv1.
		in: `package foobar
import (
	"encoding/json"
	jsonv1in2 "github.com/go-json-experiment/json/v1"
)
func main() {
	json.Marshal()
	jsonv1in2.Marshal()
}
`,
		want: `package foobar
import (
	jsonv1std "encoding/json"
	jsonv1 "github.com/go-json-experiment/json/v1"
)
func main() {
	jsonv1std.Marshal()
	jsonv1.Marshal()
}
`,
	}, {
		// Experimental stdlib v2 paths are redirected to the
		// go-json-experiment equivalents.
		in: `package foobar
import (
	"encoding/json"
	j2 "encoding/json/v2"
	"encoding/json/jsontext"
)
func main() {
	json.Marshal()
	j2.Marshal()
	jsontext.NewEncoder
}
`,
		want: `package foobar
import (
	jsonv1 "encoding/json"
	jsonv2 "github.com/go-json-experiment/json"
	"github.com/go-json-experiment/json/jsontext"
)
func main() {
	jsonv1.Marshal()
	jsonv2.Marshal()
	jsontext.NewEncoder
}
`,
	}}
	for _, tt := range tests {
		got := string(must.Get(format.Source([]byte(tt.in))))
		got = string(mustFormatFile([]byte(got)))
		want := string(must.Get(format.Source([]byte(tt.want))))
		if got != want {
			diff, _ := safediff.Lines(got, want, -1)
			t.Errorf("mismatch (-got +want)\n%s", diff)
			t.Error(got)
			t.Error(want)
		}
	}
}

@ -1,124 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// The jsonimports tool formats all Go source files in the repository
// to enforce that "json" imports are consistent.
//
// With Go 1.25, the "encoding/json/v2" and "encoding/json/jsontext"
// packages are now available under goexperiment.jsonv2.
// This leads to possible confusion over the following:
//
// - "encoding/json"
// - "encoding/json/v2"
// - "encoding/json/jsontext"
// - "github.com/go-json-experiment/json/v1"
// - "github.com/go-json-experiment/json"
// - "github.com/go-json-experiment/json/jsontext"
//
// In order to enforce consistent usage, we apply the following rules:
//
// - Until the Go standard library formally accepts "encoding/json/v2"
// and "encoding/json/jsontext" into the standard library
// (i.e., they are no longer considered experimental),
// we forbid any code from directly importing those packages.
// Go code should instead import "github.com/go-json-experiment/json"
// and "github.com/go-json-experiment/json/jsontext".
// The latter packages contain aliases to the standard library
// if built on Go 1.25 with the goexperiment.jsonv2 tag specified.
//
// - Imports of "encoding/json" or "github.com/go-json-experiment/json/v1"
// must be explicitly imported under the package name "jsonv1".
// If both packages need to be imported, then the former should
// be imported under the package name "jsonv1std".
//
// - Imports of "github.com/go-json-experiment/json"
// must be explicitly imported under the package name "jsonv2".
//
// The latter two rules exist to provide clarity when reading code.
// Without them, it is unclear whether "json.Marshal" refers to v1 or v2.
// With them, however, it is clear that "jsonv1.Marshal" is calling v1 and
// that "jsonv2.Marshal" is calling v2.
//
// TODO(@joetsai): At this present moment, there is no guidance given on
// whether to use v1 or v2 for newly written Go source code.
// I will write a document in the near future providing more guidance.
// Feel free to continue using v1 "encoding/json" as you are accustomed to.
package main
import (
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"sync"
"tailscale.com/syncs"
"tailscale.com/util/must"
"tailscale.com/util/safediff"
)
// main formats the "json" imports of every Go source file tracked by Git.
// It prints a unified diff for each file that needs changes and, with
// -update, rewrites those files in place. If any file needs formatting
// and -update was not given, it exits with a non-zero status.
func main() {
	update := flag.Bool("update", false, "update all Go source files")
	flag.Parse()

	// Change working directory to the Git repository root so that the
	// relative paths reported by "git ls-files" resolve correctly.
	repoRoot := strings.TrimSuffix(string(must.Get(exec.Command(
		"git", "rev-parse", "--show-toplevel",
	).Output())), "\n")
	must.Do(os.Chdir(repoRoot))

	// Iterate over all indexed files in the Git repository,
	// formatting them concurrently with bounded parallelism.
	var printMu sync.Mutex // guards stdout printing and numDiffs
	var group sync.WaitGroup
	sema := syncs.NewSemaphore(runtime.NumCPU())
	var numDiffs int // guarded by printMu
	files := string(must.Get(exec.Command("git", "ls-files").Output()))
	for file := range strings.Lines(files) {
		sema.Acquire()
		group.Go(func() {
			defer sema.Release()
			// Ignore non-Go source files.
			file = strings.TrimSuffix(file, "\n")
			if !strings.HasSuffix(file, ".go") {
				return
			}
			// Format all "json" imports in the Go source file.
			srcIn := must.Get(os.ReadFile(file))
			srcOut := mustFormatFile(srcIn)
			// Print differences with each formatted file.
			if !bytes.Equal(srcIn, srcOut) {
				printMu.Lock()
				// numDiffs must be incremented while holding printMu;
				// these closures run concurrently, and an unguarded
				// numDiffs++ is a data race (and loses counts).
				numDiffs++
				fmt.Println(file)
				lines, _ := safediff.Lines(string(srcIn), string(srcOut), -1)
				for line := range strings.Lines(lines) {
					fmt.Print("\t", line)
				}
				fmt.Println()
				printMu.Unlock()
				// If -update is specified, write out the changes,
				// preserving the file's existing permission bits.
				if *update {
					mode := must.Get(os.Stat(file)).Mode()
					must.Do(os.WriteFile(file, srcOut, mode))
				}
			}
		})
	}
	group.Wait()

	// Report whether any differences were detected.
	if numDiffs > 0 && !*update {
		fmt.Printf(`%d files with "json" imports that need formatting`+"\n", numDiffs)
		fmt.Println("Please run:")
		fmt.Println("\t./tool/go run tailscale.com/cmd/jsonimports -update")
		os.Exit(1)
	}
}

@ -157,6 +157,12 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s
// 1. Check there isn't a Tailscale Service with the same hostname // 1. Check there isn't a Tailscale Service with the same hostname
// already created and not owned by this ProxyGroup. // already created and not owned by this ProxyGroup.
existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName)
if isErrorFeatureFlagNotEnabled(err) {
logger.Warn(msgFeatureFlagNotEnabled)
r.recorder.Event(pg, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled)
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msgFeatureFlagNotEnabled, pg.Generation, r.clock, logger)
return nil
}
if err != nil && !isErrorTailscaleServiceNotFound(err) { if err != nil && !isErrorTailscaleServiceNotFound(err) {
return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err)
} }

@ -182,7 +182,9 @@ func TestAPIServerProxyReconciler(t *testing.T) {
expectEqual(t, fc, certSecretRoleBinding(pg, ns, defaultDomain)) expectEqual(t, fc, certSecretRoleBinding(pg, ns, defaultDomain))
// Simulate certs being issued; should observe AdvertiseServices config change. // Simulate certs being issued; should observe AdvertiseServices config change.
populateTLSSecret(t, fc, pgName, defaultDomain) if err := populateTLSSecret(t.Context(), fc, pgName, defaultDomain); err != nil {
t.Fatalf("populating TLS Secret: %v", err)
}
expectReconciled(t, r, "", pgName) expectReconciled(t, r, "", pgName)
expectedCfg.AdvertiseServices = []string{"svc:" + pgName} expectedCfg.AdvertiseServices = []string{"svc:" + pgName}
@ -245,7 +247,9 @@ func TestAPIServerProxyReconciler(t *testing.T) {
expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain)
// Check we get the new hostname in the status once ready. // Check we get the new hostname in the status once ready.
populateTLSSecret(t, fc, pgName, updatedDomain) if err := populateTLSSecret(t.Context(), fc, pgName, updatedDomain); err != nil {
t.Fatalf("populating TLS Secret: %v", err)
}
mustUpdate(t, fc, "operator-ns", "test-pg-0", func(s *corev1.Secret) { mustUpdate(t, fc, "operator-ns", "test-pg-0", func(s *corev1.Secret) {
s.Data["profile-foo"] = []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`) s.Data["profile-foo"] = []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`)
}) })

@ -12,7 +12,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/errd from github.com/coder/websocket
github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket
github.com/coder/websocket/internal/xsync from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket
github.com/creachadair/msync/trigger from tailscale.com/logtail
💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump
W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+
W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+
@ -71,9 +70,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0
github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd
github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+
💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+
github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe+
github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd
github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter
💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag
@ -86,7 +84,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
💣 github.com/modern-go/reflect2 from github.com/json-iterator/go 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go
github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+ github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+
github.com/opencontainers/go-digest from github.com/distribution/reference github.com/opencontainers/go-digest from github.com/distribution/reference
github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal
github.com/pkg/errors from github.com/evanphx/json-patch/v5+ github.com/pkg/errors from github.com/evanphx/json-patch/v5+
D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp
@ -95,7 +92,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics+ github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics+
github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+ github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+
github.com/prometheus/client_golang/prometheus/promhttp/internal from github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+
@ -182,10 +178,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
google.golang.org/protobuf/reflect/protoregistry from github.com/golang/protobuf/proto+ google.golang.org/protobuf/reflect/protoregistry from github.com/golang/protobuf/proto+
google.golang.org/protobuf/runtime/protoiface from github.com/golang/protobuf/proto+ google.golang.org/protobuf/runtime/protoiface from github.com/golang/protobuf/proto+
google.golang.org/protobuf/runtime/protoimpl from github.com/golang/protobuf/proto+ google.golang.org/protobuf/runtime/protoimpl from github.com/golang/protobuf/proto+
💣 google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3+ google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3+
💣 google.golang.org/protobuf/types/gofeaturespb from google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/types/gofeaturespb from google.golang.org/protobuf/reflect/protodesc
💣 google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+ google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+
💣 google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
gopkg.in/evanphx/json-patch.v4 from k8s.io/client-go/testing gopkg.in/evanphx/json-patch.v4 from k8s.io/client-go/testing
gopkg.in/inf.v0 from k8s.io/apimachinery/pkg/api/resource gopkg.in/inf.v0 from k8s.io/apimachinery/pkg/api/resource
gopkg.in/yaml.v3 from github.com/go-openapi/swag+ gopkg.in/yaml.v3 from github.com/go-openapi/swag+
@ -727,11 +723,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+
tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/c2n from tailscale.com/tsnet
tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock
tailscale.com/feature/condregister/identityfederation from tailscale.com/tsnet
tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet
tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet
tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet
tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation
tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey
tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper
tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/feature/syspolicy from tailscale.com/logpolicy
@ -830,7 +824,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb from tailscale.com/util/eventbus
tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/tsweb/varz from tailscale.com/util/usermetric+
tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+
tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/bools from tailscale.com/tsnet
tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+
tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/empty from tailscale.com/ipn+
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ tailscale.com/types/ipproto from tailscale.com/net/flowtrack+
@ -853,7 +847,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/types/views from tailscale.com/appc+ tailscale.com/types/views from tailscale.com/appc+
tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+
tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+
tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/cibuild from tailscale.com/health
tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+
tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+
LW tailscale.com/util/cmpver from tailscale.com/net/dns+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+
@ -1001,7 +995,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
crypto/ecdsa from crypto/tls+ crypto/ecdsa from crypto/tls+
crypto/ed25519 from crypto/tls+ crypto/ed25519 from crypto/tls+
crypto/elliptic from crypto/ecdsa+ crypto/elliptic from crypto/ecdsa+
crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/fips140 from crypto/tls/internal/fips140tls
crypto/hkdf from crypto/internal/hpke+ crypto/hkdf from crypto/internal/hpke+
crypto/hmac from crypto/tls+ crypto/hmac from crypto/tls+
crypto/internal/boring from crypto/aes+ crypto/internal/boring from crypto/aes+

@ -26,4 +26,4 @@ maintainers:
version: 0.1.0 version: 0.1.0
# appVersion will be set to Tailscale repo tag at release time. # appVersion will be set to Tailscale repo tag at release time.
appVersion: "stable" appVersion: "unstable"

@ -3,8 +3,8 @@
# If old setting used, enable both old (operator) and new (ProxyGroup) workflows. # If old setting used, enable both old (operator) and new (ProxyGroup) workflows.
# If new setting used, enable only new workflow. # If new setting used, enable only new workflow.
{{ if or (eq (toString .Values.apiServerProxyConfig.mode) "true") {{ if or (eq .Values.apiServerProxyConfig.mode "true")
(eq (toString .Values.apiServerProxyConfig.allowImpersonation) "true") }} (eq .Values.apiServerProxyConfig.allowImpersonation "true") }}
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
@ -25,7 +25,7 @@ kind: ClusterRoleBinding
metadata: metadata:
name: tailscale-auth-proxy name: tailscale-auth-proxy
subjects: subjects:
{{- if eq (toString .Values.apiServerProxyConfig.mode) "true" }} {{- if eq .Values.apiServerProxyConfig.mode "true" }}
- kind: ServiceAccount - kind: ServiceAccount
name: operator name: operator
namespace: {{ .Release.Namespace }} namespace: {{ .Release.Namespace }}

@ -34,9 +34,7 @@ spec:
securityContext: securityContext:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- if or .Values.oauth.clientSecret .Values.oauth.audience }}
volumes: volumes:
{{- if .Values.oauth.clientSecret }}
- name: oauth - name: oauth
{{- with .Values.oauthSecretVolume }} {{- with .Values.oauthSecretVolume }}
{{- toYaml . | nindent 10 }} {{- toYaml . | nindent 10 }}
@ -44,17 +42,6 @@ spec:
secret: secret:
secretName: operator-oauth secretName: operator-oauth
{{- end }} {{- end }}
{{- else }}
- name: oidc-jwt
projected:
defaultMode: 420
sources:
- serviceAccountToken:
audience: {{ .Values.oauth.audience }}
expirationSeconds: 3600
path: token
{{- end }}
{{- end }}
containers: containers:
- name: operator - name: operator
{{- with .Values.operatorConfig.securityContext }} {{- with .Values.operatorConfig.securityContext }}
@ -85,15 +72,10 @@ spec:
value: {{ .Values.loginServer }} value: {{ .Values.loginServer }}
- name: OPERATOR_INGRESS_CLASS_NAME - name: OPERATOR_INGRESS_CLASS_NAME
value: {{ .Values.ingressClass.name }} value: {{ .Values.ingressClass.name }}
{{- if .Values.oauth.clientSecret }}
- name: CLIENT_ID_FILE - name: CLIENT_ID_FILE
value: /oauth/client_id value: /oauth/client_id
- name: CLIENT_SECRET_FILE - name: CLIENT_SECRET_FILE
value: /oauth/client_secret value: /oauth/client_secret
{{- else if .Values.oauth.audience }}
- name: CLIENT_ID
value: {{ .Values.oauth.clientId }}
{{- end }}
{{- $proxyTag := printf ":%s" ( .Values.proxyConfig.image.tag | default .Chart.AppVersion )}} {{- $proxyTag := printf ":%s" ( .Values.proxyConfig.image.tag | default .Chart.AppVersion )}}
- name: PROXY_IMAGE - name: PROXY_IMAGE
value: {{ coalesce .Values.proxyConfig.image.repo .Values.proxyConfig.image.repository }}{{- if .Values.proxyConfig.image.digest -}}{{ printf "@%s" .Values.proxyConfig.image.digest}}{{- else -}}{{ printf "%s" $proxyTag }}{{- end }} value: {{ coalesce .Values.proxyConfig.image.repo .Values.proxyConfig.image.repository }}{{- if .Values.proxyConfig.image.digest -}}{{ printf "@%s" .Values.proxyConfig.image.digest}}{{- else -}}{{ printf "%s" $proxyTag }}{{- end }}
@ -118,18 +100,10 @@ spec:
{{- with .Values.operatorConfig.extraEnv }} {{- with .Values.operatorConfig.extraEnv }}
{{- toYaml . | nindent 12 }} {{- toYaml . | nindent 12 }}
{{- end }} {{- end }}
{{- if or .Values.oauth.clientSecret .Values.oauth.audience }}
volumeMounts: volumeMounts:
{{- if .Values.oauth.clientSecret }}
- name: oauth - name: oauth
mountPath: /oauth mountPath: /oauth
readOnly: true readOnly: true
{{- else }}
- name: oidc-jwt
mountPath: /var/run/secrets/tailscale/serviceaccount
readOnly: true
{{- end }}
{{- end }}
{{- with .Values.operatorConfig.nodeSelector }} {{- with .Values.operatorConfig.nodeSelector }}
nodeSelector: nodeSelector:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}

@ -1,7 +1,7 @@
# Copyright (c) Tailscale Inc & AUTHORS # Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
{{ if and .Values.oauth .Values.oauth.clientId .Values.oauth.clientSecret -}} {{ if and .Values.oauth .Values.oauth.clientId -}}
apiVersion: v1 apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:

@ -1,20 +1,13 @@
# Copyright (c) Tailscale Inc & AUTHORS # Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# Operator oauth credentials. If unset a Secret named operator-oauth must be # Operator oauth credentials. If set a Kubernetes Secret with the provided
# precreated or oauthSecretVolume needs to be adjusted. This block will be # values will be created in the operator namespace. If unset a Secret named
# overridden by oauthSecretVolume, if set. # operator-oauth must be precreated or oauthSecretVolume needs to be adjusted.
oauth: # This block will be overridden by oauthSecretVolume, if set.
# The Client ID the operator will authenticate with. oauth: {}
clientId: "" # clientId: ""
# If set a Kubernetes Secret with the provided value will be created in # clientSecret: ""
# the operator namespace, and mounted into the operator Pod. Takes precedence
# over oauth.audience.
clientSecret: ""
# The audience for oauth.clientId if using a workload identity federation
# OAuth client. Mutually exclusive with oauth.clientSecret.
# See https://tailscale.com/kb/1581/workload-identity-federation.
audience: ""
# URL of the control plane to be used by all resources managed by the operator. # URL of the control plane to be used by all resources managed by the operator.
loginServer: "" loginServer: ""

@ -68,11 +68,6 @@ spec:
Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
Required if S3 storage is not set up, to ensure that recordings are accessible. Required if S3 storage is not set up, to ensure that recordings are accessible.
type: boolean type: boolean
replicas:
description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1.
type: integer
format: int32
minimum: 0
statefulSet: statefulSet:
description: |- description: |-
Configuration parameters for the Recorder's StatefulSet. The operator Configuration parameters for the Recorder's StatefulSet. The operator
@ -1688,9 +1683,6 @@ spec:
items: items:
type: string type: string
pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$
x-kubernetes-validations:
- rule: '!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))'
message: S3 storage must be used when deploying multiple Recorder replicas
status: status:
description: |- description: |-
RecorderStatus describes the status of the recorder. This is set RecorderStatus describes the status of the recorder. This is set

@ -3348,11 +3348,6 @@ spec:
Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
Required if S3 storage is not set up, to ensure that recordings are accessible. Required if S3 storage is not set up, to ensure that recordings are accessible.
type: boolean type: boolean
replicas:
description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1.
format: int32
minimum: 0
type: integer
statefulSet: statefulSet:
description: |- description: |-
Configuration parameters for the Recorder's StatefulSet. The operator Configuration parameters for the Recorder's StatefulSet. The operator
@ -4969,9 +4964,6 @@ spec:
type: string type: string
type: array type: array
type: object type: object
x-kubernetes-validations:
- message: S3 storage must be used when deploying multiple Recorder replicas
rule: '!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))'
status: status:
description: |- description: |-
RecorderStatus describes the status of the recorder. This is set RecorderStatus describes the status of the recorder. This is set
@ -5374,7 +5366,7 @@ spec:
- name: CLIENT_SECRET_FILE - name: CLIENT_SECRET_FILE
value: /oauth/client_secret value: /oauth/client_secret
- name: PROXY_IMAGE - name: PROXY_IMAGE
value: tailscale/tailscale:stable value: tailscale/tailscale:unstable
- name: PROXY_TAGS - name: PROXY_TAGS
value: tag:k8s value: tag:k8s
- name: APISERVER_PROXY - name: APISERVER_PROXY
@ -5389,7 +5381,7 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: metadata.uid fieldPath: metadata.uid
image: tailscale/k8s-operator:stable image: tailscale/k8s-operator:unstable
imagePullPolicy: Always imagePullPolicy: Always
name: operator name: operator
volumeMounts: volumeMounts:

@ -36,21 +36,21 @@ type egressEpsReconciler struct {
// It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired // It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired
// configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready. // configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready.
func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
lg := er.logger.With("Service", req.NamespacedName) l := er.logger.With("Service", req.NamespacedName)
lg.Debugf("starting reconcile") l.Debugf("starting reconcile")
defer lg.Debugf("reconcile finished") defer l.Debugf("reconcile finished")
eps := new(discoveryv1.EndpointSlice) eps := new(discoveryv1.EndpointSlice)
err = er.Get(ctx, req.NamespacedName, eps) err = er.Get(ctx, req.NamespacedName, eps)
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
lg.Debugf("EndpointSlice not found") l.Debugf("EndpointSlice not found")
return reconcile.Result{}, nil return reconcile.Result{}, nil
} }
if err != nil { if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err) return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err)
} }
if !eps.DeletionTimestamp.IsZero() { if !eps.DeletionTimestamp.IsZero() {
lg.Debugf("EnpointSlice is being deleted") l.Debugf("EnpointSlice is being deleted")
return res, nil return res, nil
} }
@ -64,7 +64,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
} }
err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc) err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc)
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
lg.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name) l.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name)
return res, nil return res, nil
} }
if err != nil { if err != nil {
@ -77,7 +77,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
oldEps := eps.DeepCopy() oldEps := eps.DeepCopy()
tailnetSvc := tailnetSvcName(svc) tailnetSvc := tailnetSvcName(svc)
lg = lg.With("tailnet-service-name", tailnetSvc) l = l.With("tailnet-service-name", tailnetSvc)
// Retrieve the desired tailnet service configuration from the ConfigMap. // Retrieve the desired tailnet service configuration from the ConfigMap.
proxyGroupName := eps.Labels[labelProxyGroup] proxyGroupName := eps.Labels[labelProxyGroup]
@ -88,12 +88,12 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
if cfgs == nil { if cfgs == nil {
// TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later // TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later
// got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow. // got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow.
lg.Debugf("No egress config found, likely because ProxyGroup has not been created") l.Debugf("No egress config found, likely because ProxyGroup has not been created")
return res, nil return res, nil
} }
cfg, ok := (*cfgs)[tailnetSvc] cfg, ok := (*cfgs)[tailnetSvc]
if !ok { if !ok {
lg.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc)
return res, nil return res, nil
} }
@ -105,7 +105,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
} }
newEndpoints := make([]discoveryv1.Endpoint, 0) newEndpoints := make([]discoveryv1.Endpoint, 0)
for _, pod := range podList.Items { for _, pod := range podList.Items {
ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, lg) ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, l)
if err != nil { if err != nil {
return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err) return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err)
} }
@ -130,7 +130,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
// run a cleanup for deleted Pods etc. // run a cleanup for deleted Pods etc.
eps.Endpoints = newEndpoints eps.Endpoints = newEndpoints
if !reflect.DeepEqual(eps, oldEps) { if !reflect.DeepEqual(eps, oldEps) {
lg.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") l.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods")
if err := er.Update(ctx, eps); err != nil { if err := er.Update(ctx, eps); err != nil {
return res, fmt.Errorf("error updating EndpointSlice: %w", err) return res, fmt.Errorf("error updating EndpointSlice: %w", err)
} }
@ -154,11 +154,11 @@ func podIPv4(pod *corev1.Pod) (string, error) {
// podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to // podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to
// route traffic to the given tailnet service. It retrieves the proxy's state Secret and compares the tailnet service // route traffic to the given tailnet service. It retrieves the proxy's state Secret and compares the tailnet service
// status written there to the desired service configuration. // status written there to the desired service configuration.
func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, lg *zap.SugaredLogger) (bool, error) { func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, l *zap.SugaredLogger) (bool, error) {
lg = lg.With("proxy_pod", pod.Name) l = l.With("proxy_pod", pod.Name)
lg.Debugf("checking whether proxy is ready to route to egress service") l.Debugf("checking whether proxy is ready to route to egress service")
if !pod.DeletionTimestamp.IsZero() { if !pod.DeletionTimestamp.IsZero() {
lg.Debugf("proxy Pod is being deleted, ignore") l.Debugf("proxy Pod is being deleted, ignore")
return false, nil return false, nil
} }
podIP, err := podIPv4(&pod) podIP, err := podIPv4(&pod)
@ -166,7 +166,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
return false, fmt.Errorf("error determining Pod IP address: %v", err) return false, fmt.Errorf("error determining Pod IP address: %v", err)
} }
if podIP == "" { if podIP == "" {
lg.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported")
return false, nil return false, nil
} }
stateS := &corev1.Secret{ stateS := &corev1.Secret{
@ -177,7 +177,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
} }
err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS) err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS)
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
lg.Debugf("proxy does not have a state Secret, waiting...") l.Debugf("proxy does not have a state Secret, waiting...")
return false, nil return false, nil
} }
if err != nil { if err != nil {
@ -185,7 +185,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
} }
svcStatusBS := stateS.Data[egressservices.KeyEgressServices] svcStatusBS := stateS.Data[egressservices.KeyEgressServices]
if len(svcStatusBS) == 0 { if len(svcStatusBS) == 0 {
lg.Debugf("proxy's state Secret does not contain egress services status, waiting...") l.Debugf("proxy's state Secret does not contain egress services status, waiting...")
return false, nil return false, nil
} }
svcStatus := &egressservices.Status{} svcStatus := &egressservices.Status{}
@ -193,22 +193,22 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
return false, fmt.Errorf("error unmarshalling egress service status: %w", err) return false, fmt.Errorf("error unmarshalling egress service status: %w", err)
} }
if !strings.EqualFold(podIP, svcStatus.PodIPv4) { if !strings.EqualFold(podIP, svcStatus.PodIPv4) {
lg.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP)
return false, nil return false, nil
} }
st, ok := (*svcStatus).Services[tailnetSvcName] st, ok := (*svcStatus).Services[tailnetSvcName]
if !ok { if !ok {
lg.Infof("proxy's state Secret does not have egress service status, waiting...") l.Infof("proxy's state Secret does not have egress service status, waiting...")
return false, nil return false, nil
} }
if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) { if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) {
lg.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) l.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget)
return false, nil return false, nil
} }
if !reflect.DeepEqual(cfg.Ports, st.Ports) { if !reflect.DeepEqual(cfg.Ports, st.Ports) {
lg.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) l.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports)
return false, nil return false, nil
} }
lg.Debugf("proxy is ready to route traffic to egress service") l.Debugf("proxy is ready to route traffic to egress service")
return true, nil return true, nil
} }

@ -71,9 +71,9 @@ type egressPodsReconciler struct {
// If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the // If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the
// readiness condition for backwards compatibility reasons. // readiness condition for backwards compatibility reasons.
func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
lg := er.logger.With("Pod", req.NamespacedName) l := er.logger.With("Pod", req.NamespacedName)
lg.Debugf("starting reconcile") l.Debugf("starting reconcile")
defer lg.Debugf("reconcile finished") defer l.Debugf("reconcile finished")
pod := new(corev1.Pod) pod := new(corev1.Pod)
err = er.Get(ctx, req.NamespacedName, pod) err = er.Get(ctx, req.NamespacedName, pod)
@ -84,11 +84,11 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err) return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err)
} }
if !pod.DeletionTimestamp.IsZero() { if !pod.DeletionTimestamp.IsZero() {
lg.Debugf("Pod is being deleted, do nothing") l.Debugf("Pod is being deleted, do nothing")
return res, nil return res, nil
} }
if pod.Labels[LabelParentType] != proxyTypeProxyGroup { if pod.Labels[LabelParentType] != proxyTypeProxyGroup {
lg.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod")
return res, nil return res, nil
} }
@ -97,7 +97,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool { if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool {
return r.ConditionType == tsEgressReadinessGate return r.ConditionType == tsEgressReadinessGate
}) { }) {
lg.Debug("Pod does not have egress readiness gate set, skipping") l.Debug("Pod does not have egress readiness gate set, skipping")
return res, nil return res, nil
} }
@ -107,7 +107,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err) return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err)
} }
if pg.Spec.Type != typeEgress { if pg.Spec.Type != typeEgress {
lg.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type)
return res, nil return res, nil
} }
// Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup. // Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup.
@ -125,7 +125,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
return c.Type == tsEgressReadinessGate return c.Type == tsEgressReadinessGate
}) })
if idx != -1 { if idx != -1 {
lg.Debugf("Pod is already ready, do nothing") l.Debugf("Pod is already ready, do nothing")
return res, nil return res, nil
} }
@ -134,7 +134,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
for _, svc := range svcs.Items { for _, svc := range svcs.Items {
s := svc s := svc
go func() { go func() {
ll := lg.With("service_name", s.Name) ll := l.With("service_name", s.Name)
d := retrieveClusterDomain(er.tsNamespace, ll) d := retrieveClusterDomain(er.tsNamespace, ll)
healthCheckAddr := healthCheckForSvc(&s, d) healthCheckAddr := healthCheckForSvc(&s, d)
if healthCheckAddr == "" { if healthCheckAddr == "" {
@ -175,25 +175,25 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
err = errors.Join(err, e) err = errors.Join(err, e)
} }
if err != nil { if err != nil {
return res, fmt.Errorf("error verifying connectivity: %w", err) return res, fmt.Errorf("error verifying conectivity: %w", err)
} }
if rm := routesMissing.Load(); rm { if rm := routesMissing.Load(); rm {
lg.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...")
return reconcile.Result{RequeueAfter: shortRequeue}, nil return reconcile.Result{RequeueAfter: shortRequeue}, nil
} }
if err := er.setPodReady(ctx, pod, lg); err != nil { if err := er.setPodReady(ctx, pod, l); err != nil {
return res, fmt.Errorf("error setting Pod as ready: %w", err) return res, fmt.Errorf("error setting Pod as ready: %w", err)
} }
return res, nil return res, nil
} }
func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, lg *zap.SugaredLogger) error { func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error {
if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool { if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool {
return c.Type == tsEgressReadinessGate return c.Type == tsEgressReadinessGate
}) { }) {
return nil return nil
} }
lg.Infof("Pod is ready to route traffic to all egress targets") l.Infof("Pod is ready to route traffic to all egress targets")
pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{ pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
Type: tsEgressReadinessGate, Type: tsEgressReadinessGate,
Status: corev1.ConditionTrue, Status: corev1.ConditionTrue,
@ -216,11 +216,11 @@ const (
) )
// lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check. // lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check.
func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, lg *zap.SugaredLogger) (healthCheckState, error) { func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) {
if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool { if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool {
return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true" return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true"
}) { }) {
lg.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service")
return cannotVerify, nil return cannotVerify, nil
} }
wantsIP, err := podIPv4(pod) wantsIP, err := podIPv4(pod)
@ -241,14 +241,14 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c
req.Close = true req.Close = true
resp, err := er.httpClient.Do(req) resp, err := er.httpClient.Do(req)
if err != nil { if err != nil {
// This is most likely because this is the first Pod and is not yet added to service endpoints. Other // This is most likely because this is the first Pod and is not yet added to Service endoints. Other
// error types are possible, but checking for those would likely make the system too fragile. // error types are possible, but checking for those would likely make the system too fragile.
return unreachable, nil return unreachable, nil
} }
defer resp.Body.Close() defer resp.Body.Close()
gotIP := resp.Header.Get(kubetypes.PodIPv4Header) gotIP := resp.Header.Get(kubetypes.PodIPv4Header)
if gotIP == "" { if gotIP == "" {
lg.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service") l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service")
return cannotVerify, nil return cannotVerify, nil
} }
if !strings.EqualFold(wantsIP, gotIP) { if !strings.EqualFold(wantsIP, gotIP) {

@ -47,13 +47,13 @@ type egressSvcsReadinessReconciler struct {
// route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress // route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress
// service to determine how many replicas are currently able to route traffic. // service to determine how many replicas are currently able to route traffic.
func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
lg := esrr.logger.With("Service", req.NamespacedName) l := esrr.logger.With("Service", req.NamespacedName)
lg.Debugf("starting reconcile") l.Debugf("starting reconcile")
defer lg.Debugf("reconcile finished") defer l.Debugf("reconcile finished")
svc := new(corev1.Service) svc := new(corev1.Service)
if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) {
lg.Debugf("Service not found") l.Debugf("Service not found")
return res, nil return res, nil
} else if err != nil { } else if err != nil {
return res, fmt.Errorf("failed to get Service: %w", err) return res, fmt.Errorf("failed to get Service: %w", err)
@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
) )
oldStatus := svc.Status.DeepCopy() oldStatus := svc.Status.DeepCopy()
defer func() { defer func() {
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l)
if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) { if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
err = errors.Join(err, esrr.Status().Update(ctx, svc)) err = errors.Join(err, esrr.Status().Update(ctx, svc))
} }
@ -79,7 +79,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
return res, err return res, err
} }
if eps == nil { if eps == nil {
lg.Infof("EndpointSlice for Service does not yet exist, waiting...") l.Infof("EndpointSlice for Service does not yet exist, waiting...")
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
st = metav1.ConditionFalse st = metav1.ConditionFalse
return res, nil return res, nil
@ -91,7 +91,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
} }
err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg) err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg)
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
lg.Infof("ProxyGroup for Service does not exist, waiting...") l.Infof("ProxyGroup for Service does not exist, waiting...")
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
st = metav1.ConditionFalse st = metav1.ConditionFalse
return res, nil return res, nil
@ -103,7 +103,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
return res, err return res, err
} }
if !tsoperator.ProxyGroupAvailable(pg) { if !tsoperator.ProxyGroupAvailable(pg) {
lg.Infof("ProxyGroup for Service is not ready, waiting...") l.Infof("ProxyGroup for Service is not ready, waiting...")
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
st = metav1.ConditionFalse st = metav1.ConditionFalse
return res, nil return res, nil
@ -111,7 +111,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
replicas := pgReplicas(pg) replicas := pgReplicas(pg)
if replicas == 0 { if replicas == 0 {
lg.Infof("ProxyGroup replicas set to 0") l.Infof("ProxyGroup replicas set to 0")
reason, msg = reasonNoProxies, reasonNoProxies reason, msg = reasonNoProxies, reasonNoProxies
st = metav1.ConditionFalse st = metav1.ConditionFalse
return res, nil return res, nil
@ -128,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
return res, err return res, err
} }
if pod == nil { if pod == nil {
lg.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i)
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
return res, nil return res, nil
} }
lg.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs)
ready := false ready := false
for _, ep := range eps.Endpoints { for _, ep := range eps.Endpoints {
lg.Debugf("looking at endpoint with addresses %v", ep.Addresses) l.Debugf("looking at endpoint with addresses %v", ep.Addresses)
if endpointReadyForPod(&ep, pod, lg) { if endpointReadyForPod(&ep, pod, l) {
lg.Debugf("endpoint is ready for Pod") l.Debugf("endpoint is ready for Pod")
ready = true ready = true
break break
} }
@ -163,10 +163,10 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
// endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic. // endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic.
// Endpoint must not be nil. // Endpoint must not be nil.
func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, lg *zap.SugaredLogger) bool { func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool {
podIP, err := podIPv4(pod) podIP, err := podIPv4(pod)
if err != nil { if err != nil {
lg.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err)
return false return false
} }
// Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this. // Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this.

@ -49,12 +49,12 @@ func TestEgressServiceReadiness(t *testing.T) {
}, },
} }
fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}} fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}}
labels := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc)
eps := &discoveryv1.EndpointSlice{ eps := &discoveryv1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "my-app", Name: "my-app",
Namespace: "operator-ns", Namespace: "operator-ns",
Labels: labels, Labels: l,
}, },
AddressType: discoveryv1.AddressTypeIPv4, AddressType: discoveryv1.AddressTypeIPv4,
} }
@ -118,26 +118,26 @@ func TestEgressServiceReadiness(t *testing.T) {
}) })
} }
func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger) { func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) {
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l)
} }
func setNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas int32) { func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) {
msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas) msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l)
} }
func setReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas, readyReplicas int32) { func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) {
reason := reasonPartiallyReady reason := reasonPartiallyReady
if readyReplicas == replicas { if readyReplicas == replicas {
reason = reasonReady reason = reasonReady
} }
msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l)
} }
func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, lg *zap.SugaredLogger) { func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) {
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, lg) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l)
} }
func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) {
@ -153,14 +153,14 @@ func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1
} }
func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod { func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod {
labels := pgLabels(pg.Name, nil) l := pgLabels(pg.Name, nil)
labels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal)
ip := fmt.Sprintf("10.0.0.%d", ordinal) ip := fmt.Sprintf("10.0.0.%d", ordinal)
return &corev1.Pod{ return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", pg.Name, ordinal), Name: fmt.Sprintf("%s-%d", pg.Name, ordinal),
Namespace: "operator-ns", Namespace: "operator-ns",
Labels: labels, Labels: l,
}, },
Status: corev1.PodStatus{ Status: corev1.PodStatus{
PodIPs: []corev1.PodIP{{IP: ip}}, PodIPs: []corev1.PodIP{{IP: ip}},

@ -98,12 +98,12 @@ type egressSvcsReconciler struct {
// - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the // - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the
// portmappings. // portmappings.
func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
lg := esr.logger.With("Service", req.NamespacedName) l := esr.logger.With("Service", req.NamespacedName)
defer lg.Info("reconcile finished") defer l.Info("reconcile finished")
svc := new(corev1.Service) svc := new(corev1.Service)
if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) {
lg.Info("Service not found") l.Info("Service not found")
return res, nil return res, nil
} else if err != nil { } else if err != nil {
return res, fmt.Errorf("failed to get Service: %w", err) return res, fmt.Errorf("failed to get Service: %w", err)
@ -111,7 +111,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
// Name of the 'egress service', meaning the tailnet target. // Name of the 'egress service', meaning the tailnet target.
tailnetSvc := tailnetSvcName(svc) tailnetSvc := tailnetSvcName(svc)
lg = lg.With("tailnet-service", tailnetSvc) l = l.With("tailnet-service", tailnetSvc)
// Note that resources for egress Services are only cleaned up when the // Note that resources for egress Services are only cleaned up when the
// Service is actually deleted (and not if, for example, user decides to // Service is actually deleted (and not if, for example, user decides to
@ -119,8 +119,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
// assume that the egress ExternalName Services are always created for // assume that the egress ExternalName Services are always created for
// Tailscale operator specifically. // Tailscale operator specifically.
if !svc.DeletionTimestamp.IsZero() { if !svc.DeletionTimestamp.IsZero() {
lg.Info("Service is being deleted, ensuring resource cleanup") l.Info("Service is being deleted, ensuring resource cleanup")
return res, esr.maybeCleanup(ctx, svc, lg) return res, esr.maybeCleanup(ctx, svc, l)
} }
oldStatus := svc.Status.DeepCopy() oldStatus := svc.Status.DeepCopy()
@ -131,7 +131,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
}() }()
// Validate the user-created ExternalName Service and the associated ProxyGroup. // Validate the user-created ExternalName Service and the associated ProxyGroup.
if ok, err := esr.validateClusterResources(ctx, svc, lg); err != nil { if ok, err := esr.validateClusterResources(ctx, svc, l); err != nil {
return res, fmt.Errorf("error validating cluster resources: %w", err) return res, fmt.Errorf("error validating cluster resources: %w", err)
} else if !ok { } else if !ok {
return res, nil return res, nil
@ -141,8 +141,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
svc.Finalizers = append(svc.Finalizers, FinalizerName) svc.Finalizers = append(svc.Finalizers, FinalizerName)
if err := esr.updateSvcSpec(ctx, svc); err != nil { if err := esr.updateSvcSpec(ctx, svc); err != nil {
err := fmt.Errorf("failed to add finalizer: %w", err) err := fmt.Errorf("failed to add finalizer: %w", err)
r := svcConfiguredReason(svc, false, lg) r := svcConfiguredReason(svc, false, l)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l)
return res, err return res, err
} }
esr.mu.Lock() esr.mu.Lock()
@ -151,16 +151,16 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
esr.mu.Unlock() esr.mu.Unlock()
} }
if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, lg); err != nil { if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, l); err != nil {
err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err) err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err)
r := svcConfiguredReason(svc, false, lg) r := svcConfiguredReason(svc, false, l)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l)
return res, err return res, err
} }
if err := esr.maybeProvision(ctx, svc, lg); err != nil { if err := esr.maybeProvision(ctx, svc, l); err != nil {
if strings.Contains(err.Error(), optimisticLockErrorMsg) { if strings.Contains(err.Error(), optimisticLockErrorMsg) {
lg.Infof("optimistic lock error, retrying: %s", err) l.Infof("optimistic lock error, retrying: %s", err)
} else { } else {
return reconcile.Result{}, err return reconcile.Result{}, err
} }
@ -169,15 +169,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
return res, nil return res, nil
} }
func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (err error) { func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) {
r := svcConfiguredReason(svc, false, lg) r := svcConfiguredReason(svc, false, l)
st := metav1.ConditionFalse st := metav1.ConditionFalse
defer func() { defer func() {
msg := r msg := r
if st != metav1.ConditionTrue && err != nil { if st != metav1.ConditionTrue && err != nil {
msg = err.Error() msg = err.Error()
} }
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, l)
}() }()
crl := egressSvcChildResourceLabels(svc) crl := egressSvcChildResourceLabels(svc)
@ -189,36 +189,36 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1
if clusterIPSvc == nil { if clusterIPSvc == nil {
clusterIPSvc = esr.clusterIPSvcForEgress(crl) clusterIPSvc = esr.clusterIPSvcForEgress(crl)
} }
upToDate := svcConfigurationUpToDate(svc, lg) upToDate := svcConfigurationUpToDate(svc, l)
provisioned := true provisioned := true
if !upToDate { if !upToDate {
if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, lg); err != nil { if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, l); err != nil {
return err return err
} }
} }
if !provisioned { if !provisioned {
lg.Infof("unable to provision cluster resources") l.Infof("unable to provision cluster resources")
return nil return nil
} }
// Update ExternalName Service to point at the ClusterIP Service. // Update ExternalName Service to point at the ClusterIP Service.
clusterDomain := retrieveClusterDomain(esr.tsNamespace, lg) clusterDomain := retrieveClusterDomain(esr.tsNamespace, l)
clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain) clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain)
if svc.Spec.ExternalName != clusterIPSvcFQDN { if svc.Spec.ExternalName != clusterIPSvcFQDN {
lg.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN)
svc.Spec.ExternalName = clusterIPSvcFQDN svc.Spec.ExternalName = clusterIPSvcFQDN
if err = esr.updateSvcSpec(ctx, svc); err != nil { if err = esr.updateSvcSpec(ctx, svc); err != nil {
err = fmt.Errorf("error updating ExternalName Service: %w", err) err = fmt.Errorf("error updating ExternalName Service: %w", err)
return err return err
} }
} }
r = svcConfiguredReason(svc, true, lg) r = svcConfiguredReason(svc, true, l)
st = metav1.ConditionTrue st = metav1.ConditionTrue
return nil return nil
} }
func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, lg *zap.SugaredLogger) (*corev1.Service, bool, error) { func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, l *zap.SugaredLogger) (*corev1.Service, bool, error) {
lg.Infof("updating configuration...") l.Infof("updating configuration...")
usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName) usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName)
if err != nil { if err != nil {
return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err) return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err)
@ -246,7 +246,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
} }
} }
if !found { if !found {
lg.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) l.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port)
clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1) clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1)
} }
} }
@ -277,7 +277,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts) return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts)
} }
p := unusedPort(usedPorts) p := unusedPort(usedPorts)
lg.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) l.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p)
usedPorts.Insert(p) usedPorts.Insert(p)
clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{ clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{
Name: wantsPM.Name, Name: wantsPM.Name,
@ -343,14 +343,14 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err) return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err)
} }
if cm == nil { if cm == nil {
lg.Info("ConfigMap not yet created, waiting..") l.Info("ConfigMap not yet created, waiting..")
return nil, false, nil return nil, false, nil
} }
tailnetSvc := tailnetSvcName(svc) tailnetSvc := tailnetSvcName(svc)
gotCfg := (*cfgs)[tailnetSvc] gotCfg := (*cfgs)[tailnetSvc]
wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, lg) wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l)
if !reflect.DeepEqual(gotCfg, wantsCfg) { if !reflect.DeepEqual(gotCfg, wantsCfg) {
lg.Debugf("updating egress services ConfigMap %s", cm.Name) l.Debugf("updating egress services ConfigMap %s", cm.Name)
mak.Set(cfgs, tailnetSvc, wantsCfg) mak.Set(cfgs, tailnetSvc, wantsCfg)
bs, err := json.Marshal(cfgs) bs, err := json.Marshal(cfgs)
if err != nil { if err != nil {
@ -361,7 +361,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err) return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err)
} }
} }
lg.Infof("egress service configuration has been updated") l.Infof("egress service configuration has been updated")
return clusterIPSvc, true, nil return clusterIPSvc, true, nil
} }
@ -402,7 +402,7 @@ func (esr *egressSvcsReconciler) maybeCleanup(ctx context.Context, svc *corev1.S
return nil return nil
} }
func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) error { func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) error {
wantsProxyGroup := svc.Annotations[AnnotationProxyGroup] wantsProxyGroup := svc.Annotations[AnnotationProxyGroup]
cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured)
if cond == nil { if cond == nil {
@ -416,7 +416,7 @@ func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Contex
return nil return nil
} }
esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup) esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup)
if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, lg); err != nil { if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, l); err != nil {
return fmt.Errorf("error deleting egress service config: %w", err) return fmt.Errorf("error deleting egress service config: %w", err)
} }
return nil return nil
@ -471,17 +471,17 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
Namespace: esr.tsNamespace, Namespace: esr.tsNamespace,
}, },
} }
lggr := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) l := logger.With("ConfigMap", client.ObjectKeyFromObject(cm))
lggr.Debug("ensuring that egress service configuration is removed from proxy config") l.Debug("ensuring that egress service configuration is removed from proxy config")
if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) { if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) {
lggr.Debugf("ConfigMap not found") l.Debugf("ConfigMap not found")
return nil return nil
} else if err != nil { } else if err != nil {
return fmt.Errorf("error retrieving ConfigMap: %w", err) return fmt.Errorf("error retrieving ConfigMap: %w", err)
} }
bs := cm.BinaryData[egressservices.KeyEgressServices] bs := cm.BinaryData[egressservices.KeyEgressServices]
if len(bs) == 0 { if len(bs) == 0 {
lggr.Debugf("ConfigMap does not contain egress service configs") l.Debugf("ConfigMap does not contain egress service configs")
return nil return nil
} }
cfgs := &egressservices.Configs{} cfgs := &egressservices.Configs{}
@ -491,12 +491,12 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
tailnetSvc := tailnetSvcName(svc) tailnetSvc := tailnetSvcName(svc)
_, ok := (*cfgs)[tailnetSvc] _, ok := (*cfgs)[tailnetSvc]
if !ok { if !ok {
lggr.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") l.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted")
return nil return nil
} }
lggr.Infof("before deleting config %+#v", *cfgs) l.Infof("before deleting config %+#v", *cfgs)
delete(*cfgs, tailnetSvc) delete(*cfgs, tailnetSvc)
lggr.Infof("after deleting config %+#v", *cfgs) l.Infof("after deleting config %+#v", *cfgs)
bs, err := json.Marshal(cfgs) bs, err := json.Marshal(cfgs)
if err != nil { if err != nil {
return fmt.Errorf("error marshalling egress services configs: %w", err) return fmt.Errorf("error marshalling egress services configs: %w", err)
@ -505,7 +505,7 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
return esr.Update(ctx, cm) return esr.Update(ctx, cm)
} }
func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (bool, error) { func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (bool, error) {
proxyGroupName := svc.Annotations[AnnotationProxyGroup] proxyGroupName := svc.Annotations[AnnotationProxyGroup]
pg := &tsapi.ProxyGroup{ pg := &tsapi.ProxyGroup{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -513,36 +513,36 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s
}, },
} }
if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) { if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) {
lg.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
return false, nil return false, nil
} else if err != nil { } else if err != nil {
err := fmt.Errorf("unable to retrieve ProxyGroup %s: %w", proxyGroupName, err) err := fmt.Errorf("unable to retrieve ProxyGroup %s: %w", proxyGroupName, err)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l)
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
return false, err return false, err
} }
if violations := validateEgressService(svc, pg); len(violations) > 0 { if violations := validateEgressService(svc, pg); len(violations) > 0 {
msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", ")) msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", "))
esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg)
lg.Info(msg) l.Info(msg)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l)
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
return false, nil return false, nil
} }
if !tsoperator.ProxyGroupAvailable(pg) { if !tsoperator.ProxyGroupAvailable(pg) {
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
} }
lg.Debugf("egress service is valid") l.Debugf("egress service is valid")
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, lg) tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l)
return true, nil return true, nil
} }
func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, lg *zap.SugaredLogger) egressservices.Config { func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config {
d := retrieveClusterDomain(ns, lg) d := retrieveClusterDomain(ns, l)
tt := tailnetTargetFromSvc(externalNameSvc) tt := tailnetTargetFromSvc(externalNameSvc)
hep := healthCheckForSvc(clusterIPSvc, d) hep := healthCheckForSvc(clusterIPSvc, d)
cfg := egressservices.Config{ cfg := egressservices.Config{
@ -691,18 +691,18 @@ func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string {
// egressEpsLabels returns labels to be added to an EndpointSlice created for an egress service. // egressEpsLabels returns labels to be added to an EndpointSlice created for an egress service.
func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string { func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string {
lbels := egressSvcChildResourceLabels(extNSvc) l := egressSvcChildResourceLabels(extNSvc)
// Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the // Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the
// endpoints defined on this EndpointSlice. // endpoints defined on this EndpointSlice.
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership
lbels[discoveryv1.LabelServiceName] = clusterIPSvc.Name l[discoveryv1.LabelServiceName] = clusterIPSvc.Name
// Kubernetes recommends setting this label. // Kubernetes recommends setting this label.
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management
lbels[discoveryv1.LabelManagedBy] = "tailscale.com" l[discoveryv1.LabelManagedBy] = "tailscale.com"
return lbels return l
} }
func svcConfigurationUpToDate(svc *corev1.Service, lg *zap.SugaredLogger) bool { func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool {
cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured)
if cond == nil { if cond == nil {
return false return false
@ -710,21 +710,21 @@ func svcConfigurationUpToDate(svc *corev1.Service, lg *zap.SugaredLogger) bool {
if cond.Status != metav1.ConditionTrue { if cond.Status != metav1.ConditionTrue {
return false return false
} }
wantsReadyReason := svcConfiguredReason(svc, true, lg) wantsReadyReason := svcConfiguredReason(svc, true, l)
return strings.EqualFold(wantsReadyReason, cond.Reason) return strings.EqualFold(wantsReadyReason, cond.Reason)
} }
func cfgHash(c cfg, lg *zap.SugaredLogger) string { func cfgHash(c cfg, l *zap.SugaredLogger) string {
bs, err := json.Marshal(c) bs, err := json.Marshal(c)
if err != nil { if err != nil {
// Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace.
lg.Infof("error marhsalling Config: %v", err) l.Infof("error marhsalling Config: %v", err)
return "" return ""
} }
h := sha256.New() h := sha256.New()
if _, err := h.Write(bs); err != nil { if _, err := h.Write(bs); err != nil {
// Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace.
lg.Infof("error producing Config hash: %v", err) l.Infof("error producing Config hash: %v", err)
return "" return ""
} }
return fmt.Sprintf("%x", h.Sum(nil)) return fmt.Sprintf("%x", h.Sum(nil))
@ -736,7 +736,7 @@ type cfg struct {
ProxyGroup string `json:"proxyGroup"` ProxyGroup string `json:"proxyGroup"`
} }
func svcConfiguredReason(svc *corev1.Service, configured bool, lg *zap.SugaredLogger) string { func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLogger) string {
var r string var r string
if configured { if configured {
r = "ConfiguredFor:" r = "ConfiguredFor:"
@ -750,7 +750,7 @@ func svcConfiguredReason(svc *corev1.Service, configured bool, lg *zap.SugaredLo
TailnetTarget: tt, TailnetTarget: tt,
ProxyGroup: svc.Annotations[AnnotationProxyGroup], ProxyGroup: svc.Annotations[AnnotationProxyGroup],
} }
r += fmt.Sprintf(":Config:%s", cfgHash(s, lg)) r += fmt.Sprintf(":Config:%s", cfgHash(s, l))
return r return r
} }

@ -249,9 +249,9 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort {
return ports return ports
} }
func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, lg *zap.Logger) { func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) {
t.Helper() t.Helper()
wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, lg.Sugar()) wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar())
if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil {
t.Fatalf("Error retrieving ConfigMap: %v", err) t.Fatalf("Error retrieving ConfigMap: %v", err)
} }

@ -69,7 +69,7 @@ func main() {
}() }()
log.Print("Templating Helm chart contents") log.Print("Templating Helm chart contents")
helmTmplCmd := exec.Command("./tool/helm", "template", "operator", "./cmd/k8s-operator/deploy/chart", helmTmplCmd := exec.Command("./tool/helm", "template", "operator", "./cmd/k8s-operator/deploy/chart",
"--namespace=tailscale", "--set=oauth.clientSecret=''") "--namespace=tailscale")
helmTmplCmd.Dir = repoRoot helmTmplCmd.Dir = repoRoot
var out bytes.Buffer var out bytes.Buffer
helmTmplCmd.Stdout = &out helmTmplCmd.Stdout = &out
@ -144,7 +144,7 @@ func generate(baseDir string) error {
if _, err := file.Write([]byte(helmConditionalEnd)); err != nil { if _, err := file.Write([]byte(helmConditionalEnd)); err != nil {
return fmt.Errorf("error writing helm if-statement end: %w", err) return fmt.Errorf("error writing helm if-statement end: %w", err)
} }
return file.Close() return nil
} }
for _, crd := range []struct { for _, crd := range []struct {
crdPath, templatePath string crdPath, templatePath string

@ -7,50 +7,26 @@ package main
import ( import (
"bytes" "bytes"
"context"
"net"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strings" "strings"
"testing" "testing"
"time"
"tailscale.com/tstest/nettest"
"tailscale.com/util/cibuild"
) )
func Test_generate(t *testing.T) { func Test_generate(t *testing.T) {
nettest.SkipIfNoNetwork(t)
ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
defer cancel()
if _, err := net.DefaultResolver.LookupIPAddr(ctx, "get.helm.sh"); err != nil {
// https://github.com/helm/helm/issues/31434
t.Skipf("get.helm.sh seems down or unreachable; skipping test")
}
base, err := os.Getwd() base, err := os.Getwd()
base = filepath.Join(base, "../../../") base = filepath.Join(base, "../../../")
if err != nil { if err != nil {
t.Fatalf("error getting current working directory: %v", err) t.Fatalf("error getting current working directory: %v", err)
} }
defer cleanup(base) defer cleanup(base)
helmCLIPath := filepath.Join(base, "tool/helm")
if out, err := exec.Command(helmCLIPath, "version").CombinedOutput(); err != nil && cibuild.On() {
// It's not just DNS. Azure is generating bogus certs within GitHub Actions at least for
// helm. So try to run it and see if we can even fetch it.
//
// https://github.com/helm/helm/issues/31434
t.Skipf("error fetching helm; skipping test in CI: %v, %s", err, out)
}
if err := generate(base); err != nil { if err := generate(base); err != nil {
t.Fatalf("CRD template generation: %v", err) t.Fatalf("CRD template generation: %v", err)
} }
tempDir := t.TempDir() tempDir := t.TempDir()
helmCLIPath := filepath.Join(base, "tool/helm")
helmChartTemplatesPath := filepath.Join(base, "cmd/k8s-operator/deploy/chart") helmChartTemplatesPath := filepath.Join(base, "cmd/k8s-operator/deploy/chart")
helmPackageCmd := exec.Command(helmCLIPath, "package", helmChartTemplatesPath, "--destination", tempDir, "--version", "0.0.1") helmPackageCmd := exec.Command(helmCLIPath, "package", helmChartTemplatesPath, "--destination", tempDir, "--version", "0.0.1")
helmPackageCmd.Stderr = os.Stderr helmPackageCmd.Stderr = os.Stderr

@ -29,7 +29,6 @@ import (
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/internal/client/tailscale" "tailscale.com/internal/client/tailscale"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/ipn/ipnstate" "tailscale.com/ipn/ipnstate"
@ -155,6 +154,11 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin
// needs to be explicitly enabled for a tailnet to be able to use them. // needs to be explicitly enabled for a tailnet to be able to use them.
serviceName := tailcfg.ServiceName("svc:" + hostname) serviceName := tailcfg.ServiceName("svc:" + hostname)
existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName)
if isErrorFeatureFlagNotEnabled(err) {
logger.Warn(msgFeatureFlagNotEnabled)
r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled)
return false, nil
}
if err != nil && !isErrorTailscaleServiceNotFound(err) { if err != nil && !isErrorTailscaleServiceNotFound(err) {
return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err)
} }
@ -290,25 +294,6 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin
ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ ingCfg.Web[epHTTP] = &ipn.WebServerConfig{
Handlers: handlers, Handlers: handlers,
} }
if isHTTPRedirectEnabled(ing) {
logger.Warnf("Both HTTP endpoint and HTTP redirect flags are enabled: ignoring HTTP redirect.")
}
} else if isHTTPRedirectEnabled(ing) {
logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers")
epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", dnsName))
ingCfg.TCP[80] = &ipn.TCPPortHandler{HTTP: true}
ingCfg.Web[epHTTP] = &ipn.WebServerConfig{
Handlers: map[string]*ipn.HTTPHandler{},
}
web80 := ingCfg.Web[epHTTP]
for mountPoint := range handlers {
// We send a 301 - Moved Permanently redirect from HTTP to HTTPS
redirectURL := "301:https://${HOST}${REQUEST_URI}"
logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL)
web80.Handlers[mountPoint] = &ipn.HTTPHandler{
Redirect: redirectURL,
}
}
} }
var gotCfg *ipn.ServiceConfig var gotCfg *ipn.ServiceConfig
@ -335,7 +320,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin
} }
tsSvcPorts := []string{"tcp:443"} // always 443 for Ingress tsSvcPorts := []string{"tcp:443"} // always 443 for Ingress
if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { if isHTTPEndpointEnabled(ing) {
tsSvcPorts = append(tsSvcPorts, "tcp:80") tsSvcPorts = append(tsSvcPorts, "tcp:80")
} }
@ -365,7 +350,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin
// 5. Update tailscaled's AdvertiseServices config, which should add the Tailscale Service // 5. Update tailscaled's AdvertiseServices config, which should add the Tailscale Service
// IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved.
mode := serviceAdvertisementHTTPS mode := serviceAdvertisementHTTPS
if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { if isHTTPEndpointEnabled(ing) {
mode = serviceAdvertisementHTTPAndHTTPS mode = serviceAdvertisementHTTPAndHTTPS
} }
if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, mode, logger); err != nil { if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, mode, logger); err != nil {
@ -396,7 +381,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin
Port: 443, Port: 443,
}) })
} }
if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { if isHTTPEndpointEnabled(ing) {
ports = append(ports, networkingv1.IngressPortStatus{ ports = append(ports, networkingv1.IngressPortStatus{
Protocol: "TCP", Protocol: "TCP",
Port: 80, Port: 80,
@ -468,6 +453,11 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG
if !found { if !found {
logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName) logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName)
tsService, err := r.tsClient.GetVIPService(ctx, tsSvcName) tsService, err := r.tsClient.GetVIPService(ctx, tsSvcName)
if isErrorFeatureFlagNotEnabled(err) {
msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled)
logger.Warn(msg)
return false, nil
}
if isErrorTailscaleServiceNotFound(err) { if isErrorTailscaleServiceNotFound(err) {
return false, nil return false, nil
} }
@ -524,7 +514,16 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string,
logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname) logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname)
serviceName := tailcfg.ServiceName("svc:" + hostname) serviceName := tailcfg.ServiceName("svc:" + hostname)
svc, err := r.tsClient.GetVIPService(ctx, serviceName) svc, err := r.tsClient.GetVIPService(ctx, serviceName)
if err != nil && !isErrorTailscaleServiceNotFound(err) { if err != nil {
if isErrorFeatureFlagNotEnabled(err) {
msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled)
logger.Warn(msg)
r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msg)
return false, nil
}
if isErrorTailscaleServiceNotFound(err) {
return false, nil
}
return false, fmt.Errorf("error getting Tailscale Service: %w", err) return false, fmt.Errorf("error getting Tailscale Service: %w", err)
} }
@ -730,15 +729,10 @@ func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *
} }
if len(o.OwnerRefs) == 1 { if len(o.OwnerRefs) == 1 {
logger.Infof("Deleting Tailscale Service %q", svc.Name) logger.Infof("Deleting Tailscale Service %q", svc.Name)
if err = r.tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { return false, r.tsClient.DeleteVIPService(ctx, svc.Name)
return false, err
}
return false, nil
} }
o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1)
logger.Infof("Creating/Updating Tailscale Service %q", svc.Name) logger.Infof("Deleting Tailscale Service %q", svc.Name)
json, err := json.Marshal(o) json, err := json.Marshal(o)
if err != nil { if err != nil {
return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err)
@ -1128,6 +1122,14 @@ func hasCerts(ctx context.Context, cl client.Client, lc localClient, ns string,
return len(cert) > 0 && len(key) > 0, nil return len(cert) > 0 && len(key) > 0, nil
} }
func isErrorFeatureFlagNotEnabled(err error) bool {
// messageFFNotEnabled is the error message returned by
// Tailscale control plane when a Tailscale Service API call is made for a
// tailnet that does not have the Tailscale Services feature flag enabled.
const messageFFNotEnabled = "feature unavailable for tailnet"
return err != nil && strings.Contains(err.Error(), messageFFNotEnabled)
}
func isErrorTailscaleServiceNotFound(err error) bool { func isErrorTailscaleServiceNotFound(err error) bool {
var errResp tailscale.ErrResponse var errResp tailscale.ErrResponse
ok := errors.As(err, &errResp) ok := errors.As(err, &errResp)

@ -25,7 +25,6 @@ import (
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake"
"tailscale.com/internal/client/tailscale" "tailscale.com/internal/client/tailscale"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/ipn/ipnstate" "tailscale.com/ipn/ipnstate"
@ -68,7 +67,7 @@ func TestIngressPGReconciler(t *testing.T) {
// Verify initial reconciliation // Verify initial reconciliation
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net")
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
verifyServeConfig(t, fc, "svc:my-svc", false) verifyServeConfig(t, fc, "svc:my-svc", false)
verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"})
@ -90,7 +89,7 @@ func TestIngressPGReconciler(t *testing.T) {
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
// Verify Tailscale Service uses custom tags // Verify Tailscale Service uses custom tags
tsSvc, err := ft.GetVIPService(t.Context(), "svc:my-svc") tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc")
if err != nil { if err != nil {
t.Fatalf("getting Tailscale Service: %v", err) t.Fatalf("getting Tailscale Service: %v", err)
} }
@ -135,7 +134,7 @@ func TestIngressPGReconciler(t *testing.T) {
// Verify second Ingress reconciliation // Verify second Ingress reconciliation
expectReconciled(t, ingPGR, "default", "my-other-ingress") expectReconciled(t, ingPGR, "default", "my-other-ingress")
populateTLSSecret(t, fc, "test-pg", "my-other-svc.ts.net") populateTLSSecret(context.Background(), fc, "test-pg", "my-other-svc.ts.net")
expectReconciled(t, ingPGR, "default", "my-other-ingress") expectReconciled(t, ingPGR, "default", "my-other-ingress")
verifyServeConfig(t, fc, "svc:my-other-svc", false) verifyServeConfig(t, fc, "svc:my-other-svc", false)
verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"tcp:443"}) verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"tcp:443"})
@ -152,14 +151,14 @@ func TestIngressPGReconciler(t *testing.T) {
verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc", "svc:my-other-svc"}) verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc", "svc:my-other-svc"})
// Delete second Ingress // Delete second Ingress
if err := fc.Delete(t.Context(), ing2); err != nil { if err := fc.Delete(context.Background(), ing2); err != nil {
t.Fatalf("deleting second Ingress: %v", err) t.Fatalf("deleting second Ingress: %v", err)
} }
expectReconciled(t, ingPGR, "default", "my-other-ingress") expectReconciled(t, ingPGR, "default", "my-other-ingress")
// Verify second Ingress cleanup // Verify second Ingress cleanup
cm := &corev1.ConfigMap{} cm := &corev1.ConfigMap{}
if err := fc.Get(t.Context(), types.NamespacedName{ if err := fc.Get(context.Background(), types.NamespacedName{
Name: "test-pg-ingress-config", Name: "test-pg-ingress-config",
Namespace: "operator-ns", Namespace: "operator-ns",
}, cm); err != nil { }, cm); err != nil {
@ -200,7 +199,7 @@ func TestIngressPGReconciler(t *testing.T) {
expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net"))
// Delete the first Ingress and verify cleanup // Delete the first Ingress and verify cleanup
if err := fc.Delete(t.Context(), ing); err != nil { if err := fc.Delete(context.Background(), ing); err != nil {
t.Fatalf("deleting Ingress: %v", err) t.Fatalf("deleting Ingress: %v", err)
} }
@ -208,7 +207,7 @@ func TestIngressPGReconciler(t *testing.T) {
// Verify the ConfigMap was cleaned up // Verify the ConfigMap was cleaned up
cm = &corev1.ConfigMap{} cm = &corev1.ConfigMap{}
if err := fc.Get(t.Context(), types.NamespacedName{ if err := fc.Get(context.Background(), types.NamespacedName{
Name: "test-pg-second-ingress-config", Name: "test-pg-second-ingress-config",
Namespace: "operator-ns", Namespace: "operator-ns",
}, cm); err != nil { }, cm); err != nil {
@ -229,47 +228,6 @@ func TestIngressPGReconciler(t *testing.T) {
expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net") expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net")
expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-svc.ts.net") expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-svc.ts.net")
expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-svc.ts.net") expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-svc.ts.net")
// Create a third ingress
ing3 := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{
Name: "my-other-ingress",
Namespace: "default",
UID: types.UID("5678-UID"),
Annotations: map[string]string{
"tailscale.com/proxy-group": "test-pg",
},
},
Spec: networkingv1.IngressSpec{
IngressClassName: ptr.To("tailscale"),
DefaultBackend: &networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test",
Port: networkingv1.ServiceBackendPort{
Number: 8080,
},
},
},
TLS: []networkingv1.IngressTLS{
{Hosts: []string{"my-other-svc.tailnetxyz.ts.net"}},
},
},
}
mustCreate(t, fc, ing3)
expectReconciled(t, ingPGR, ing3.Namespace, ing3.Name)
// Delete the service from "control"
ft.vipServices = make(map[tailcfg.ServiceName]*tailscale.VIPService)
// Delete the ingress and confirm we don't get stuck due to the VIP service not existing.
if err = fc.Delete(t.Context(), ing3); err != nil {
t.Fatalf("deleting Ingress: %v", err)
}
expectReconciled(t, ingPGR, ing3.Namespace, ing3.Name)
expectMissing[networkingv1.Ingress](t, fc, ing3.Namespace, ing3.Name)
} }
func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) {
@ -304,7 +262,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) {
// Verify initial reconciliation // Verify initial reconciliation
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net")
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
verifyServeConfig(t, fc, "svc:my-svc", false) verifyServeConfig(t, fc, "svc:my-svc", false)
verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"})
@ -315,13 +273,13 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) {
ing.Spec.TLS[0].Hosts[0] = "updated-svc" ing.Spec.TLS[0].Hosts[0] = "updated-svc"
}) })
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
populateTLSSecret(t, fc, "test-pg", "updated-svc.ts.net") populateTLSSecret(context.Background(), fc, "test-pg", "updated-svc.ts.net")
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
verifyServeConfig(t, fc, "svc:updated-svc", false) verifyServeConfig(t, fc, "svc:updated-svc", false)
verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"}) verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"})
verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:updated-svc"}) verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:updated-svc"})
_, err := ft.GetVIPService(context.Background(), "svc:my-svc") _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc"))
if err == nil { if err == nil {
t.Fatalf("svc:my-svc not cleaned up") t.Fatalf("svc:my-svc not cleaned up")
} }
@ -542,7 +500,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) {
// Verify initial reconciliation with HTTP enabled // Verify initial reconciliation with HTTP enabled
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net")
expectReconciled(t, ingPGR, "default", "test-ingress") expectReconciled(t, ingPGR, "default", "test-ingress")
verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"})
verifyServeConfig(t, fc, "svc:my-svc", true) verifyServeConfig(t, fc, "svc:my-svc", true)
@ -618,236 +576,6 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) {
} }
} }
func TestIngressPGReconciler_HTTPRedirect(t *testing.T) {
ingPGR, fc, ft := setupIngressTest(t)
// Create backend Service that the Ingress will route to
backendSvc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.0.0.1",
Ports: []corev1.ServicePort{
{
Port: 8080,
},
},
},
}
mustCreate(t, fc, backendSvc)
// Create test Ingress with HTTP redirect enabled
ing := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{
Name: "test-ingress",
Namespace: "default",
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/proxy-group": "test-pg",
"tailscale.com/http-redirect": "true",
},
},
Spec: networkingv1.IngressSpec{
IngressClassName: ptr.To("tailscale"),
DefaultBackend: &networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test",
Port: networkingv1.ServiceBackendPort{
Number: 8080,
},
},
},
TLS: []networkingv1.IngressTLS{
{Hosts: []string{"my-svc"}},
},
},
}
if err := fc.Create(context.Background(), ing); err != nil {
t.Fatal(err)
}
// Verify initial reconciliation with HTTP redirect enabled
expectReconciled(t, ingPGR, "default", "test-ingress")
populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net")
expectReconciled(t, ingPGR, "default", "test-ingress")
// Verify Tailscale Service includes both tcp:80 and tcp:443
verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"})
// Verify Ingress status includes port 80
ing = &networkingv1.Ingress{}
if err := fc.Get(context.Background(), types.NamespacedName{
Name: "test-ingress",
Namespace: "default",
}, ing); err != nil {
t.Fatal(err)
}
// Add the Tailscale Service to prefs to have the Ingress recognised as ready
mustCreate(t, fc, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pg-0",
Namespace: "operator-ns",
Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState),
},
Data: map[string][]byte{
"_current-profile": []byte("profile-foo"),
"profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`),
},
})
// Reconcile and re-fetch Ingress
expectReconciled(t, ingPGR, "default", "test-ingress")
if err := fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil {
t.Fatal(err)
}
wantStatus := []networkingv1.IngressPortStatus{
{Port: 443, Protocol: "TCP"},
{Port: 80, Protocol: "TCP"},
}
if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) {
t.Errorf("incorrect status ports: got %v, want %v",
ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus)
}
// Remove HTTP redirect annotation
mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) {
delete(ing.Annotations, "tailscale.com/http-redirect")
})
// Verify reconciliation after removing HTTP redirect
expectReconciled(t, ingPGR, "default", "test-ingress")
verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"})
// Verify Ingress status no longer includes port 80
ing = &networkingv1.Ingress{}
if err := fc.Get(context.Background(), types.NamespacedName{
Name: "test-ingress",
Namespace: "default",
}, ing); err != nil {
t.Fatal(err)
}
wantStatus = []networkingv1.IngressPortStatus{
{Port: 443, Protocol: "TCP"},
}
if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) {
t.Errorf("incorrect status ports after removing redirect: got %v, want %v",
ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus)
}
}
func TestIngressPGReconciler_HTTPEndpointAndRedirectConflict(t *testing.T) {
ingPGR, fc, ft := setupIngressTest(t)
// Create backend Service that the Ingress will route to
backendSvc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.0.0.1",
Ports: []corev1.ServicePort{
{
Port: 8080,
},
},
},
}
mustCreate(t, fc, backendSvc)
// Create test Ingress with both HTTP endpoint and HTTP redirect enabled
ing := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{
Name: "test-ingress",
Namespace: "default",
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/proxy-group": "test-pg",
"tailscale.com/http-endpoint": "enabled",
"tailscale.com/http-redirect": "true",
},
},
Spec: networkingv1.IngressSpec{
IngressClassName: ptr.To("tailscale"),
DefaultBackend: &networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test",
Port: networkingv1.ServiceBackendPort{
Number: 8080,
},
},
},
TLS: []networkingv1.IngressTLS{
{Hosts: []string{"my-svc"}},
},
},
}
if err := fc.Create(context.Background(), ing); err != nil {
t.Fatal(err)
}
// Verify initial reconciliation - HTTP endpoint should take precedence
expectReconciled(t, ingPGR, "default", "test-ingress")
populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net")
expectReconciled(t, ingPGR, "default", "test-ingress")
// Verify Tailscale Service includes both tcp:80 and tcp:443
verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"})
// Verify the serve config has HTTP endpoint handlers on port 80, NOT redirect handlers
cm := &corev1.ConfigMap{}
if err := fc.Get(context.Background(), types.NamespacedName{
Name: "test-pg-ingress-config",
Namespace: "operator-ns",
}, cm); err != nil {
t.Fatalf("getting ConfigMap: %v", err)
}
// Verify Ingress status includes port 80
ing = &networkingv1.Ingress{}
if err := fc.Get(context.Background(), types.NamespacedName{
Name: "test-ingress",
Namespace: "default",
}, ing); err != nil {
t.Fatal(err)
}
// Add the Tailscale Service to prefs to have the Ingress recognised as ready
mustCreate(t, fc, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pg-0",
Namespace: "operator-ns",
Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState),
},
Data: map[string][]byte{
"_current-profile": []byte("profile-foo"),
"profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`),
},
})
// Reconcile and re-fetch Ingress
expectReconciled(t, ingPGR, "default", "test-ingress")
if err := fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil {
t.Fatal(err)
}
wantStatus := []networkingv1.IngressPortStatus{
{Port: 443, Protocol: "TCP"},
{Port: 80, Protocol: "TCP"},
}
if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) {
t.Errorf("incorrect status ports: got %v, want %v",
ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus)
}
}
func TestIngressPGReconciler_MultiCluster(t *testing.T) { func TestIngressPGReconciler_MultiCluster(t *testing.T) {
ingPGR, fc, ft := setupIngressTest(t) ingPGR, fc, ft := setupIngressTest(t)
ingPGR.operatorID = "operator-1" ingPGR.operatorID = "operator-1"
@ -989,9 +717,7 @@ func TestOwnerAnnotations(t *testing.T) {
} }
} }
func populateTLSSecret(t *testing.T, c client.Client, pgName, domain string) { func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error {
t.Helper()
secret := &corev1.Secret{ secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: domain, Name: domain,
@ -1010,12 +736,10 @@ func populateTLSSecret(t *testing.T, c client.Client, pgName, domain string) {
}, },
} }
_, err := createOrUpdate(t.Context(), c, "operator-ns", secret, func(s *corev1.Secret) { _, err := createOrUpdate(ctx, c, "operator-ns", secret, func(s *corev1.Secret) {
s.Data = secret.Data s.Data = secret.Data
}) })
if err != nil { return err
t.Fatalf("failed to populate TLS secret: %v", err)
}
} }
func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) {

@ -204,27 +204,6 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
return nil return nil
} }
if isHTTPRedirectEnabled(ing) {
logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers")
const magic80 = "${TS_CERT_DOMAIN}:80"
sc.TCP[80] = &ipn.TCPPortHandler{HTTP: true}
sc.Web[magic80] = &ipn.WebServerConfig{
Handlers: map[string]*ipn.HTTPHandler{},
}
if sc.AllowFunnel != nil && sc.AllowFunnel[magic443] {
sc.AllowFunnel[magic80] = true
}
web80 := sc.Web[magic80]
for mountPoint := range handlers {
// We send a 301 - Moved Permanently redirect from HTTP to HTTPS
redirectURL := "301:https://${HOST}${REQUEST_URI}"
logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL)
web80.Handlers[mountPoint] = &ipn.HTTPHandler{
Redirect: redirectURL,
}
}
}
crl := childResourceLabels(ing.Name, ing.Namespace, "ingress") crl := childResourceLabels(ing.Name, ing.Namespace, "ingress")
var tags []string var tags []string
if tstr, ok := ing.Annotations[AnnotationTags]; ok { if tstr, ok := ing.Annotations[AnnotationTags]; ok {
@ -265,21 +244,14 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
} }
logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName)
ports := []networkingv1.IngressPortStatus{
{
Protocol: "TCP",
Port: 443,
},
}
if isHTTPRedirectEnabled(ing) {
ports = append(ports, networkingv1.IngressPortStatus{
Protocol: "TCP",
Port: 80,
})
}
ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{ ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{
Hostname: dev.ingressDNSName, Hostname: dev.ingressDNSName,
Ports: ports, Ports: []networkingv1.IngressPortStatus{
{
Protocol: "TCP",
Port: 443,
},
},
}) })
} }
@ -391,12 +363,6 @@ func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl clien
return handlers, nil return handlers, nil
} }
// isHTTPRedirectEnabled returns true if HTTP redirect is enabled for the Ingress.
// The annotation is tailscale.com/http-redirect and it should be set to "true".
func isHTTPRedirectEnabled(ing *networkingv1.Ingress) bool {
return ing.Annotations != nil && opt.Bool(ing.Annotations[AnnotationHTTPRedirect]).EqualBool(true)
}
// hostnameForIngress returns the hostname for an Ingress resource. // hostnameForIngress returns the hostname for an Ingress resource.
// If the Ingress has TLS configured with a host, it returns the first component of that host. // If the Ingress has TLS configured with a host, it returns the first component of that host.
// Otherwise, it returns a hostname derived from the Ingress name and namespace. // Otherwise, it returns a hostname derived from the Ingress name and namespace.

@ -7,7 +7,6 @@ package main
import ( import (
"context" "context"
"reflect"
"testing" "testing"
"go.uber.org/zap" "go.uber.org/zap"
@ -65,14 +64,12 @@ func TestTailscaleIngress(t *testing.T) {
parentType: "ingress", parentType: "ingress",
hostname: "default-test", hostname: "default-test",
app: kubetypes.AppIngressResource, app: kubetypes.AppIngressResource,
serveConfig: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://1.2.3.4:8080/"},
}}},
},
} }
serveConfig := &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
}
opts.serveConfig = serveConfig
expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSecret(t, fc, opts))
expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"))
@ -159,14 +156,12 @@ func TestTailscaleIngressHostname(t *testing.T) {
parentType: "ingress", parentType: "ingress",
hostname: "default-test", hostname: "default-test",
app: kubetypes.AppIngressResource, app: kubetypes.AppIngressResource,
serveConfig: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://1.2.3.4:8080/"},
}}},
},
} }
serveConfig := &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
}
opts.serveConfig = serveConfig
expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSecret(t, fc, opts))
expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"))
@ -281,14 +276,12 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) {
parentType: "ingress", parentType: "ingress",
hostname: "default-test", hostname: "default-test",
app: kubetypes.AppIngressResource, app: kubetypes.AppIngressResource,
serveConfig: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://1.2.3.4:8080/"},
}}},
},
} }
serveConfig := &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
}
opts.serveConfig = serveConfig
expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSecret(t, fc, opts))
expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"))
@ -375,6 +368,10 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
} }
expectReconciled(t, ingR, "default", "test") expectReconciled(t, ingR, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test", "ingress") fullName, shortName := findGenName(t, fc, "default", "test", "ingress")
serveConfig := &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
}
opts := configOpts{ opts := configOpts{
stsName: shortName, stsName: shortName,
secretName: fullName, secretName: fullName,
@ -385,14 +382,8 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
app: kubetypes.AppIngressResource, app: kubetypes.AppIngressResource,
namespaced: true, namespaced: true,
proxyType: proxyTypeIngressResource, proxyType: proxyTypeIngressResource,
serveConfig: &ipn.ServeConfig{ serveConfig: serveConfig,
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, resourceVersion: "1",
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://1.2.3.4:8080/"},
}}},
},
resourceVersion: "1",
} }
// 1. Enable metrics- expect metrics Service to be created // 1. Enable metrics- expect metrics Service to be created
@ -726,14 +717,12 @@ func TestEmptyPath(t *testing.T) {
parentType: "ingress", parentType: "ingress",
hostname: "foo", hostname: "foo",
app: kubetypes.AppIngressResource, app: kubetypes.AppIngressResource,
serveConfig: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://1.2.3.4:8080/"},
}}},
},
} }
serveConfig := &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
}
opts.serveConfig = serveConfig
expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSecret(t, fc, opts))
expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"))
@ -827,101 +816,3 @@ func backend() *networkingv1.IngressBackend {
}, },
} }
} }
func TestTailscaleIngressWithHTTPRedirect(t *testing.T) {
fc := fake.NewFakeClient(ingressClass())
ft := &fakeTSClient{}
fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
ingR := &IngressReconciler{
Client: fc,
ingressClassName: "tailscale",
ssr: &tailscaleSTSReconciler{
Client: fc,
tsClient: ft,
tsnetServer: fakeTsnetServer,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
},
logger: zl.Sugar(),
}
// 1. Create Ingress with HTTP redirect annotation
ing := ingress()
mak.Set(&ing.Annotations, AnnotationHTTPRedirect, "true")
mustCreate(t, fc, ing)
mustCreate(t, fc, service())
expectReconciled(t, ingR, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test", "ingress")
opts := configOpts{
replicas: ptr.To[int32](1),
stsName: shortName,
secretName: fullName,
namespace: "default",
parentType: "ingress",
hostname: "default-test",
app: kubetypes.AppIngressResource,
serveConfig: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true},
80: {HTTP: true},
},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://1.2.3.4:8080/"},
}},
"${TS_CERT_DOMAIN}:80": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Redirect: "301:https://${HOST}${REQUEST_URI}"},
}},
},
},
}
expectEqual(t, fc, expectedSecret(t, fc, opts))
expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"))
expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs)
// 2. Update device info to get status updated
mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) {
mak.Set(&secret.Data, "device_id", []byte("1234"))
mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net"))
})
expectReconciled(t, ingR, "default", "test")
// Verify Ingress status includes both ports 80 and 443
ing = &networkingv1.Ingress{}
if err := fc.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, ing); err != nil {
t.Fatal(err)
}
wantPorts := []networkingv1.IngressPortStatus{
{Port: 443, Protocol: "TCP"},
{Port: 80, Protocol: "TCP"},
}
if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) {
t.Errorf("incorrect status ports: got %v, want %v", ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts)
}
// 3. Remove HTTP redirect annotation
mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) {
delete(ing.Annotations, AnnotationHTTPRedirect)
})
expectReconciled(t, ingR, "default", "test")
// 4. Verify Ingress status no longer includes port 80
ing = &networkingv1.Ingress{}
if err := fc.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, ing); err != nil {
t.Fatal(err)
}
wantPorts = []networkingv1.IngressPortStatus{
{Port: 443, Protocol: "TCP"},
}
if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) {
t.Errorf("incorrect status ports after removing redirect: got %v, want %v", ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts)
}
}

@ -26,7 +26,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml" "sigs.k8s.io/yaml"
tsoperator "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes" "tailscale.com/kube/kubetypes"
@ -46,7 +45,10 @@ const (
messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. Please ensure no more than one is present." messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. Please ensure no more than one is present."
defaultNameserverImageRepo = "tailscale/k8s-nameserver" defaultNameserverImageRepo = "tailscale/k8s-nameserver"
defaultNameserverImageTag = "stable" // TODO (irbekrm): once we start publishing nameserver images for stable
// track, replace 'unstable' here with the version of this operator
// instance.
defaultNameserverImageTag = "unstable"
) )
// NameserverReconciler knows how to create nameserver resources in cluster in // NameserverReconciler knows how to create nameserver resources in cluster in

@ -19,7 +19,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/yaml" "sigs.k8s.io/yaml"
operatorutils "tailscale.com/k8s-operator" operatorutils "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/tstest" "tailscale.com/tstest"
@ -183,7 +182,7 @@ func TestNameserverReconciler(t *testing.T) {
dnsCfg.Spec.Nameserver.Image = nil dnsCfg.Spec.Nameserver.Image = nil
}) })
expectReconciled(t, reconciler, "", "test") expectReconciled(t, reconciler, "", "test")
wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:stable" wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable"
expectEqual(t, fc, wantsDeploy) expectEqual(t, fc, wantsDeploy)
}) })
} }

@ -44,10 +44,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/manager/signals"
"sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/envknob"
"tailscale.com/client/local" "tailscale.com/client/local"
"tailscale.com/client/tailscale" "tailscale.com/client/tailscale"
"tailscale.com/envknob"
"tailscale.com/hostinfo" "tailscale.com/hostinfo"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/ipn/store/kubestore" "tailscale.com/ipn/store/kubestore"
@ -164,24 +164,22 @@ func main() {
runReconcilers(rOpts) runReconcilers(rOpts)
} }
// initTSNet initializes the tsnet.Server and logs in to Tailscale. If CLIENT_ID // initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the
// is set, it authenticates to the Tailscale API using the federated OIDC workload // CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate
// identity flow. Otherwise, it uses the CLIENT_ID_FILE and CLIENT_SECRET_FILE // with Tailscale.
// environment variables to authenticate with static credentials.
func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) { func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) {
var ( var (
clientID = defaultEnv("CLIENT_ID", "") // Used for workload identity federation. clientIDPath = defaultEnv("CLIENT_ID_FILE", "")
clientIDPath = defaultEnv("CLIENT_ID_FILE", "") // Used for static client credentials. clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "")
clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") // Used for static client credentials.
hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator")
kubeSecret = defaultEnv("OPERATOR_SECRET", "") kubeSecret = defaultEnv("OPERATOR_SECRET", "")
operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator")
) )
startlog := zlog.Named("startup") startlog := zlog.Named("startup")
if clientID == "" && (clientIDPath == "" || clientSecretPath == "") { if clientIDPath == "" || clientSecretPath == "" {
startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") // TODO(tomhjp): error message can mention WIF once it's publicly available. startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set")
} }
tsc, err := newTSClient(zlog.Named("ts-api-client"), clientID, clientIDPath, clientSecretPath, loginServer) tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath, loginServer)
if err != nil { if err != nil {
startlog.Fatalf("error creating Tailscale client: %v", err) startlog.Fatalf("error creating Tailscale client: %v", err)
} }
@ -638,7 +636,7 @@ func runReconcilers(opts reconcilerOpts) {
recorder: eventRecorder, recorder: eventRecorder,
tsNamespace: opts.tailscaleNamespace, tsNamespace: opts.tailscaleNamespace,
Client: mgr.GetClient(), Client: mgr.GetClient(),
log: opts.log.Named("recorder-reconciler"), l: opts.log.Named("recorder-reconciler"),
clock: tstime.DefaultClock{}, clock: tstime.DefaultClock{},
tsClient: opts.tsClient, tsClient: opts.tsClient,
loginServer: opts.loginServer, loginServer: opts.loginServer,
@ -693,7 +691,7 @@ func runReconcilers(opts reconcilerOpts) {
Complete(&ProxyGroupReconciler{ Complete(&ProxyGroupReconciler{
recorder: eventRecorder, recorder: eventRecorder,
Client: mgr.GetClient(), Client: mgr.GetClient(),
log: opts.log.Named("proxygroup-reconciler"), l: opts.log.Named("proxygroup-reconciler"),
clock: tstime.DefaultClock{}, clock: tstime.DefaultClock{},
tsClient: opts.tsClient, tsClient: opts.tsClient,
@ -1122,7 +1120,7 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger, ingre
reqs := make([]reconcile.Request, 0) reqs := make([]reconcile.Request, 0)
for _, ing := range ingList.Items { for _, ing := range ingList.Items {
if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName {
continue return nil
} }
if hasProxyGroupAnnotation(&ing) { if hasProxyGroupAnnotation(&ing) {
// We don't want to reconcile backend Services for Ingresses for ProxyGroups. // We don't want to reconcile backend Services for Ingresses for ProxyGroups.

@ -1282,8 +1282,8 @@ func TestServiceProxyClassAnnotation(t *testing.T) {
slist := &corev1.SecretList{} slist := &corev1.SecretList{}
fc.List(context.Background(), slist, client.InNamespace("operator-ns")) fc.List(context.Background(), slist, client.InNamespace("operator-ns"))
for _, i := range slist.Items { for _, i := range slist.Items {
labels, _ := json.Marshal(i.Labels) l, _ := json.Marshal(i.Labels)
t.Logf("found secret %q with labels %q ", i.Name, string(labels)) t.Logf("found secret %q with labels %q ", i.Name, string(l))
} }
_, shortName := findGenName(t, fc, "default", "test", "svc") _, shortName := findGenName(t, fc, "default", "test", "svc")
@ -1698,42 +1698,6 @@ func Test_serviceHandlerForIngress(t *testing.T) {
} }
} }
func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) {
fc := fake.NewFakeClient()
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "backend", Namespace: "default"},
}
mustCreate(t, fc, svc)
mustCreate(t, fc, &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{Name: "nginx-ing", Namespace: "default"},
Spec: networkingv1.IngressSpec{
IngressClassName: ptr.To("nginx"),
DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}},
},
})
mustCreate(t, fc, &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{Name: "ts-ing", Namespace: "default"},
Spec: networkingv1.IngressSpec{
IngressClassName: ptr.To("tailscale"),
DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}},
},
})
got := serviceHandlerForIngress(fc, zl.Sugar(), "tailscale")(context.Background(), svc)
want := []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "default", Name: "ts-ing"}}}
if diff := cmp.Diff(got, want); diff != "" {
t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff)
}
}
func Test_clusterDomainFromResolverConf(t *testing.T) { func Test_clusterDomainFromResolverConf(t *testing.T) {
zl, err := zap.NewDevelopment() zl, err := zap.NewDevelopment()
if err != nil { if err != nil {

@ -80,7 +80,7 @@ var (
// ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition.
type ProxyGroupReconciler struct { type ProxyGroupReconciler struct {
client.Client client.Client
log *zap.SugaredLogger l *zap.SugaredLogger
recorder record.EventRecorder recorder record.EventRecorder
clock tstime.Clock clock tstime.Clock
tsClient tsClient tsClient tsClient
@ -101,7 +101,7 @@ type ProxyGroupReconciler struct {
} }
func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger {
return r.log.With("ProxyGroup", name) return r.l.With("ProxyGroup", name)
} }
func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {

@ -524,16 +524,16 @@ func pgSecretLabels(pgName, secretType string) map[string]string {
} }
func pgLabels(pgName string, customLabels map[string]string) map[string]string { func pgLabels(pgName string, customLabels map[string]string) map[string]string {
labels := make(map[string]string, len(customLabels)+3) l := make(map[string]string, len(customLabels)+3)
for k, v := range customLabels { for k, v := range customLabels {
labels[k] = v l[k] = v
} }
labels[kubetypes.LabelManaged] = "true" l[kubetypes.LabelManaged] = "true"
labels[LabelParentType] = "proxygroup" l[LabelParentType] = "proxygroup"
labels[LabelParentName] = pgName l[LabelParentName] = pgName
return labels return l
} }
func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference { func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference {

@ -670,7 +670,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) {
t.Logf("created node %q with data", n.name) t.Logf("created node %q with data", n.name)
} }
reconciler.log = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) reconciler.l = zl.Sugar().With("TestName", tt.name).With("Reconcile", i)
pg.Spec.Replicas = r.replicas pg.Spec.Replicas = r.replicas
pc.Spec.StaticEndpoints = r.staticEndpointConfig pc.Spec.StaticEndpoints = r.staticEndpointConfig
@ -784,7 +784,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) {
Client: fc, Client: fc,
tsClient: tsClient, tsClient: tsClient,
recorder: fr, recorder: fr,
log: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), l: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"),
clock: cl, clock: cl,
} }
@ -845,7 +845,7 @@ func TestProxyGroup(t *testing.T) {
Client: fc, Client: fc,
tsClient: tsClient, tsClient: tsClient,
recorder: fr, recorder: fr,
log: zl.Sugar(), l: zl.Sugar(),
clock: cl, clock: cl,
} }
crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
@ -1049,7 +1049,7 @@ func TestProxyGroupTypes(t *testing.T) {
tsNamespace: tsNamespace, tsNamespace: tsNamespace,
tsProxyImage: testProxyImage, tsProxyImage: testProxyImage,
Client: fc, Client: fc,
log: zl.Sugar(), l: zl.Sugar(),
tsClient: &fakeTSClient{}, tsClient: &fakeTSClient{},
clock: tstest.NewClock(tstest.ClockOpts{}), clock: tstest.NewClock(tstest.ClockOpts{}),
} }
@ -1289,24 +1289,24 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) {
tsNamespace: tsNamespace, tsNamespace: tsNamespace,
tsProxyImage: testProxyImage, tsProxyImage: testProxyImage,
Client: fc, Client: fc,
log: zap.Must(zap.NewDevelopment()).Sugar(), l: zap.Must(zap.NewDevelopment()).Sugar(),
tsClient: &fakeTSClient{}, tsClient: &fakeTSClient{},
clock: tstest.NewClock(tstest.ClockOpts{}), clock: tstest.NewClock(tstest.ClockOpts{}),
} }
expectReconciled(t, r, "", pg.Name) expectReconciled(t, r, "", pg.Name)
pg.ObjectMeta.Finalizers = append(pg.ObjectMeta.Finalizers, FinalizerName) pg.ObjectMeta.Finalizers = append(pg.ObjectMeta.Finalizers, FinalizerName)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.l)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l)
expectEqual(t, fc, pg, omitPGStatusConditionMessages) expectEqual(t, fc, pg, omitPGStatusConditionMessages)
// Set kube-apiserver valid. // Set kube-apiserver valid.
mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) {
tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l)
}) })
expectReconciled(t, r, "", pg.Name) expectReconciled(t, r, "", pg.Name)
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l)
expectEqual(t, fc, pg, omitPGStatusConditionMessages) expectEqual(t, fc, pg, omitPGStatusConditionMessages)
// Set available. // Set available.
@ -1318,17 +1318,17 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) {
TailnetIPs: []string{"1.2.3.4", "::1"}, TailnetIPs: []string{"1.2.3.4", "::1"},
}, },
} }
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.l)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l)
expectEqual(t, fc, pg, omitPGStatusConditionMessages) expectEqual(t, fc, pg, omitPGStatusConditionMessages)
// Set kube-apiserver configured. // Set kube-apiserver configured.
mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) {
tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l)
}) })
expectReconciled(t, r, "", pg.Name) expectReconciled(t, r, "", pg.Name)
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.log) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.l)
expectEqual(t, fc, pg, omitPGStatusConditionMessages) expectEqual(t, fc, pg, omitPGStatusConditionMessages)
} }
@ -1342,7 +1342,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) {
tsNamespace: tsNamespace, tsNamespace: tsNamespace,
tsProxyImage: testProxyImage, tsProxyImage: testProxyImage,
Client: fc, Client: fc,
log: zap.Must(zap.NewDevelopment()).Sugar(), l: zap.Must(zap.NewDevelopment()).Sugar(),
tsClient: &fakeTSClient{}, tsClient: &fakeTSClient{},
clock: tstest.NewClock(tstest.ClockOpts{}), clock: tstest.NewClock(tstest.ClockOpts{}),
} }
@ -1427,7 +1427,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
tsNamespace: tsNamespace, tsNamespace: tsNamespace,
tsProxyImage: testProxyImage, tsProxyImage: testProxyImage,
Client: fc, Client: fc,
log: zap.Must(zap.NewDevelopment()).Sugar(), l: zap.Must(zap.NewDevelopment()).Sugar(),
tsClient: &fakeTSClient{}, tsClient: &fakeTSClient{},
clock: tstest.NewClock(tstest.ClockOpts{}), clock: tstest.NewClock(tstest.ClockOpts{}),
} }
@ -1902,7 +1902,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) {
defaultProxyClass: tt.defaultProxyClass, defaultProxyClass: tt.defaultProxyClass,
Client: fc, Client: fc,
tsClient: &fakeTSClient{}, tsClient: &fakeTSClient{},
log: zl.Sugar(), l: zl.Sugar(),
clock: cl, clock: cl,
} }

@ -69,8 +69,7 @@ const (
AnnotationProxyGroup = "tailscale.com/proxy-group" AnnotationProxyGroup = "tailscale.com/proxy-group"
// Annotations settable by users on ingresses. // Annotations settable by users on ingresses.
AnnotationFunnel = "tailscale.com/funnel" AnnotationFunnel = "tailscale.com/funnel"
AnnotationHTTPRedirect = "tailscale.com/http-redirect"
// If set to true, set up iptables/nftables rules in the proxy forward // If set to true, set up iptables/nftables rules in the proxy forward
// cluster traffic to the tailnet IP of that proxy. This can only be set // cluster traffic to the tailnet IP of that proxy. This can only be set

@ -71,11 +71,11 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
SecurityContext: &corev1.PodSecurityContext{ SecurityContext: &corev1.PodSecurityContext{
RunAsUser: ptr.To(int64(0)), RunAsUser: ptr.To(int64(0)),
}, },
ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}},
NodeName: "some-node", NodeName: "some-node",
NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"},
Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}},
Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}},
PriorityClassName: "high-priority", PriorityClassName: "high-priority",
TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
{ {

@ -207,6 +207,11 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin
// already created and not owned by this Service. // already created and not owned by this Service.
serviceName := tailcfg.ServiceName("svc:" + hostname) serviceName := tailcfg.ServiceName("svc:" + hostname)
existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName)
if isErrorFeatureFlagNotEnabled(err) {
logger.Warn(msgFeatureFlagNotEnabled)
r.recorder.Event(svc, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled)
return false, nil
}
if err != nil && !isErrorTailscaleServiceNotFound(err) { if err != nil && !isErrorTailscaleServiceNotFound(err) {
return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err)
} }
@ -525,6 +530,11 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er
// It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred.
func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) {
svc, err := tsClient.GetVIPService(ctx, name) svc, err := tsClient.GetVIPService(ctx, name)
if isErrorFeatureFlagNotEnabled(err) {
msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled)
logger.Warn(msg)
return false, nil
}
if err != nil { if err != nil {
errResp := &tailscale.ErrResponse{} errResp := &tailscale.ErrResponse{}
ok := errors.As(err, errResp) ok := errors.As(err, errResp)

@ -8,13 +8,8 @@ package main
import ( import (
"context" "context"
"fmt" "fmt"
"net/http"
"os" "os"
"sync"
"time"
"go.uber.org/zap"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials" "golang.org/x/oauth2/clientcredentials"
"tailscale.com/internal/client/tailscale" "tailscale.com/internal/client/tailscale"
"tailscale.com/ipn" "tailscale.com/ipn"
@ -25,53 +20,30 @@ import (
// call should be performed on the default tailnet for the provided credentials. // call should be performed on the default tailnet for the provided credentials.
const ( const (
defaultTailnet = "-" defaultTailnet = "-"
oidcJWTPath = "/var/run/secrets/tailscale/serviceaccount/token"
) )
func newTSClient(logger *zap.SugaredLogger, clientID, clientIDPath, clientSecretPath, loginServer string) (*tailscale.Client, error) { func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServer string) (tsClient, error) {
baseURL := ipn.DefaultControlURL clientID, err := os.ReadFile(clientIDPath)
if err != nil {
return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err)
}
clientSecret, err := os.ReadFile(clientSecretPath)
if err != nil {
return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err)
}
const tokenURLPath = "/api/v2/oauth/token"
tokenURL := fmt.Sprintf("%s%s", ipn.DefaultControlURL, tokenURLPath)
if loginServer != "" { if loginServer != "" {
baseURL = loginServer tokenURL = fmt.Sprintf("%s%s", loginServer, tokenURLPath)
} }
credentials := clientcredentials.Config{
var httpClient *http.Client ClientID: string(clientID),
if clientID == "" { ClientSecret: string(clientSecret),
// Use static client credentials mounted to disk. TokenURL: tokenURL,
id, err := os.ReadFile(clientIDPath)
if err != nil {
return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err)
}
secret, err := os.ReadFile(clientSecretPath)
if err != nil {
return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err)
}
credentials := clientcredentials.Config{
ClientID: string(id),
ClientSecret: string(secret),
TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token"),
}
tokenSrc := credentials.TokenSource(context.Background())
httpClient = oauth2.NewClient(context.Background(), tokenSrc)
} else {
// Use workload identity federation.
tokenSrc := &jwtTokenSource{
logger: logger,
jwtPath: oidcJWTPath,
baseCfg: clientcredentials.Config{
ClientID: clientID,
TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token-exchange"),
},
}
httpClient = &http.Client{
Transport: &oauth2.Transport{
Source: tokenSrc,
},
}
} }
c := tailscale.NewClient(defaultTailnet, nil) c := tailscale.NewClient(defaultTailnet, nil)
c.UserAgent = "tailscale-k8s-operator" c.UserAgent = "tailscale-k8s-operator"
c.HTTPClient = httpClient c.HTTPClient = credentials.Client(ctx)
if loginServer != "" { if loginServer != "" {
c.BaseURL = loginServer c.BaseURL = loginServer
} }
@ -91,43 +63,3 @@ type tsClient interface {
// DeleteVIPService is a method for deleting a Tailscale Service. // DeleteVIPService is a method for deleting a Tailscale Service.
DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error
} }
// jwtTokenSource implements the [oauth2.TokenSource] interface, but with the
// ability to regenerate a fresh underlying token source each time a new value
// of the JWT parameter is needed due to expiration.
type jwtTokenSource struct {
logger *zap.SugaredLogger
jwtPath string // Path to the file containing an automatically refreshed JWT.
baseCfg clientcredentials.Config // Holds config that doesn't change for the lifetime of the process.
mu sync.Mutex // Guards underlying.
underlying oauth2.TokenSource // The oauth2 client implementation. Does its own separate caching of the access token.
}
func (s *jwtTokenSource) Token() (*oauth2.Token, error) {
s.mu.Lock()
defer s.mu.Unlock()
if s.underlying != nil {
t, err := s.underlying.Token()
if err == nil && t != nil && t.Valid() {
return t, nil
}
}
s.logger.Debugf("Refreshing JWT from %s", s.jwtPath)
tk, err := os.ReadFile(s.jwtPath)
if err != nil {
return nil, fmt.Errorf("error reading JWT from %q: %w", s.jwtPath, err)
}
// Shallow copy of the base config.
credentials := s.baseCfg
credentials.EndpointParams = map[string][]string{
"jwt": {string(tk)},
}
src := credentials.TokenSource(context.Background())
s.underlying = oauth2.ReuseTokenSourceWithExpiry(nil, src, time.Minute)
return s.underlying.Token()
}

@ -1,135 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
package main
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"go.uber.org/zap"
"golang.org/x/oauth2"
)
func TestNewStaticClient(t *testing.T) {
const (
clientIDFile = "client-id"
clientSecretFile = "client-secret"
)
tmp := t.TempDir()
clientIDPath := filepath.Join(tmp, clientIDFile)
if err := os.WriteFile(clientIDPath, []byte("test-client-id"), 0600); err != nil {
t.Fatalf("error writing test file %q: %v", clientIDPath, err)
}
clientSecretPath := filepath.Join(tmp, clientSecretFile)
if err := os.WriteFile(clientSecretPath, []byte("test-client-secret"), 0600); err != nil {
t.Fatalf("error writing test file %q: %v", clientSecretPath, err)
}
srv := testAPI(t, 3600)
cl, err := newTSClient(zap.NewNop().Sugar(), "", clientIDPath, clientSecretPath, srv.URL)
if err != nil {
t.Fatalf("error creating Tailscale client: %v", err)
}
resp, err := cl.HTTPClient.Get(srv.URL)
if err != nil {
t.Fatalf("error making test API call: %v", err)
}
defer resp.Body.Close()
got, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("error reading response body: %v", err)
}
want := "Bearer " + testToken("/api/v2/oauth/token", "test-client-id", "test-client-secret", "")
if string(got) != want {
t.Errorf("got %q; want %q", got, want)
}
}
func TestNewWorkloadIdentityClient(t *testing.T) {
// 5 seconds is within expiryDelta leeway, so the access token will
// immediately be considered expired and get refreshed on each access.
srv := testAPI(t, 5)
cl, err := newTSClient(zap.NewNop().Sugar(), "test-client-id", "", "", srv.URL)
if err != nil {
t.Fatalf("error creating Tailscale client: %v", err)
}
// Modify the path where the JWT will be read from.
oauth2Transport, ok := cl.HTTPClient.Transport.(*oauth2.Transport)
if !ok {
t.Fatalf("expected oauth2.Transport, got %T", cl.HTTPClient.Transport)
}
jwtTokenSource, ok := oauth2Transport.Source.(*jwtTokenSource)
if !ok {
t.Fatalf("expected jwtTokenSource, got %T", oauth2Transport.Source)
}
tmp := t.TempDir()
jwtPath := filepath.Join(tmp, "token")
jwtTokenSource.jwtPath = jwtPath
for _, jwt := range []string{"test-jwt", "updated-test-jwt"} {
if err := os.WriteFile(jwtPath, []byte(jwt), 0600); err != nil {
t.Fatalf("error writing test file %q: %v", jwtPath, err)
}
resp, err := cl.HTTPClient.Get(srv.URL)
if err != nil {
t.Fatalf("error making test API call: %v", err)
}
defer resp.Body.Close()
got, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("error reading response body: %v", err)
}
if want := "Bearer " + testToken("/api/v2/oauth/token-exchange", "test-client-id", "", jwt); string(got) != want {
t.Errorf("got %q; want %q", got, want)
}
}
}
func testAPI(t *testing.T, expirationSeconds int) *httptest.Server {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t.Logf("test server got request: %s %s", r.Method, r.URL.Path)
switch r.URL.Path {
case "/api/v2/oauth/token", "/api/v2/oauth/token-exchange":
id, secret, ok := r.BasicAuth()
if !ok {
t.Fatal("missing or invalid basic auth")
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(map[string]any{
"access_token": testToken(r.URL.Path, id, secret, r.FormValue("jwt")),
"token_type": "Bearer",
"expires_in": expirationSeconds,
}); err != nil {
t.Fatalf("error writing response: %v", err)
}
case "/":
// Echo back the authz header for test assertions.
_, err := w.Write([]byte(r.Header.Get("Authorization")))
if err != nil {
t.Fatalf("error writing response: %v", err)
}
default:
w.WriteHeader(http.StatusNotFound)
}
}))
t.Cleanup(srv.Close)
return srv
}
func testToken(path, id, secret, jwt string) string {
return fmt.Sprintf("%s|%s|%s|%s", path, id, secret, jwt)
}

@ -12,7 +12,6 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"slices" "slices"
"strconv"
"strings" "strings"
"sync" "sync"
@ -30,7 +29,6 @@ import (
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/client/tailscale" "tailscale.com/client/tailscale"
tsoperator "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
@ -56,7 +54,7 @@ var gaugeRecorderResources = clientmetric.NewGauge(kubetypes.MetricRecorderCount
// Recorder CRs. // Recorder CRs.
type RecorderReconciler struct { type RecorderReconciler struct {
client.Client client.Client
log *zap.SugaredLogger l *zap.SugaredLogger
recorder record.EventRecorder recorder record.EventRecorder
clock tstime.Clock clock tstime.Clock
tsNamespace string tsNamespace string
@ -68,16 +66,16 @@ type RecorderReconciler struct {
} }
func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger { func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger {
return r.log.With("Recorder", name) return r.l.With("Recorder", name)
} }
func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
logger := r.logger(req.Name) logger := r.logger(req.Name)
logger.Debugf("starting reconcile") logger.Debugf("starting reconcile")
defer logger.Debugf("reconcile finished") defer logger.Debugf("reconcile finished")
tsr := new(tsapi.Recorder) tsr := new(tsapi.Recorder)
err := r.Get(ctx, req.NamespacedName, tsr) err = r.Get(ctx, req.NamespacedName, tsr)
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
logger.Debugf("Recorder not found, assuming it was deleted") logger.Debugf("Recorder not found, assuming it was deleted")
return reconcile.Result{}, nil return reconcile.Result{}, nil
@ -100,7 +98,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques
} }
tsr.Finalizers = slices.Delete(tsr.Finalizers, ix, ix+1) tsr.Finalizers = slices.Delete(tsr.Finalizers, ix, ix+1)
if err = r.Update(ctx, tsr); err != nil { if err := r.Update(ctx, tsr); err != nil {
return reconcile.Result{}, err return reconcile.Result{}, err
} }
return reconcile.Result{}, nil return reconcile.Result{}, nil
@ -112,11 +110,10 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques
if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) {
// An error encountered here should get returned by the Reconcile function. // An error encountered here should get returned by the Reconcile function.
if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil {
return reconcile.Result{}, errors.Join(err, updateErr) err = errors.Join(err, updateErr)
} }
} }
return reconcile.Result{}, err
return reconcile.Result{}, nil
} }
if !slices.Contains(tsr.Finalizers, FinalizerName) { if !slices.Contains(tsr.Finalizers, FinalizerName) {
@ -126,12 +123,12 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques
// operation is underway. // operation is underway.
logger.Infof("ensuring Recorder is set up") logger.Infof("ensuring Recorder is set up")
tsr.Finalizers = append(tsr.Finalizers, FinalizerName) tsr.Finalizers = append(tsr.Finalizers, FinalizerName)
if err = r.Update(ctx, tsr); err != nil { if err := r.Update(ctx, tsr); err != nil {
return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed) return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed)
} }
} }
if err = r.validate(ctx, tsr); err != nil { if err := r.validate(ctx, tsr); err != nil {
message := fmt.Sprintf("Recorder is invalid: %s", err) message := fmt.Sprintf("Recorder is invalid: %s", err)
r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message) r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message)
return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message)
@ -163,29 +160,19 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco
gaugeRecorderResources.Set(int64(r.recorders.Len())) gaugeRecorderResources.Set(int64(r.recorders.Len()))
r.mu.Unlock() r.mu.Unlock()
if err := r.ensureAuthSecretsCreated(ctx, tsr); err != nil { if err := r.ensureAuthSecretCreated(ctx, tsr); err != nil {
return fmt.Errorf("error creating secrets: %w", err) return fmt.Errorf("error creating secrets: %w", err)
} }
// State Secret is precreated so we can use the Recorder CR as its owner ref.
// State Secrets are pre-created so we can use the Recorder CR as its owner ref. sec := tsrStateSecret(tsr, r.tsNamespace)
var replicas int32 = 1 if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) {
if tsr.Spec.Replicas != nil { s.ObjectMeta.Labels = sec.ObjectMeta.Labels
replicas = *tsr.Spec.Replicas s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations
} }); err != nil {
return fmt.Errorf("error creating state Secret: %w", err)
for replica := range replicas {
sec := tsrStateSecret(tsr, r.tsNamespace, replica)
_, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) {
s.ObjectMeta.Labels = sec.ObjectMeta.Labels
s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations
})
if err != nil {
return fmt.Errorf("error creating state Secret %q: %w", sec.Name, err)
}
} }
sa := tsrServiceAccount(tsr, r.tsNamespace) sa := tsrServiceAccount(tsr, r.tsNamespace)
_, err := createOrMaybeUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) error { if _, err := createOrMaybeUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) error {
// Perform this check within the update function to make sure we don't // Perform this check within the update function to make sure we don't
// have a race condition between the previous check and the update. // have a race condition between the previous check and the update.
if err := saOwnedByRecorder(s, tsr); err != nil { if err := saOwnedByRecorder(s, tsr); err != nil {
@ -196,68 +183,54 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco
s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations
return nil return nil
}) }); err != nil {
if err != nil {
return fmt.Errorf("error creating ServiceAccount: %w", err) return fmt.Errorf("error creating ServiceAccount: %w", err)
} }
role := tsrRole(tsr, r.tsNamespace) role := tsrRole(tsr, r.tsNamespace)
_, err = createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) {
r.ObjectMeta.Labels = role.ObjectMeta.Labels r.ObjectMeta.Labels = role.ObjectMeta.Labels
r.ObjectMeta.Annotations = role.ObjectMeta.Annotations r.ObjectMeta.Annotations = role.ObjectMeta.Annotations
r.Rules = role.Rules r.Rules = role.Rules
}) }); err != nil {
if err != nil {
return fmt.Errorf("error creating Role: %w", err) return fmt.Errorf("error creating Role: %w", err)
} }
roleBinding := tsrRoleBinding(tsr, r.tsNamespace) roleBinding := tsrRoleBinding(tsr, r.tsNamespace)
_, err = createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) {
r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels
r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations
r.RoleRef = roleBinding.RoleRef r.RoleRef = roleBinding.RoleRef
r.Subjects = roleBinding.Subjects r.Subjects = roleBinding.Subjects
}) }); err != nil {
if err != nil {
return fmt.Errorf("error creating RoleBinding: %w", err) return fmt.Errorf("error creating RoleBinding: %w", err)
} }
ss := tsrStatefulSet(tsr, r.tsNamespace, r.loginServer) ss := tsrStatefulSet(tsr, r.tsNamespace, r.loginServer)
_, err = createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) {
s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Labels = ss.ObjectMeta.Labels
s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations
s.Spec = ss.Spec s.Spec = ss.Spec
}) }); err != nil {
if err != nil {
return fmt.Errorf("error creating StatefulSet: %w", err) return fmt.Errorf("error creating StatefulSet: %w", err)
} }
// ServiceAccount name may have changed, in which case we need to clean up // ServiceAccount name may have changed, in which case we need to clean up
// the previous ServiceAccount. RoleBinding will already be updated to point // the previous ServiceAccount. RoleBinding will already be updated to point
// to the new ServiceAccount. // to the new ServiceAccount.
if err = r.maybeCleanupServiceAccounts(ctx, tsr, sa.Name); err != nil { if err := r.maybeCleanupServiceAccounts(ctx, tsr, sa.Name); err != nil {
return fmt.Errorf("error cleaning up ServiceAccounts: %w", err) return fmt.Errorf("error cleaning up ServiceAccounts: %w", err)
} }
// If we have scaled the recorder down, we will have dangling state secrets
// that we need to clean up.
if err = r.maybeCleanupSecrets(ctx, tsr); err != nil {
return fmt.Errorf("error cleaning up Secrets: %w", err)
}
var devices []tsapi.RecorderTailnetDevice var devices []tsapi.RecorderTailnetDevice
for replica := range replicas {
dev, ok, err := r.getDeviceInfo(ctx, tsr.Name, replica)
switch {
case err != nil:
return fmt.Errorf("failed to get device info: %w", err)
case !ok:
logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth")
continue
}
devices = append(devices, dev) device, ok, err := r.getDeviceInfo(ctx, tsr.Name)
if err != nil {
return fmt.Errorf("failed to get device info: %w", err)
} }
if !ok {
logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth")
return nil
}
devices = append(devices, device)
tsr.Status.Devices = devices tsr.Status.Devices = devices
@ -284,89 +257,22 @@ func saOwnedByRecorder(sa *corev1.ServiceAccount, tsr *tsapi.Recorder) error {
func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, tsr *tsapi.Recorder, currentName string) error { func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, tsr *tsapi.Recorder, currentName string) error {
logger := r.logger(tsr.Name) logger := r.logger(tsr.Name)
options := []client.ListOption{ // List all ServiceAccounts owned by this Recorder.
client.InNamespace(r.tsNamespace),
client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)),
}
sas := &corev1.ServiceAccountList{} sas := &corev1.ServiceAccountList{}
if err := r.List(ctx, sas, options...); err != nil { if err := r.List(ctx, sas, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels("recorder", tsr.Name, nil))); err != nil {
return fmt.Errorf("error listing ServiceAccounts for cleanup: %w", err) return fmt.Errorf("error listing ServiceAccounts for cleanup: %w", err)
} }
for _, sa := range sas.Items {
for _, serviceAccount := range sas.Items { if sa.Name == currentName {
if serviceAccount.Name == currentName {
continue
}
err := r.Delete(ctx, &serviceAccount)
switch {
case apierrors.IsNotFound(err):
logger.Debugf("ServiceAccount %s not found, likely already deleted", serviceAccount.Name)
continue
case err != nil:
return fmt.Errorf("error deleting ServiceAccount %s: %w", serviceAccount.Name, err)
}
}
return nil
}
func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tsr *tsapi.Recorder) error {
options := []client.ListOption{
client.InNamespace(r.tsNamespace),
client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)),
}
secrets := &corev1.SecretList{}
if err := r.List(ctx, secrets, options...); err != nil {
return fmt.Errorf("error listing Secrets for cleanup: %w", err)
}
// Get the largest ordinal suffix that we expect. Then we'll go through the list of secrets owned by this
// recorder and remove them.
var replicas int32 = 1
if tsr.Spec.Replicas != nil {
replicas = *tsr.Spec.Replicas
}
for _, secret := range secrets.Items {
parts := strings.Split(secret.Name, "-")
if len(parts) == 0 {
continue
}
ordinal, err := strconv.ParseUint(parts[len(parts)-1], 10, 32)
if err != nil {
return fmt.Errorf("error parsing secret name %q: %w", secret.Name, err)
}
if int32(ordinal) < replicas {
continue continue
} }
if err := r.Delete(ctx, &sa); err != nil {
devicePrefs, ok, err := getDevicePrefs(&secret) if apierrors.IsNotFound(err) {
if err != nil { logger.Debugf("ServiceAccount %s not found, likely already deleted", sa.Name)
return err } else {
} return fmt.Errorf("error deleting ServiceAccount %s: %w", sa.Name, err)
if ok {
var errResp *tailscale.ErrResponse
r.log.Debugf("deleting device %s", devicePrefs.Config.NodeID)
err = r.tsClient.DeleteDevice(ctx, string(devicePrefs.Config.NodeID))
switch {
case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound:
// This device has possibly already been deleted in the admin console. So we can ignore this
// and move on to removing the secret.
case err != nil:
return err
} }
} }
if err = r.Delete(ctx, &secret); err != nil {
return err
}
} }
return nil return nil
@ -378,38 +284,30 @@ func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tsr *tsapi
func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) { func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) {
logger := r.logger(tsr.Name) logger := r.logger(tsr.Name)
var replicas int32 = 1 prefs, ok, err := r.getDevicePrefs(ctx, tsr.Name)
if tsr.Spec.Replicas != nil { if err != nil {
replicas = *tsr.Spec.Replicas return false, err
} }
if !ok {
for replica := range replicas { logger.Debugf("state Secret %s-0 not found or does not contain node ID, continuing cleanup", tsr.Name)
devicePrefs, ok, err := r.getDevicePrefs(ctx, tsr.Name, replica) r.mu.Lock()
if err != nil { r.recorders.Remove(tsr.UID)
return false, err gaugeRecorderResources.Set(int64(r.recorders.Len()))
} r.mu.Unlock()
if !ok { return true, nil
logger.Debugf("state Secret %s-%d not found or does not contain node ID, continuing cleanup", tsr.Name, replica) }
r.mu.Lock()
r.recorders.Remove(tsr.UID) id := string(prefs.Config.NodeID)
gaugeRecorderResources.Set(int64(r.recorders.Len())) logger.Debugf("deleting device %s from control", string(id))
r.mu.Unlock() if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil {
return true, nil errResp := &tailscale.ErrResponse{}
} if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound {
logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id))
nodeID := string(devicePrefs.Config.NodeID) } else {
logger.Debugf("deleting device %s from control", nodeID)
if err = r.tsClient.DeleteDevice(ctx, nodeID); err != nil {
errResp := &tailscale.ErrResponse{}
if errors.As(err, errResp) && errResp.Status == http.StatusNotFound {
logger.Debugf("device %s not found, likely because it has already been deleted from control", nodeID)
continue
}
return false, fmt.Errorf("error deleting device: %w", err) return false, fmt.Errorf("error deleting device: %w", err)
} }
} else {
logger.Debugf("device %s deleted from control", nodeID) logger.Debugf("device %s deleted from control", string(id))
} }
// Unlike most log entries in the reconcile loop, this will get printed // Unlike most log entries in the reconcile loop, this will get printed
@ -421,46 +319,38 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record
r.recorders.Remove(tsr.UID) r.recorders.Remove(tsr.UID)
gaugeRecorderResources.Set(int64(r.recorders.Len())) gaugeRecorderResources.Set(int64(r.recorders.Len()))
r.mu.Unlock() r.mu.Unlock()
return true, nil return true, nil
} }
func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tsr *tsapi.Recorder) error { func (r *RecorderReconciler) ensureAuthSecretCreated(ctx context.Context, tsr *tsapi.Recorder) error {
var replicas int32 = 1 logger := r.logger(tsr.Name)
if tsr.Spec.Replicas != nil { key := types.NamespacedName{
replicas = *tsr.Spec.Replicas Namespace: r.tsNamespace,
Name: tsr.Name,
}
if err := r.Get(ctx, key, &corev1.Secret{}); err == nil {
// No updates, already created the auth key.
logger.Debugf("auth Secret %s already exists", key.Name)
return nil
} else if !apierrors.IsNotFound(err) {
return err
} }
// Create the auth key Secret which is going to be used by the StatefulSet
// to authenticate with Tailscale.
logger.Debugf("creating authkey for new Recorder")
tags := tsr.Spec.Tags tags := tsr.Spec.Tags
if len(tags) == 0 { if len(tags) == 0 {
tags = tsapi.Tags{"tag:k8s"} tags = tsapi.Tags{"tag:k8s"}
} }
authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify())
if err != nil {
return err
}
logger := r.logger(tsr.Name) logger.Debug("creating a new Secret for the Recorder")
if err := r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey)); err != nil {
for replica := range replicas { return err
key := types.NamespacedName{
Namespace: r.tsNamespace,
Name: fmt.Sprintf("%s-auth-%d", tsr.Name, replica),
}
err := r.Get(ctx, key, &corev1.Secret{})
switch {
case err == nil:
logger.Debugf("auth Secret %q already exists", key.Name)
continue
case !apierrors.IsNotFound(err):
return fmt.Errorf("failed to get Secret %q: %w", key.Name, err)
}
authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify())
if err != nil {
return err
}
if err = r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey, replica)); err != nil {
return err
}
} }
return nil return nil
@ -471,10 +361,6 @@ func (r *RecorderReconciler) validate(ctx context.Context, tsr *tsapi.Recorder)
return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible") return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible")
} }
if tsr.Spec.Replicas != nil && *tsr.Spec.Replicas > 1 && tsr.Spec.Storage.S3 == nil {
return errors.New("must use S3 storage when using multiple replicas to ensure recordings are accessible")
}
// Check any custom ServiceAccount config doesn't conflict with pre-existing // Check any custom ServiceAccount config doesn't conflict with pre-existing
// ServiceAccounts. This check is performed once during validation to ensure // ServiceAccounts. This check is performed once during validation to ensure
// errors are raised early, but also again during any Updates to prevent a race. // errors are raised early, but also again during any Updates to prevent a race.
@ -508,11 +394,11 @@ func (r *RecorderReconciler) validate(ctx context.Context, tsr *tsapi.Recorder)
return nil return nil
} }
func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string, replica int32) (*corev1.Secret, error) { func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) (*corev1.Secret, error) {
secret := &corev1.Secret{ secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Namespace: r.tsNamespace, Namespace: r.tsNamespace,
Name: fmt.Sprintf("%s-%d", tsrName, replica), Name: fmt.Sprintf("%s-0", tsrName),
}, },
} }
if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil {
@ -526,8 +412,8 @@ func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string,
return secret, nil return secret, nil
} }
func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string, replica int32) (prefs prefs, ok bool, err error) { func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string) (prefs prefs, ok bool, err error) {
secret, err := r.getStateSecret(ctx, tsrName, replica) secret, err := r.getStateSecret(ctx, tsrName)
if err != nil || secret == nil { if err != nil || secret == nil {
return prefs, false, err return prefs, false, err
} }
@ -555,8 +441,8 @@ func getDevicePrefs(secret *corev1.Secret) (prefs prefs, ok bool, err error) {
return prefs, ok, nil return prefs, ok, nil
} }
func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string, replica int32) (d tsapi.RecorderTailnetDevice, ok bool, err error) { func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.RecorderTailnetDevice, ok bool, err error) {
secret, err := r.getStateSecret(ctx, tsrName, replica) secret, err := r.getStateSecret(ctx, tsrName)
if err != nil || secret == nil { if err != nil || secret == nil {
return tsapi.RecorderTailnetDevice{}, false, err return tsapi.RecorderTailnetDevice{}, false, err
} }

@ -12,36 +12,30 @@ import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/types/ptr" "tailscale.com/types/ptr"
"tailscale.com/version" "tailscale.com/version"
) )
func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *appsv1.StatefulSet { func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *appsv1.StatefulSet {
var replicas int32 = 1 return &appsv1.StatefulSet{
if tsr.Spec.Replicas != nil {
replicas = *tsr.Spec.Replicas
}
ss := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name, Name: tsr.Name,
Namespace: namespace, Namespace: namespace,
Labels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels), Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels),
OwnerReferences: tsrOwnerReference(tsr), OwnerReferences: tsrOwnerReference(tsr),
Annotations: tsr.Spec.StatefulSet.Annotations, Annotations: tsr.Spec.StatefulSet.Annotations,
}, },
Spec: appsv1.StatefulSetSpec{ Spec: appsv1.StatefulSetSpec{
Replicas: ptr.To(replicas), Replicas: ptr.To[int32](1),
Selector: &metav1.LabelSelector{ Selector: &metav1.LabelSelector{
MatchLabels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), MatchLabels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels),
}, },
Template: corev1.PodTemplateSpec{ Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name, Name: tsr.Name,
Namespace: namespace, Namespace: namespace,
Labels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels),
Annotations: tsr.Spec.StatefulSet.Pod.Annotations, Annotations: tsr.Spec.StatefulSet.Pod.Annotations,
}, },
Spec: corev1.PodSpec{ Spec: corev1.PodSpec{
@ -65,7 +59,7 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *
ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy, ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy,
Resources: tsr.Spec.StatefulSet.Pod.Container.Resources, Resources: tsr.Spec.StatefulSet.Pod.Container.Resources,
SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext, SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext,
Env: tsrEnv(tsr, loginServer), Env: env(tsr, loginServer),
EnvFrom: func() []corev1.EnvFromSource { EnvFrom: func() []corev1.EnvFromSource {
if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" { if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" {
return nil return nil
@ -101,28 +95,6 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *
}, },
}, },
} }
for replica := range replicas {
volumeName := fmt.Sprintf("authkey-%d", replica)
ss.Spec.Template.Spec.Containers[0].VolumeMounts = append(ss.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: volumeName,
ReadOnly: true,
MountPath: fmt.Sprintf("/etc/tailscaled/%s-%d", ss.Name, replica),
})
ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: fmt.Sprintf("%s-auth-%d", tsr.Name, replica),
Items: []corev1.KeyToPath{{Key: "authkey", Path: "authkey"}},
},
},
})
}
return ss
} }
func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount { func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount {
@ -130,7 +102,7 @@ func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAcc
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: tsrServiceAccountName(tsr), Name: tsrServiceAccountName(tsr),
Namespace: namespace, Namespace: namespace,
Labels: tsrLabels("recorder", tsr.Name, nil), Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr), OwnerReferences: tsrOwnerReference(tsr),
Annotations: tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations, Annotations: tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations,
}, },
@ -148,24 +120,11 @@ func tsrServiceAccountName(tsr *tsapi.Recorder) string {
} }
func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role {
var replicas int32 = 1
if tsr.Spec.Replicas != nil {
replicas = *tsr.Spec.Replicas
}
resourceNames := make([]string, 0)
for replica := range replicas {
resourceNames = append(resourceNames,
fmt.Sprintf("%s-%d", tsr.Name, replica), // State secret.
fmt.Sprintf("%s-auth-%d", tsr.Name, replica), // Auth key secret.
)
}
return &rbacv1.Role{ return &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name, Name: tsr.Name,
Namespace: namespace, Namespace: namespace,
Labels: tsrLabels("recorder", tsr.Name, nil), Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr), OwnerReferences: tsrOwnerReference(tsr),
}, },
Rules: []rbacv1.PolicyRule{ Rules: []rbacv1.PolicyRule{
@ -177,7 +136,10 @@ func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role {
"patch", "patch",
"update", "update",
}, },
ResourceNames: resourceNames, ResourceNames: []string{
tsr.Name, // Contains the auth key.
fmt.Sprintf("%s-0", tsr.Name), // Contains the node state.
},
}, },
{ {
APIGroups: []string{""}, APIGroups: []string{""},
@ -197,7 +159,7 @@ func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name, Name: tsr.Name,
Namespace: namespace, Namespace: namespace,
Labels: tsrLabels("recorder", tsr.Name, nil), Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr), OwnerReferences: tsrOwnerReference(tsr),
}, },
Subjects: []rbacv1.Subject{ Subjects: []rbacv1.Subject{
@ -214,12 +176,12 @@ func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding {
} }
} }
func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string, replica int32) *corev1.Secret { func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev1.Secret {
return &corev1.Secret{ return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Namespace: namespace, Namespace: namespace,
Name: fmt.Sprintf("%s-auth-%d", tsr.Name, replica), Name: tsr.Name,
Labels: tsrLabels("recorder", tsr.Name, nil), Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr), OwnerReferences: tsrOwnerReference(tsr),
}, },
StringData: map[string]string{ StringData: map[string]string{
@ -228,19 +190,30 @@ func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string, replic
} }
} }
func tsrStateSecret(tsr *tsapi.Recorder, namespace string, replica int32) *corev1.Secret { func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret {
return &corev1.Secret{ return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", tsr.Name, replica), Name: fmt.Sprintf("%s-0", tsr.Name),
Namespace: namespace, Namespace: namespace,
Labels: tsrLabels("recorder", tsr.Name, nil), Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr), OwnerReferences: tsrOwnerReference(tsr),
}, },
} }
} }
func tsrEnv(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar {
envs := []corev1.EnvVar{ envs := []corev1.EnvVar{
{
Name: "TS_AUTHKEY",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: tsr.Name,
},
Key: "authkey",
},
},
},
{ {
Name: "POD_NAME", Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{ ValueFrom: &corev1.EnvVarSource{
@ -258,10 +231,6 @@ func tsrEnv(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar {
}, },
}, },
}, },
{
Name: "TS_AUTHKEY_FILE",
Value: "/etc/tailscaled/$(POD_NAME)/authkey",
},
{ {
Name: "TS_STATE", Name: "TS_STATE",
Value: "kube:$(POD_NAME)", Value: "kube:$(POD_NAME)",
@ -311,18 +280,18 @@ func tsrEnv(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar {
return envs return envs
} }
func tsrLabels(app, instance string, customLabels map[string]string) map[string]string { func labels(app, instance string, customLabels map[string]string) map[string]string {
labels := make(map[string]string, len(customLabels)+3) l := make(map[string]string, len(customLabels)+3)
for k, v := range customLabels { for k, v := range customLabels {
labels[k] = v l[k] = v
} }
// ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ // ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
labels["app.kubernetes.io/name"] = app l["app.kubernetes.io/name"] = app
labels["app.kubernetes.io/instance"] = instance l["app.kubernetes.io/instance"] = instance
labels["app.kubernetes.io/managed-by"] = "tailscale-operator" l["app.kubernetes.io/managed-by"] = "tailscale-operator"
return labels return l
} }
func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference { func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference {

@ -12,7 +12,6 @@ import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/types/ptr" "tailscale.com/types/ptr"
) )
@ -24,7 +23,6 @@ func TestRecorderSpecs(t *testing.T) {
Name: "test", Name: "test",
}, },
Spec: tsapi.RecorderSpec{ Spec: tsapi.RecorderSpec{
Replicas: ptr.To[int32](3),
StatefulSet: tsapi.RecorderStatefulSet{ StatefulSet: tsapi.RecorderStatefulSet{
Labels: map[string]string{ Labels: map[string]string{
"ss-label-key": "ss-label-value", "ss-label-key": "ss-label-value",
@ -103,10 +101,10 @@ func TestRecorderSpecs(t *testing.T) {
} }
// Pod-level. // Pod-level.
if diff := cmp.Diff(ss.Labels, tsrLabels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" { if diff := cmp.Diff(ss.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" {
t.Errorf("(-got +want):\n%s", diff) t.Errorf("(-got +want):\n%s", diff)
} }
if diff := cmp.Diff(ss.Spec.Template.Labels, tsrLabels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" { if diff := cmp.Diff(ss.Spec.Template.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" {
t.Errorf("(-got +want):\n%s", diff) t.Errorf("(-got +want):\n%s", diff)
} }
if diff := cmp.Diff(ss.Spec.Template.Spec.Affinity, tsr.Spec.StatefulSet.Pod.Affinity); diff != "" { if diff := cmp.Diff(ss.Spec.Template.Spec.Affinity, tsr.Spec.StatefulSet.Pod.Affinity); diff != "" {
@ -126,7 +124,7 @@ func TestRecorderSpecs(t *testing.T) {
} }
// Container-level. // Container-level.
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, tsrEnv(tsr, tsLoginServer)); diff != "" { if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr, tsLoginServer)); diff != "" {
t.Errorf("(-got +want):\n%s", diff) t.Errorf("(-got +want):\n%s", diff)
} }
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" { if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" {
@ -141,17 +139,5 @@ func TestRecorderSpecs(t *testing.T) {
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Resources, tsr.Spec.StatefulSet.Pod.Container.Resources); diff != "" { if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Resources, tsr.Spec.StatefulSet.Pod.Container.Resources); diff != "" {
t.Errorf("(-got +want):\n%s", diff) t.Errorf("(-got +want):\n%s", diff)
} }
if *ss.Spec.Replicas != *tsr.Spec.Replicas {
t.Errorf("expected %d replicas, got %d", *tsr.Spec.Replicas, *ss.Spec.Replicas)
}
if len(ss.Spec.Template.Spec.Volumes) != int(*tsr.Spec.Replicas)+1 {
t.Errorf("expected %d volumes, got %d", *tsr.Spec.Replicas+1, len(ss.Spec.Template.Spec.Volumes))
}
if len(ss.Spec.Template.Spec.Containers[0].VolumeMounts) != int(*tsr.Spec.Replicas)+1 {
t.Errorf("expected %d volume mounts, got %d", *tsr.Spec.Replicas+1, len(ss.Spec.Template.Spec.Containers[0].VolumeMounts))
}
}) })
} }

@ -8,7 +8,6 @@ package main
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"strings" "strings"
"testing" "testing"
@ -21,11 +20,9 @@ import (
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake"
tsoperator "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/tstest" "tailscale.com/tstest"
"tailscale.com/types/ptr"
) )
const ( const (
@ -39,9 +36,6 @@ func TestRecorder(t *testing.T) {
Name: "test", Name: "test",
Finalizers: []string{"tailscale.com/finalizer"}, Finalizers: []string{"tailscale.com/finalizer"},
}, },
Spec: tsapi.RecorderSpec{
Replicas: ptr.To[int32](3),
},
} }
fc := fake.NewClientBuilder(). fc := fake.NewClientBuilder().
@ -58,7 +52,7 @@ func TestRecorder(t *testing.T) {
Client: fc, Client: fc,
tsClient: tsClient, tsClient: tsClient,
recorder: fr, recorder: fr,
log: zl.Sugar(), l: zl.Sugar(),
clock: cl, clock: cl,
loginServer: tsLoginServer, loginServer: tsLoginServer,
} }
@ -86,15 +80,6 @@ func TestRecorder(t *testing.T) {
}) })
expectReconciled(t, reconciler, "", tsr.Name) expectReconciled(t, reconciler, "", tsr.Name)
expectedEvent = "Warning RecorderInvalid Recorder is invalid: must use S3 storage when using multiple replicas to ensure recordings are accessible"
expectEvents(t, fr, []string{expectedEvent})
tsr.Spec.Storage.S3 = &tsapi.S3{}
mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) {
t.Spec = tsr.Spec
})
expectReconciled(t, reconciler, "", tsr.Name)
// Only check part of this error message, because it's defined in an // Only check part of this error message, because it's defined in an
// external package and may change. // external package and may change.
if err := fc.Get(context.Background(), client.ObjectKey{ if err := fc.Get(context.Background(), client.ObjectKey{
@ -195,47 +180,33 @@ func TestRecorder(t *testing.T) {
}) })
t.Run("populate_node_info_in_state_secret_and_see_it_appear_in_status", func(t *testing.T) { t.Run("populate_node_info_in_state_secret_and_see_it_appear_in_status", func(t *testing.T) {
bytes, err := json.Marshal(map[string]any{
"Config": map[string]any{
"NodeID": "nodeid-123",
"UserProfile": map[string]any{
"LoginName": "test-0.example.ts.net",
},
},
})
if err != nil {
t.Fatal(err)
}
const key = "profile-abc" const key = "profile-abc"
for replica := range *tsr.Spec.Replicas { mustUpdate(t, fc, tsNamespace, "test-0", func(s *corev1.Secret) {
bytes, err := json.Marshal(map[string]any{ s.Data = map[string][]byte{
"Config": map[string]any{ currentProfileKey: []byte(key),
"NodeID": fmt.Sprintf("node-%d", replica), key: bytes,
"UserProfile": map[string]any{
"LoginName": fmt.Sprintf("test-%d.example.ts.net", replica),
},
},
})
if err != nil {
t.Fatal(err)
} }
})
name := fmt.Sprintf("%s-%d", "test", replica)
mustUpdate(t, fc, tsNamespace, name, func(s *corev1.Secret) {
s.Data = map[string][]byte{
currentProfileKey: []byte(key),
key: bytes,
}
})
}
expectReconciled(t, reconciler, "", tsr.Name) expectReconciled(t, reconciler, "", tsr.Name)
tsr.Status.Devices = []tsapi.RecorderTailnetDevice{ tsr.Status.Devices = []tsapi.RecorderTailnetDevice{
{ {
Hostname: "hostname-node-0", Hostname: "hostname-nodeid-123",
TailnetIPs: []string{"1.2.3.4", "::1"}, TailnetIPs: []string{"1.2.3.4", "::1"},
URL: "https://test-0.example.ts.net", URL: "https://test-0.example.ts.net",
}, },
{
Hostname: "hostname-node-1",
TailnetIPs: []string{"1.2.3.4", "::1"},
URL: "https://test-1.example.ts.net",
},
{
Hostname: "hostname-node-2",
TailnetIPs: []string{"1.2.3.4", "::1"},
URL: "https://test-2.example.ts.net",
},
} }
expectEqual(t, fc, tsr) expectEqual(t, fc, tsr)
}) })
@ -251,7 +222,7 @@ func TestRecorder(t *testing.T) {
if expected := 0; reconciler.recorders.Len() != expected { if expected := 0; reconciler.recorders.Len() != expected {
t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len())
} }
if diff := cmp.Diff(tsClient.deleted, []string{"node-0", "node-1", "node-2"}); diff != "" { if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-123"}); diff != "" {
t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff) t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff)
} }
// The fake client does not clean up objects whose owner has been // The fake client does not clean up objects whose owner has been
@ -262,38 +233,26 @@ func TestRecorder(t *testing.T) {
func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recorder, shouldExist bool) { func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recorder, shouldExist bool) {
t.Helper() t.Helper()
var replicas int32 = 1 auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey")
if tsr.Spec.Replicas != nil { state := tsrStateSecret(tsr, tsNamespace)
replicas = *tsr.Spec.Replicas
}
role := tsrRole(tsr, tsNamespace) role := tsrRole(tsr, tsNamespace)
roleBinding := tsrRoleBinding(tsr, tsNamespace) roleBinding := tsrRoleBinding(tsr, tsNamespace)
serviceAccount := tsrServiceAccount(tsr, tsNamespace) serviceAccount := tsrServiceAccount(tsr, tsNamespace)
statefulSet := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) statefulSet := tsrStatefulSet(tsr, tsNamespace, tsLoginServer)
if shouldExist { if shouldExist {
expectEqual(t, fc, auth)
expectEqual(t, fc, state)
expectEqual(t, fc, role) expectEqual(t, fc, role)
expectEqual(t, fc, roleBinding) expectEqual(t, fc, roleBinding)
expectEqual(t, fc, serviceAccount) expectEqual(t, fc, serviceAccount)
expectEqual(t, fc, statefulSet, removeResourceReqs) expectEqual(t, fc, statefulSet, removeResourceReqs)
} else { } else {
expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name)
expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name)
expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name)
expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name)
expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name) expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name)
expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name) expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name)
} }
for replica := range replicas {
auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey", replica)
state := tsrStateSecret(tsr, tsNamespace, replica)
if shouldExist {
expectEqual(t, fc, auth)
expectEqual(t, fc, state)
} else {
expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name)
expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name)
}
}
} }

@ -50,32 +50,32 @@ func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interf
} }
} }
func (ld *configLoader) WatchConfig(ctx context.Context, path string) error { func (l *configLoader) WatchConfig(ctx context.Context, path string) error {
secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:") secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:")
if isKubeSecret { if isKubeSecret {
secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator)) secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator))
if !ok { if !ok {
return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path) return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path)
} }
if err := ld.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err) return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err)
} }
return nil return nil
} }
if err := ld.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("error watching config file %q: %w", path, err) return fmt.Errorf("error watching config file %q: %w", path, err)
} }
return nil return nil
} }
func (ld *configLoader) reloadConfig(ctx context.Context, raw []byte) error { func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
if bytes.Equal(raw, ld.previous) { if bytes.Equal(raw, l.previous) {
if ld.cfgIgnored != nil && testenv.InTest() { if l.cfgIgnored != nil && testenv.InTest() {
ld.once.Do(func() { l.once.Do(func() {
close(ld.cfgIgnored) close(l.cfgIgnored)
}) })
} }
return nil return nil
@ -89,14 +89,14 @@ func (ld *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return ctx.Err()
case ld.cfgChan <- &cfg: case l.cfgChan <- &cfg:
} }
ld.previous = raw l.previous = raw
return nil return nil
} }
func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error {
var ( var (
tickChan <-chan time.Time tickChan <-chan time.Time
eventChan <-chan fsnotify.Event eventChan <-chan fsnotify.Event
@ -106,14 +106,14 @@ func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string)
if w, err := fsnotify.NewWatcher(); err != nil { if w, err := fsnotify.NewWatcher(); err != nil {
// Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor.
// See https://github.com/tailscale/tailscale/issues/15081 // See https://github.com/tailscale/tailscale/issues/15081
ld.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err)
ticker := time.NewTicker(5 * time.Second) ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop() defer ticker.Stop()
tickChan = ticker.C tickChan = ticker.C
} else { } else {
dir := filepath.Dir(path) dir := filepath.Dir(path)
file := filepath.Base(path) file := filepath.Base(path)
ld.logger.Infof("Watching directory %q for changes to config file %q", dir, file) l.logger.Infof("Watching directory %q for changes to config file %q", dir, file)
defer w.Close() defer w.Close()
if err := w.Add(dir); err != nil { if err := w.Add(dir); err != nil {
return fmt.Errorf("failed to add fsnotify watch: %w", err) return fmt.Errorf("failed to add fsnotify watch: %w", err)
@ -128,7 +128,7 @@ func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string)
if err != nil { if err != nil {
return fmt.Errorf("error reading config file %q: %w", path, err) return fmt.Errorf("error reading config file %q: %w", path, err)
} }
if err := ld.reloadConfig(ctx, b); err != nil { if err := l.reloadConfig(ctx, b); err != nil {
return fmt.Errorf("error loading initial config file %q: %w", path, err) return fmt.Errorf("error loading initial config file %q: %w", path, err)
} }
@ -163,14 +163,14 @@ func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string)
if len(b) == 0 { if len(b) == 0 {
continue continue
} }
if err := ld.reloadConfig(ctx, b); err != nil { if err := l.reloadConfig(ctx, b); err != nil {
return fmt.Errorf("error reloading config file %q: %v", path, err) return fmt.Errorf("error reloading config file %q: %v", path, err)
} }
} }
} }
func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error {
secrets := ld.client.Secrets(secretNamespace) secrets := l.client.Secrets(secretNamespace)
w, err := secrets.Watch(ctx, metav1.ListOptions{ w, err := secrets.Watch(ctx, metav1.ListOptions{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "Secret", Kind: "Secret",
@ -198,11 +198,11 @@ func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretName
return fmt.Errorf("failed to get config Secret %q: %w", secretName, err) return fmt.Errorf("failed to get config Secret %q: %w", secretName, err)
} }
if err := ld.configFromSecret(ctx, secret); err != nil { if err := l.configFromSecret(ctx, secret); err != nil {
return fmt.Errorf("error loading initial config: %w", err) return fmt.Errorf("error loading initial config: %w", err)
} }
ld.logger.Infof("Watching config Secret %q for changes", secretName) l.logger.Infof("Watching config Secret %q for changes", secretName)
for { for {
var secret *corev1.Secret var secret *corev1.Secret
select { select {
@ -237,7 +237,7 @@ func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretName
if secret == nil || secret.Data == nil { if secret == nil || secret.Data == nil {
continue continue
} }
if err := ld.configFromSecret(ctx, secret); err != nil { if err := l.configFromSecret(ctx, secret); err != nil {
return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err) return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err)
} }
case watch.Error: case watch.Error:
@ -250,13 +250,13 @@ func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretName
} }
} }
func (ld *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error {
b := s.Data[kubetypes.KubeAPIServerConfigFile] b := s.Data[kubetypes.KubeAPIServerConfigFile]
if len(b) == 0 { if len(b) == 0 {
return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile) return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile)
} }
if err := ld.reloadConfig(ctx, b); err != nil { if err := l.reloadConfig(ctx, b); err != nil {
return err return err
} }

@ -125,15 +125,15 @@ func TestWatchConfig(t *testing.T) {
} }
} }
configChan := make(chan *conf.Config) configChan := make(chan *conf.Config)
loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
loader.cfgIgnored = make(chan struct{}) l.cfgIgnored = make(chan struct{})
errs := make(chan error) errs := make(chan error)
ctx, cancel := context.WithCancel(t.Context()) ctx, cancel := context.WithCancel(t.Context())
defer cancel() defer cancel()
writeFile(t, tc.initialConfig) writeFile(t, tc.initialConfig)
go func() { go func() {
errs <- loader.WatchConfig(ctx, cfgPath) errs <- l.WatchConfig(ctx, cfgPath)
}() }()
for i, p := range tc.phases { for i, p := range tc.phases {
@ -159,7 +159,7 @@ func TestWatchConfig(t *testing.T) {
} else if !strings.Contains(err.Error(), p.expectedErr) { } else if !strings.Contains(err.Error(), p.expectedErr) {
t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error()) t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error())
} }
case <-loader.cfgIgnored: case <-l.cfgIgnored:
if p.expectedConf != nil { if p.expectedConf != nil {
t.Fatalf("expected config to be reloaded, but got ignored signal") t.Fatalf("expected config to be reloaded, but got ignored signal")
} }
@ -192,13 +192,13 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) {
}) })
configChan := make(chan *conf.Config) configChan := make(chan *conf.Config)
loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
mustCreateOrUpdate(t, cl, secretFrom(expected[0])) mustCreateOrUpdate(t, cl, secretFrom(expected[0]))
errs := make(chan error) errs := make(chan error)
go func() { go func() {
errs <- loader.watchConfigSecretChanges(t.Context(), "default", "config-secret") errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret")
}() }()
for i := range 2 { for i := range 2 {
@ -212,7 +212,7 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) {
} }
case err := <-errs: case err := <-errs:
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
case <-loader.cfgIgnored: case <-l.cfgIgnored:
t.Fatalf("expected config to be reloaded, but got ignored signal") t.Fatalf("expected config to be reloaded, but got ignored signal")
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for expected event") t.Fatalf("timed out waiting for expected event")

@ -422,9 +422,9 @@ func (ipp *ConsensusIPPool) applyCheckoutAddr(nid tailcfg.NodeID, domain string,
} }
// Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state. // Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state.
func (ipp *ConsensusIPPool) Apply(lg *raft.Log) any { func (ipp *ConsensusIPPool) Apply(l *raft.Log) any {
var c tsconsensus.Command var c tsconsensus.Command
if err := json.Unmarshal(lg.Data, &c); err != nil { if err := json.Unmarshal(l.Data, &c); err != nil {
panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error())) panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error()))
} }
switch c.Name { switch c.Name {

@ -44,52 +44,25 @@ import (
"github.com/dsnet/try" "github.com/dsnet/try"
jsonv2 "github.com/go-json-experiment/json" jsonv2 "github.com/go-json-experiment/json"
"github.com/go-json-experiment/json/jsontext" "github.com/go-json-experiment/json/jsontext"
"tailscale.com/tailcfg"
"tailscale.com/types/bools"
"tailscale.com/types/logid" "tailscale.com/types/logid"
"tailscale.com/types/netlogtype" "tailscale.com/types/netlogtype"
"tailscale.com/util/must" "tailscale.com/util/must"
) )
var ( var (
resolveNames = flag.Bool("resolve-names", false, "This is equivalent to specifying \"--resolve-addrs=name\".") resolveNames = flag.Bool("resolve-names", false, "convert tailscale IP addresses to hostnames; must also specify --api-key and --tailnet-id")
resolveAddrs = flag.String("resolve-addrs", "", "Resolve each tailscale IP address as a node ID, name, or user.\n"+ apiKey = flag.String("api-key", "", "API key to query the Tailscale API with; see https://login.tailscale.com/admin/settings/keys")
"If network flow logs do not support embedded node information,\n"+ tailnetName = flag.String("tailnet-name", "", "tailnet domain name to lookup devices in; see https://login.tailscale.com/admin/settings/general")
"then --api-key and --tailnet-name must also be provided.\n"+
"Valid values include \"nodeId\", \"name\", or \"user\".")
apiKey = flag.String("api-key", "", "The API key to query the Tailscale API with.\nSee https://login.tailscale.com/admin/settings/keys")
tailnetName = flag.String("tailnet-name", "", "The Tailnet name to lookup nodes within.\nSee https://login.tailscale.com/admin/settings/general")
) )
var ( var namesByAddr map[netip.Addr]string
tailnetNodesByAddr map[netip.Addr]netlogtype.Node
tailnetNodesByID map[tailcfg.StableNodeID]netlogtype.Node
)
func main() { func main() {
flag.Parse() flag.Parse()
if *resolveNames { if *resolveNames {
*resolveAddrs = "name" namesByAddr = mustMakeNamesByAddr()
}
*resolveAddrs = strings.ToLower(*resolveAddrs) // make case-insensitive
*resolveAddrs = strings.TrimSuffix(*resolveAddrs, "s") // allow plural form
*resolveAddrs = strings.ReplaceAll(*resolveAddrs, " ", "") // ignore spaces
*resolveAddrs = strings.ReplaceAll(*resolveAddrs, "-", "") // ignore dashes
*resolveAddrs = strings.ReplaceAll(*resolveAddrs, "_", "") // ignore underscores
switch *resolveAddrs {
case "":
case "id", "nodeid":
*resolveAddrs = "nodeid"
case "name", "hostname":
*resolveAddrs = "name"
case "user", "tag", "usertag", "taguser":
*resolveAddrs = "user" // tag resolution is implied
default:
log.Fatalf("--resolve-addrs must be \"nodeId\", \"name\", or \"user\"")
} }
mustLoadTailnetNodes()
// The logic handles a stream of arbitrary JSON. // The logic handles a stream of arbitrary JSON.
// So long as a JSON object seems like a network log message, // So long as a JSON object seems like a network log message,
// then this will unmarshal and print it. // then this will unmarshal and print it.
@ -130,7 +103,7 @@ func processArray(dec *jsontext.Decoder) {
func processObject(dec *jsontext.Decoder) { func processObject(dec *jsontext.Decoder) {
var hasTraffic bool var hasTraffic bool
var rawMsg jsontext.Value var rawMsg []byte
try.E1(dec.ReadToken()) // parse '{' try.E1(dec.ReadToken()) // parse '{'
for dec.PeekKind() != '}' { for dec.PeekKind() != '}' {
// Capture any members that could belong to a network log message. // Capture any members that could belong to a network log message.
@ -138,13 +111,13 @@ func processObject(dec *jsontext.Decoder) {
case "virtualTraffic", "subnetTraffic", "exitTraffic", "physicalTraffic": case "virtualTraffic", "subnetTraffic", "exitTraffic", "physicalTraffic":
hasTraffic = true hasTraffic = true
fallthrough fallthrough
case "logtail", "nodeId", "logged", "srcNode", "dstNodes", "start", "end": case "logtail", "nodeId", "logged", "start", "end":
if len(rawMsg) == 0 { if len(rawMsg) == 0 {
rawMsg = append(rawMsg, '{') rawMsg = append(rawMsg, '{')
} else { } else {
rawMsg = append(rawMsg[:len(rawMsg)-1], ',') rawMsg = append(rawMsg[:len(rawMsg)-1], ',')
} }
rawMsg, _ = jsontext.AppendQuote(rawMsg, name.String()) rawMsg = append(append(append(rawMsg, '"'), name.String()...), '"')
rawMsg = append(rawMsg, ':') rawMsg = append(rawMsg, ':')
rawMsg = append(rawMsg, try.E1(dec.ReadValue())...) rawMsg = append(rawMsg, try.E1(dec.ReadValue())...)
rawMsg = append(rawMsg, '}') rawMsg = append(rawMsg, '}')
@ -172,32 +145,6 @@ type message struct {
} }
func printMessage(msg message) { func printMessage(msg message) {
var nodesByAddr map[netip.Addr]netlogtype.Node
var tailnetDNS string // e.g., ".acme-corp.ts.net"
if *resolveAddrs != "" {
nodesByAddr = make(map[netip.Addr]netlogtype.Node)
insertNode := func(node netlogtype.Node) {
for _, addr := range node.Addresses {
nodesByAddr[addr] = node
}
}
for _, node := range msg.DstNodes {
insertNode(node)
}
insertNode(msg.SrcNode)
// Derive the Tailnet DNS of the self node.
detectTailnetDNS := func(nodeName string) {
if prefix, ok := strings.CutSuffix(nodeName, ".ts.net"); ok {
if i := strings.LastIndexByte(prefix, '.'); i > 0 {
tailnetDNS = nodeName[i:]
}
}
}
detectTailnetDNS(msg.SrcNode.Name)
detectTailnetDNS(tailnetNodesByID[msg.NodeID].Name)
}
// Construct a table of network traffic per connection. // Construct a table of network traffic per connection.
rows := [][7]string{{3: "Tx[P/s]", 4: "Tx[B/s]", 5: "Rx[P/s]", 6: "Rx[B/s]"}} rows := [][7]string{{3: "Tx[P/s]", 4: "Tx[B/s]", 5: "Rx[P/s]", 6: "Rx[B/s]"}}
duration := msg.End.Sub(msg.Start) duration := msg.End.Sub(msg.Start)
@ -228,25 +175,16 @@ func printMessage(msg message) {
if !a.IsValid() { if !a.IsValid() {
return "" return ""
} }
name := a.Addr().String() if name, ok := namesByAddr[a.Addr()]; ok {
node, ok := tailnetNodesByAddr[a.Addr()] if a.Port() == 0 {
if !ok { return name
node, ok = nodesByAddr[a.Addr()]
}
if ok {
switch *resolveAddrs {
case "nodeid":
name = cmp.Or(string(node.NodeID), name)
case "name":
name = cmp.Or(strings.TrimSuffix(string(node.Name), tailnetDNS), name)
case "user":
name = cmp.Or(bools.IfElse(len(node.Tags) > 0, fmt.Sprint(node.Tags), node.User), name)
} }
}
if a.Port() != 0 {
return name + ":" + strconv.Itoa(int(a.Port())) return name + ":" + strconv.Itoa(int(a.Port()))
} }
return name if a.Port() == 0 {
return a.Addr().String()
}
return a.String()
} }
for _, cc := range traffic { for _, cc := range traffic {
row := [7]string{ row := [7]string{
@ -341,10 +279,8 @@ func printMessage(msg message) {
} }
} }
func mustLoadTailnetNodes() { func mustMakeNamesByAddr() map[netip.Addr]string {
switch { switch {
case *apiKey == "" && *tailnetName == "":
return // rely on embedded node information in the logs themselves
case *apiKey == "": case *apiKey == "":
log.Fatalf("--api-key must be specified with --resolve-names") log.Fatalf("--api-key must be specified with --resolve-names")
case *tailnetName == "": case *tailnetName == "":
@ -364,19 +300,57 @@ func mustLoadTailnetNodes() {
// Unmarshal the API response. // Unmarshal the API response.
var m struct { var m struct {
Devices []netlogtype.Node `json:"devices"` Devices []struct {
Name string `json:"name"`
Addrs []netip.Addr `json:"addresses"`
} `json:"devices"`
} }
must.Do(json.Unmarshal(b, &m)) must.Do(json.Unmarshal(b, &m))
// Construct a mapping of Tailscale IP addresses to node information. // Construct a unique mapping of Tailscale IP addresses to hostnames.
tailnetNodesByAddr = make(map[netip.Addr]netlogtype.Node) // For brevity, we start with the first segment of the name and
tailnetNodesByID = make(map[tailcfg.StableNodeID]netlogtype.Node) // use more segments until we find the shortest prefix that is unique
for _, node := range m.Devices { // for all names in the tailnet.
for _, addr := range node.Addresses { seen := make(map[string]bool)
tailnetNodesByAddr[addr] = node namesByAddr := make(map[netip.Addr]string)
retry:
for i := range 10 {
clear(seen)
clear(namesByAddr)
for _, d := range m.Devices {
name := fieldPrefix(d.Name, i)
if seen[name] {
continue retry
}
seen[name] = true
for _, a := range d.Addrs {
namesByAddr[a] = name
}
}
return namesByAddr
}
panic("unable to produce unique mapping of address to names")
}
// fieldPrefix returns the first n number of dot-separated segments.
//
// Example:
//
// fieldPrefix("foo.bar.baz", 0) returns ""
// fieldPrefix("foo.bar.baz", 1) returns "foo"
// fieldPrefix("foo.bar.baz", 2) returns "foo.bar"
// fieldPrefix("foo.bar.baz", 3) returns "foo.bar.baz"
// fieldPrefix("foo.bar.baz", 4) returns "foo.bar.baz"
func fieldPrefix(s string, n int) string {
s0 := s
for i := 0; i < n && len(s) > 0; i++ {
if j := strings.IndexByte(s, '.'); j >= 0 {
s = s[j+1:]
} else {
s = ""
} }
tailnetNodesByID[node.NodeID] = node
} }
return strings.TrimSuffix(s0[:len(s0)-len(s)], ".")
} }
func appendRepeatByte(b []byte, c byte, n int) []byte { func appendRepeatByte(b []byte, c byte, n int) []byte {

@ -141,7 +141,7 @@ func run(ctx context.Context, ts *tsnet.Server, wgPort int, hostname string, pro
// in the netmap. // in the netmap.
// We set the NotifyInitialNetMap flag so we will always get woken with the // We set the NotifyInitialNetMap flag so we will always get woken with the
// current netmap, before only being woken on changes. // current netmap, before only being woken on changes.
bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap) bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap|ipn.NotifyNoPrivateKeys)
if err != nil { if err != nil {
log.Fatalf("watching IPN bus: %v", err) log.Fatalf("watching IPN bus: %v", err)
} }

@ -152,17 +152,17 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) {
configCapKey: []tailcfg.RawMessage{tailcfg.RawMessage(b)}, configCapKey: []tailcfg.RawMessage{tailcfg.RawMessage(b)},
}) })
// Let's spin up a second node (to represent the client). // Lets spin up a second node (to represent the client).
client, _, _ := startNode(t, ctx, controlURL, "client") client, _, _ := startNode(t, ctx, controlURL, "client")
// Make sure that the sni node has received its config. // Make sure that the sni node has received its config.
lc, err := sni.LocalClient() l, err := sni.LocalClient()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
gotConfigured := false gotConfigured := false
for range 100 { for range 100 {
s, err := lc.StatusWithoutPeers(ctx) s, err := l.StatusWithoutPeers(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -176,7 +176,7 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) {
t.Error("sni node never received its configuration from the coordination server!") t.Error("sni node never received its configuration from the coordination server!")
} }
// Let's make the client open a connection to the sniproxy node, and // Lets make the client open a connection to the sniproxy node, and
// make sure it results in a connection to our test listener. // make sure it results in a connection to our test listener.
w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port))
if err != nil { if err != nil {
@ -208,10 +208,10 @@ func TestSNIProxyWithFlagConfig(t *testing.T) {
sni, _, ip := startNode(t, ctx, controlURL, "snitest") sni, _, ip := startNode(t, ctx, controlURL, "snitest")
go run(ctx, sni, 0, sni.Hostname, false, 0, "", fmt.Sprintf("tcp/%d/localhost", ln.Addr().(*net.TCPAddr).Port)) go run(ctx, sni, 0, sni.Hostname, false, 0, "", fmt.Sprintf("tcp/%d/localhost", ln.Addr().(*net.TCPAddr).Port))
// Let's spin up a second node (to represent the client). // Lets spin up a second node (to represent the client).
client, _, _ := startNode(t, ctx, controlURL, "client") client, _, _ := startNode(t, ctx, controlURL, "client")
// Let's make the client open a connection to the sniproxy node, and // Lets make the client open a connection to the sniproxy node, and
// make sure it results in a connection to our test listener. // make sure it results in a connection to our test listener.
w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port))
if err != nil { if err != nil {

@ -14,9 +14,9 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+
L github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus
L github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs
L github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs
💣 go4.org/mem from tailscale.com/metrics+ 💣 go4.org/mem from tailscale.com/metrics+
go4.org/netipx from tailscale.com/net/tsaddr go4.org/netipx from tailscale.com/net/tsaddr
google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt
@ -47,7 +47,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+
google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+
💣 google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
tailscale.com from tailscale.com/version tailscale.com from tailscale.com/version
tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/envknob from tailscale.com/tsweb+
tailscale.com/feature from tailscale.com/tsweb tailscale.com/feature from tailscale.com/tsweb
@ -82,9 +82,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
tailscale.com/util/mak from tailscale.com/syncs+ tailscale.com/util/mak from tailscale.com/syncs+
tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/nocasemaps from tailscale.com/types/ipproto
tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/rands from tailscale.com/tsweb
tailscale.com/util/set from tailscale.com/types/key
tailscale.com/util/slicesx from tailscale.com/tailcfg tailscale.com/util/slicesx from tailscale.com/tailcfg
tailscale.com/util/testenv from tailscale.com/types/logger+ tailscale.com/util/testenv from tailscale.com/types/logger
tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/vizerror from tailscale.com/tailcfg+
tailscale.com/version from tailscale.com/envknob+ tailscale.com/version from tailscale.com/envknob+
tailscale.com/version/distro from tailscale.com/envknob tailscale.com/version/distro from tailscale.com/envknob
@ -95,7 +94,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/box from tailscale.com/types/key
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz
golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+
LD golang.org/x/sys/unix from github.com/prometheus/procfs+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+
W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus

@ -135,18 +135,18 @@ type lportsPool struct {
ports []int ports []int
} }
func (pl *lportsPool) get() int { func (l *lportsPool) get() int {
pl.Lock() l.Lock()
defer pl.Unlock() defer l.Unlock()
ret := pl.ports[0] ret := l.ports[0]
pl.ports = append(pl.ports[:0], pl.ports[1:]...) l.ports = append(l.ports[:0], l.ports[1:]...)
return ret return ret
} }
func (pl *lportsPool) put(i int) { func (l *lportsPool) put(i int) {
pl.Lock() l.Lock()
defer pl.Unlock() defer l.Unlock()
pl.ports = append(pl.ports, int(i)) l.ports = append(l.ports, int(i))
} }
var ( var (
@ -173,19 +173,19 @@ func init() {
// measure dial time. // measure dial time.
type lportForTCPConn int type lportForTCPConn int
func (lp *lportForTCPConn) Close() error { func (l *lportForTCPConn) Close() error {
if *lp == 0 { if *l == 0 {
return nil return nil
} }
lports.put(int(*lp)) lports.put(int(*l))
return nil return nil
} }
func (lp *lportForTCPConn) Write([]byte) (int, error) { func (l *lportForTCPConn) Write([]byte) (int, error) {
return 0, errors.New("unimplemented") return 0, errors.New("unimplemented")
} }
func (lp *lportForTCPConn) Read([]byte) (int, error) { func (l *lportForTCPConn) Read([]byte) (int, error) {
return 0, errors.New("unimplemented") return 0, errors.New("unimplemented")
} }

@ -65,9 +65,9 @@ func main() {
} }
add, remove := diffTags(stags, dtags) add, remove := diffTags(stags, dtags)
if ln := len(add); ln > 0 { if l := len(add); l > 0 {
log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", ")) log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", "))
if *max > 0 && ln > *max { if *max > 0 && l > *max {
log.Printf("Limiting sync to %d tags", *max) log.Printf("Limiting sync to %d tags", *max)
add = add[:*max] add = add[:*max]
} }

@ -174,7 +174,6 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
curUser string // os.Getenv("USER") on the client side curUser string // os.Getenv("USER") on the client side
goos string // empty means "linux" goos string // empty means "linux"
distro distro.Distro distro distro.Distro
backendState string // empty means "Running"
want string want string
}{ }{
@ -189,28 +188,6 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
}, },
want: "", want: "",
}, },
{
name: "bare_up_needs_login_default_prefs",
flags: []string{},
curPrefs: ipn.NewPrefs(),
backendState: ipn.NeedsLogin.String(),
want: "",
},
{
name: "bare_up_needs_login_losing_prefs",
flags: []string{},
curPrefs: &ipn.Prefs{
// defaults:
ControlURL: ipn.DefaultControlURL,
WantRunning: false,
NetfilterMode: preftype.NetfilterOn,
NoStatefulFiltering: opt.NewBool(true),
// non-default:
CorpDNS: false,
},
backendState: ipn.NeedsLogin.String(),
want: accidentalUpPrefix + " --accept-dns=false",
},
{ {
name: "losing_hostname", name: "losing_hostname",
flags: []string{"--accept-dns"}, flags: []string{"--accept-dns"},
@ -643,13 +620,9 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
goos := stdcmp.Or(tt.goos, "linux") goos := "linux"
backendState := stdcmp.Or(tt.backendState, ipn.Running.String()) if tt.goos != "" {
// Needs to match the other conditions in checkForAccidentalSettingReverts goos = tt.goos
tt.curPrefs.Persist = &persist.Persist{
UserProfile: tailcfg.UserProfile{
LoginName: "janet",
},
} }
var upArgs upArgsT var upArgs upArgsT
flagSet := newUpFlagSet(goos, &upArgs, "up") flagSet := newUpFlagSet(goos, &upArgs, "up")
@ -665,11 +638,10 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
curExitNodeIP: tt.curExitNodeIP, curExitNodeIP: tt.curExitNodeIP,
distro: tt.distro, distro: tt.distro,
user: tt.curUser, user: tt.curUser,
backendState: backendState,
} }
applyImplicitPrefs(newPrefs, tt.curPrefs, upEnv) applyImplicitPrefs(newPrefs, tt.curPrefs, upEnv)
var got string var got string
if _, err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil { if err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil {
got = err.Error() got = err.Error()
} }
if strings.TrimSpace(got) != tt.want { if strings.TrimSpace(got) != tt.want {
@ -1039,10 +1011,13 @@ func TestUpdatePrefs(t *testing.T) {
wantErrSubtr string wantErrSubtr string
}{ }{
{ {
name: "bare_up_means_up", name: "bare_up_means_up",
flags: []string{}, flags: []string{},
curPrefs: ipn.NewPrefs(), curPrefs: &ipn.Prefs{
wantSimpleUp: false, // user profile not set, so no simple up ControlURL: ipn.DefaultControlURL,
WantRunning: false,
Hostname: "foo",
},
}, },
{ {
name: "just_up", name: "just_up",
@ -1056,32 +1031,6 @@ func TestUpdatePrefs(t *testing.T) {
}, },
wantSimpleUp: true, wantSimpleUp: true,
}, },
{
name: "just_up_needs_login_default_prefs",
flags: []string{},
curPrefs: ipn.NewPrefs(),
env: upCheckEnv{
backendState: "NeedsLogin",
},
wantSimpleUp: false,
},
{
name: "just_up_needs_login_losing_prefs",
flags: []string{},
curPrefs: &ipn.Prefs{
// defaults:
ControlURL: ipn.DefaultControlURL,
WantRunning: false,
NetfilterMode: preftype.NetfilterOn,
// non-default:
CorpDNS: false,
},
env: upCheckEnv{
backendState: "NeedsLogin",
},
wantSimpleUp: false,
wantErrSubtr: "tailscale up --accept-dns=false",
},
{ {
name: "just_edit", name: "just_edit",
flags: []string{}, flags: []string{},

@ -48,12 +48,9 @@ func runConfigureJetKVM(ctx context.Context, args []string) error {
if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM {
return errors.New("only implemented on JetKVM") return errors.New("only implemented on JetKVM")
} }
if err := os.MkdirAll("/userdata/init.d", 0755); err != nil { err := os.WriteFile("/etc/init.d/S22tailscale", bytes.TrimLeft([]byte(`
return errors.New("unable to create /userdata/init.d")
}
err := os.WriteFile("/userdata/init.d/S22tailscale", bytes.TrimLeft([]byte(`
#!/bin/sh #!/bin/sh
# /userdata/init.d/S22tailscale # /etc/init.d/S22tailscale
# Start/stop tailscaled # Start/stop tailscaled
case "$1" in case "$1" in

@ -182,12 +182,6 @@ func debugCmd() *ffcli.Command {
Exec: localAPIAction("rebind"), Exec: localAPIAction("rebind"),
ShortHelp: "Force a magicsock rebind", ShortHelp: "Force a magicsock rebind",
}, },
{
Name: "rotate-disco-key",
ShortUsage: "tailscale debug rotate-disco-key",
Exec: localAPIAction("rotate-disco-key"),
ShortHelp: "Rotate the discovery key",
},
{ {
Name: "derp-set-on-demand", Name: "derp-set-on-demand",
ShortUsage: "tailscale debug derp-set-on-demand", ShortUsage: "tailscale debug derp-set-on-demand",
@ -263,7 +257,8 @@ func debugCmd() *ffcli.Command {
fs := newFlagSet("watch-ipn") fs := newFlagSet("watch-ipn")
fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages")
fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status")
fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messages") fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags")
fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap")
fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever")
return fs return fs
})(), })(),
@ -275,6 +270,7 @@ func debugCmd() *ffcli.Command {
ShortHelp: "Print the current network map", ShortHelp: "Print the current network map",
FlagSet: (func() *flag.FlagSet { FlagSet: (func() *flag.FlagSet {
fs := newFlagSet("netmap") fs := newFlagSet("netmap")
fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap")
return fs return fs
})(), })(),
}, },
@ -618,10 +614,11 @@ func runPrefs(ctx context.Context, args []string) error {
} }
var watchIPNArgs struct { var watchIPNArgs struct {
netmap bool netmap bool
initial bool initial bool
rateLimit bool showPrivateKey bool
count int rateLimit bool
count int
} }
func runWatchIPN(ctx context.Context, args []string) error { func runWatchIPN(ctx context.Context, args []string) error {
@ -629,6 +626,9 @@ func runWatchIPN(ctx context.Context, args []string) error {
if watchIPNArgs.initial { if watchIPNArgs.initial {
mask = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap mask = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap
} }
if !watchIPNArgs.showPrivateKey {
mask |= ipn.NotifyNoPrivateKeys
}
if watchIPNArgs.rateLimit { if watchIPNArgs.rateLimit {
mask |= ipn.NotifyRateLimit mask |= ipn.NotifyRateLimit
} }
@ -652,11 +652,18 @@ func runWatchIPN(ctx context.Context, args []string) error {
return nil return nil
} }
var netmapArgs struct {
showPrivateKey bool
}
func runNetmap(ctx context.Context, args []string) error { func runNetmap(ctx context.Context, args []string) error {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel() defer cancel()
var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap
if !netmapArgs.showPrivateKey {
mask |= ipn.NotifyNoPrivateKeys
}
watcher, err := localClient.WatchIPNBus(ctx, mask) watcher, err := localClient.WatchIPNBus(ctx, mask)
if err != nil { if err != nil {
return err return err

@ -1,84 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package jsonoutput provides stable and versioned JSON serialisation for CLI output.
// This allows us to provide stable output to scripts/clients, but also make
// breaking changes to the output when it's useful.
//
// Historically we only used `--json` as a boolean flag, so changing the output
// could break scripts that rely on the existing format.
//
// This package allows callers to pass a version number to `--json` and get
// a consistent output. We'll bump the version when we make a breaking change
// that's likely to break scripts that rely on the existing output, e.g. if
// we remove a field or change the type/format.
//
// Passing just the boolean flag `--json` will always return v1, to preserve
// compatibility with scripts written before we versioned our output.
package jsonoutput
import (
"errors"
"fmt"
"strconv"
)
// JSONSchemaVersion implements flag.Value, and tracks whether the CLI has
// been called with `--json`, and if so, with what value.
type JSONSchemaVersion struct {
// IsSet tracks if the flag was provided at all.
IsSet bool
// Value tracks the desired schema version, which defaults to 1 if
// the user passes `--json` without an argument.
Value int
}
// String returns the default value which is printed in the CLI help text.
func (v *JSONSchemaVersion) String() string {
if v.IsSet {
return strconv.Itoa(v.Value)
} else {
return "(not set)"
}
}
// Set is called when the user passes the flag as a command-line argument.
func (v *JSONSchemaVersion) Set(s string) error {
if v.IsSet {
return errors.New("received multiple instances of --json; only pass it once")
}
v.IsSet = true
// If the user doesn't supply a schema version, default to 1.
// This ensures that any existing scripts will continue to get their
// current output.
if s == "true" {
v.Value = 1
return nil
}
version, err := strconv.Atoi(s)
if err != nil {
return fmt.Errorf("invalid integer value passed to --json: %q", s)
}
v.Value = version
return nil
}
// IsBoolFlag tells the flag package that JSONSchemaVersion can be set
// without an argument.
func (v *JSONSchemaVersion) IsBoolFlag() bool {
return true
}
// ResponseEnvelope is a set of fields common to all versioned JSON output.
type ResponseEnvelope struct {
// SchemaVersion is the version of the JSON output, e.g. "1", "2", "3"
SchemaVersion string
// ResponseWarning tells a user if a newer version of the JSON output
// is available.
ResponseWarning string `json:"_WARNING,omitzero"`
}

@ -1,203 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package jsonoutput
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tka"
)
// PrintNetworkLockJSONV1 prints the stored TKA state as a JSON object to the CLI,
// in a stable "v1" format.
//
// This format includes:
//
// - the AUM hash as a base32-encoded string
// - the raw AUM as base64-encoded bytes
// - the expanded AUM, which prints named fields for consumption by other tools
func PrintNetworkLockJSONV1(out io.Writer, updates []ipnstate.NetworkLockUpdate) error {
messages := make([]logMessageV1, len(updates))
for i, update := range updates {
var aum tka.AUM
if err := aum.Unserialize(update.Raw); err != nil {
return fmt.Errorf("decoding: %w", err)
}
h := aum.Hash()
if !bytes.Equal(h[:], update.Hash[:]) {
return fmt.Errorf("incorrect AUM hash: got %v, want %v", h, update)
}
messages[i] = toLogMessageV1(aum, update)
}
result := struct {
ResponseEnvelope
Messages []logMessageV1
}{
ResponseEnvelope: ResponseEnvelope{
SchemaVersion: "1",
},
Messages: messages,
}
enc := json.NewEncoder(out)
enc.SetIndent("", " ")
return enc.Encode(result)
}
// toLogMessageV1 converts a [tka.AUM] and [ipnstate.NetworkLockUpdate] to the
// JSON output returned by the CLI.
func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 {
expandedAUM := expandedAUMV1{}
expandedAUM.MessageKind = aum.MessageKind.String()
if len(aum.PrevAUMHash) > 0 {
expandedAUM.PrevAUMHash = aum.PrevAUMHash.String()
}
if key := aum.Key; key != nil {
expandedAUM.Key = toExpandedKeyV1(key)
}
if keyID := aum.KeyID; keyID != nil {
expandedAUM.KeyID = fmt.Sprintf("tlpub:%x", keyID)
}
if state := aum.State; state != nil {
expandedState := expandedStateV1{}
if h := state.LastAUMHash; h != nil {
expandedState.LastAUMHash = h.String()
}
for _, secret := range state.DisablementSecrets {
expandedState.DisablementSecrets = append(expandedState.DisablementSecrets, fmt.Sprintf("%x", secret))
}
for _, key := range state.Keys {
expandedState.Keys = append(expandedState.Keys, toExpandedKeyV1(&key))
}
expandedState.StateID1 = state.StateID1
expandedState.StateID2 = state.StateID2
expandedAUM.State = expandedState
}
if votes := aum.Votes; votes != nil {
expandedAUM.Votes = *votes
}
expandedAUM.Meta = aum.Meta
for _, signature := range aum.Signatures {
expandedAUM.Signatures = append(expandedAUM.Signatures, expandedSignatureV1{
KeyID: fmt.Sprintf("tlpub:%x", signature.KeyID),
Signature: base64.URLEncoding.EncodeToString(signature.Signature),
})
}
return logMessageV1{
Hash: aum.Hash().String(),
AUM: expandedAUM,
Raw: base64.URLEncoding.EncodeToString(update.Raw),
}
}
// toExpandedKeyV1 converts a [tka.Key] to the JSON output returned
// by the CLI.
func toExpandedKeyV1(key *tka.Key) expandedKeyV1 {
return expandedKeyV1{
Kind: key.Kind.String(),
Votes: key.Votes,
Public: fmt.Sprintf("tlpub:%x", key.Public),
Meta: key.Meta,
}
}
// logMessageV1 is the JSON representation of an AUM as both raw bytes and
// in its expanded form, and the CLI output is a list of these entries.
type logMessageV1 struct {
// The BLAKE2s digest of the CBOR-encoded AUM. This is printed as a
// base32-encoded string, e.g. KCE…XZQ
Hash string
// The expanded form of the AUM, which presents the fields in a more
// accessible format than doing a CBOR decoding.
AUM expandedAUMV1
// The raw bytes of the CBOR-encoded AUM, encoded as base64.
// This is useful for verifying the AUM hash.
Raw string
}
// expandedAUMV1 is the expanded version of a [tka.AUM], designed so external tools
// can read the AUM without knowing our CBOR definitions.
type expandedAUMV1 struct {
MessageKind string
PrevAUMHash string `json:"PrevAUMHash,omitzero"`
// Key encodes a public key to be added to the key authority.
// This field is used for AddKey AUMs.
Key expandedKeyV1 `json:"Key,omitzero"`
// KeyID references a public key which is part of the key authority.
// This field is used for RemoveKey and UpdateKey AUMs.
KeyID string `json:"KeyID,omitzero"`
// State describes the full state of the key authority.
// This field is used for Checkpoint AUMs.
State expandedStateV1 `json:"State,omitzero"`
// Votes and Meta describe properties of a key in the key authority.
// These fields are used for UpdateKey AUMs.
Votes uint `json:"Votes,omitzero"`
Meta map[string]string `json:"Meta,omitzero"`
// Signatures lists the signatures over this AUM.
Signatures []expandedSignatureV1 `json:"Signatures,omitzero"`
}
// expandedAUMV1 is the expanded version of a [tka.Key], which describes
// the public components of a key known to network-lock.
type expandedKeyV1 struct {
Kind string
// Votes describes the weight applied to signatures using this key.
Votes uint
// Public encodes the public key of the key as a hex string.
Public string
// Meta describes arbitrary metadata about the key. This could be
// used to store the name of the key, for instance.
Meta map[string]string `json:"Meta,omitzero"`
}
// expandedStateV1 is the expanded version of a [tka.State], which describes
// Tailnet Key Authority state at an instant in time.
type expandedStateV1 struct {
// LastAUMHash is the blake2s digest of the last-applied AUM.
LastAUMHash string `json:"LastAUMHash,omitzero"`
// DisablementSecrets are KDF-derived values which can be used
// to turn off the TKA in the event of a consensus-breaking bug.
DisablementSecrets []string
// Keys are the public keys of either:
//
// 1. The signing nodes currently trusted by the TKA.
// 2. Ephemeral keys that were used to generate pre-signed auth keys.
Keys []expandedKeyV1
// StateID's are nonce's, generated on enablement and fixed for
// the lifetime of the Tailnet Key Authority.
StateID1 uint64
StateID2 uint64
}
// expandedSignatureV1 is the expanded form of a [tka.Signature], which
// describes a signature over an AUM. This signature can be verified
// using the key referenced by KeyID.
type expandedSignatureV1 struct {
KeyID string
Signature string
}

@ -180,11 +180,7 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error {
printf("\t* Nearest DERP: unknown (no response to latency probes)\n") printf("\t* Nearest DERP: unknown (no response to latency probes)\n")
} else { } else {
if report.PreferredDERP != 0 { if report.PreferredDERP != 0 {
if region, ok := dm.Regions[report.PreferredDERP]; ok { printf("\t* Nearest DERP: %v\n", dm.Regions[report.PreferredDERP].RegionName)
printf("\t* Nearest DERP: %v\n", region.RegionName)
} else {
printf("\t* Nearest DERP: %v (region not found in map)\n", report.PreferredDERP)
}
} else { } else {
printf("\t* Nearest DERP: [none]\n") printf("\t* Nearest DERP: [none]\n")
} }

@ -10,11 +10,10 @@ import (
"context" "context"
"crypto/rand" "crypto/rand"
"encoding/hex" "encoding/hex"
jsonv1 "encoding/json" "encoding/json"
"errors" "errors"
"flag" "flag"
"fmt" "fmt"
"io"
"os" "os"
"strconv" "strconv"
"strings" "strings"
@ -22,7 +21,6 @@ import (
"github.com/mattn/go-isatty" "github.com/mattn/go-isatty"
"github.com/peterbourgon/ff/v3/ffcli" "github.com/peterbourgon/ff/v3/ffcli"
"tailscale.com/cmd/tailscale/cli/jsonoutput"
"tailscale.com/ipn/ipnstate" "tailscale.com/ipn/ipnstate"
"tailscale.com/tka" "tailscale.com/tka"
"tailscale.com/tsconst" "tailscale.com/tsconst"
@ -221,7 +219,7 @@ func runNetworkLockStatus(ctx context.Context, args []string) error {
} }
if nlStatusArgs.json { if nlStatusArgs.json {
enc := jsonv1.NewEncoder(os.Stdout) enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ") enc.SetIndent("", " ")
return enc.Encode(st) return enc.Encode(st)
} }
@ -602,7 +600,7 @@ func runNetworkLockDisablementKDF(ctx context.Context, args []string) error {
var nlLogArgs struct { var nlLogArgs struct {
limit int limit int
json jsonoutput.JSONSchemaVersion json bool
} }
var nlLogCmd = &ffcli.Command{ var nlLogCmd = &ffcli.Command{
@ -614,7 +612,7 @@ var nlLogCmd = &ffcli.Command{
FlagSet: (func() *flag.FlagSet { FlagSet: (func() *flag.FlagSet {
fs := newFlagSet("lock log") fs := newFlagSet("lock log")
fs.IntVar(&nlLogArgs.limit, "limit", 50, "max number of updates to list") fs.IntVar(&nlLogArgs.limit, "limit", 50, "max number of updates to list")
fs.Var(&nlLogArgs.json, "json", "output in JSON format") fs.BoolVar(&nlLogArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)")
return fs return fs
})(), })(),
} }
@ -680,7 +678,7 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er
default: default:
// Print a JSON encoding of the AUM as a fallback. // Print a JSON encoding of the AUM as a fallback.
e := jsonv1.NewEncoder(&stanza) e := json.NewEncoder(&stanza)
e.SetIndent("", "\t") e.SetIndent("", "\t")
if err := e.Encode(aum); err != nil { if err := e.Encode(aum); err != nil {
return "", err return "", err
@ -704,21 +702,14 @@ func runNetworkLockLog(ctx context.Context, args []string) error {
if err != nil { if err != nil {
return fixTailscaledConnectError(err) return fixTailscaledConnectError(err)
} }
if nlLogArgs.json {
enc := json.NewEncoder(Stdout)
enc.SetIndent("", " ")
return enc.Encode(updates)
}
out, useColor := colorableOutput() out, useColor := colorableOutput()
return printNetworkLockLog(updates, out, nlLogArgs.json, useColor)
}
func printNetworkLockLog(updates []ipnstate.NetworkLockUpdate, out io.Writer, jsonSchema jsonoutput.JSONSchemaVersion, useColor bool) error {
if jsonSchema.IsSet {
if jsonSchema.Value == 1 {
return jsonoutput.PrintNetworkLockJSONV1(out, updates)
} else {
return fmt.Errorf("unrecognised version: %q", jsonSchema.Value)
}
}
for _, update := range updates { for _, update := range updates {
stanza, err := nlDescribeUpdate(update, useColor) stanza, err := nlDescribeUpdate(update, useColor)
if err != nil { if err != nil {

@ -1,204 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package cli
import (
"bytes"
"testing"
"github.com/google/go-cmp/cmp"
"tailscale.com/cmd/tailscale/cli/jsonoutput"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tka"
"tailscale.com/types/tkatype"
)
func TestNetworkLockLogOutput(t *testing.T) {
votes := uint(1)
aum1 := tka.AUM{
MessageKind: tka.AUMAddKey,
Key: &tka.Key{
Kind: tka.Key25519,
Votes: 1,
Public: []byte{2, 2},
},
}
h1 := aum1.Hash()
aum2 := tka.AUM{
MessageKind: tka.AUMRemoveKey,
KeyID: []byte{3, 3},
PrevAUMHash: h1[:],
Signatures: []tkatype.Signature{
{
KeyID: []byte{3, 4},
Signature: []byte{4, 5},
},
},
Meta: map[string]string{"en": "three", "de": "drei", "es": "tres"},
}
h2 := aum2.Hash()
aum3 := tka.AUM{
MessageKind: tka.AUMCheckpoint,
PrevAUMHash: h2[:],
State: &tka.State{
Keys: []tka.Key{
{
Kind: tka.Key25519,
Votes: 1,
Public: []byte{1, 1},
Meta: map[string]string{"en": "one", "de": "eins", "es": "uno"},
},
},
DisablementSecrets: [][]byte{
{1, 2, 3},
{4, 5, 6},
{7, 8, 9},
},
},
Votes: &votes,
}
updates := []ipnstate.NetworkLockUpdate{
{
Hash: aum3.Hash(),
Change: aum3.MessageKind.String(),
Raw: aum3.Serialize(),
},
{
Hash: aum2.Hash(),
Change: aum2.MessageKind.String(),
Raw: aum2.Serialize(),
},
{
Hash: aum1.Hash(),
Change: aum1.MessageKind.String(),
Raw: aum1.Serialize(),
},
}
t.Run("human-readable", func(t *testing.T) {
t.Parallel()
var outBuf bytes.Buffer
json := jsonoutput.JSONSchemaVersion{}
useColor := false
printNetworkLockLog(updates, &outBuf, json, useColor)
t.Logf("%s", outBuf.String())
want := `update 4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA (checkpoint)
Disablement values:
- 010203
- 040506
- 070809
Keys:
Type: 25519
KeyID: tlpub:0101
Metadata: map[de:eins en:one es:uno]
update BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ (remove-key)
KeyID: tlpub:0303
update UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA (add-key)
Type: 25519
KeyID: tlpub:0202
`
if diff := cmp.Diff(outBuf.String(), want); diff != "" {
t.Fatalf("wrong output (-got, +want):\n%s", diff)
}
})
jsonV1 := `{
"SchemaVersion": "1",
"Messages": [
{
"Hash": "4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA",
"AUM": {
"MessageKind": "checkpoint",
"PrevAUMHash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ",
"State": {
"DisablementSecrets": [
"010203",
"040506",
"070809"
],
"Keys": [
{
"Kind": "25519",
"Votes": 1,
"Public": "tlpub:0101",
"Meta": {
"de": "eins",
"en": "one",
"es": "uno"
}
}
],
"StateID1": 0,
"StateID2": 0
},
"Votes": 1
},
"Raw": "pAEFAlggCqtbndUNv4_i-JrrVbGywbw5dNWNZYysEm02CCgf3q8FowH2AoNDAQIDQwQFBkMHCAkDgaQBAQIBA0IBAQyjYmRlZGVpbnNiZW5jb25lYmVzY3VubwYB"
},
{
"Hash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ",
"AUM": {
"MessageKind": "remove-key",
"PrevAUMHash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA",
"KeyID": "tlpub:0303",
"Meta": {
"de": "drei",
"en": "three",
"es": "tres"
},
"Signatures": [
{
"KeyID": "tlpub:0304",
"Signature": "BAU="
}
]
},
"Raw": "pQECAlggopKFFOhcPaARv2QQU90-kWozQFAG3Hqja7Vez-_EZIAEQgMDB6NiZGVkZHJlaWJlbmV0aHJlZWJlc2R0cmVzF4GiAUIDBAJCBAU="
},
{
"Hash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA",
"AUM": {
"MessageKind": "add-key",
"Key": {
"Kind": "25519",
"Votes": 1,
"Public": "tlpub:0202"
}
},
"Raw": "owEBAvYDowEBAgEDQgIC"
}
]
}
`
t.Run("json-1", func(t *testing.T) {
t.Parallel()
t.Logf("BOOM")
var outBuf bytes.Buffer
json := jsonoutput.JSONSchemaVersion{
IsSet: true,
Value: 1,
}
useColor := false
printNetworkLockLog(updates, &outBuf, json, useColor)
want := jsonV1
t.Logf("%s", outBuf.String())
if diff := cmp.Diff(outBuf.String(), want); diff != "" {
t.Fatalf("wrong output (-got, +want):\n%s", diff)
}
})
}

@ -40,7 +40,7 @@ func init() {
var serveCmd = func() *ffcli.Command { var serveCmd = func() *ffcli.Command {
se := &serveEnv{lc: &localClient} se := &serveEnv{lc: &localClient}
// previously used to serve legacy newFunnelCommand unless useWIPCode is true // previously used to serve legacy newFunnelCommand unless useWIPCode is true
// change is limited to make a revert easier and full cleanup to come after the release. // change is limited to make a revert easier and full cleanup to come after the relase.
// TODO(tylersmalley): cleanup and removal of newServeLegacyCommand as of 2023-10-16 // TODO(tylersmalley): cleanup and removal of newServeLegacyCommand as of 2023-10-16
return newServeV2Command(se, serve) return newServeV2Command(se, serve)
} }
@ -149,7 +149,6 @@ type localServeClient interface {
IncrementCounter(ctx context.Context, name string, delta int) error IncrementCounter(ctx context.Context, name string, delta int) error
GetPrefs(ctx context.Context) (*ipn.Prefs, error) GetPrefs(ctx context.Context) (*ipn.Prefs, error)
EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error)
CheckSOMarkInUse(ctx context.Context) (bool, error)
} }
// serveEnv is the environment the serve command runs within. All I/O should be // serveEnv is the environment the serve command runs within. All I/O should be
@ -163,21 +162,20 @@ type serveEnv struct {
json bool // output JSON (status only for now) json bool // output JSON (status only for now)
// v2 specific flags // v2 specific flags
bg bgBoolFlag // background mode bg bgBoolFlag // background mode
setPath string // serve path setPath string // serve path
https uint // HTTP port https uint // HTTP port
http uint // HTTP port http uint // HTTP port
tcp uint // TCP port tcp uint // TCP port
tlsTerminatedTCP uint // a TLS terminated TCP port tlsTerminatedTCP uint // a TLS terminated TCP port
proxyProtocol uint // PROXY protocol version (1 or 2) subcmd serveMode // subcommand
subcmd serveMode // subcommand yes bool // update without prompt
yes bool // update without prompt service tailcfg.ServiceName // service name
service tailcfg.ServiceName // service name tun bool // redirect traffic to OS for service
tun bool // redirect traffic to OS for service allServices bool // apply config file to all services
allServices bool // apply config file to all services
acceptAppCaps []tailcfg.PeerCapability // app capabilities to forward
lc localServeClient // localClient interface, specific to serve lc localServeClient // localClient interface, specific to serve
// optional stuff for tests: // optional stuff for tests:
testFlagOut io.Writer testFlagOut io.Writer
testStdout io.Writer testStdout io.Writer
@ -572,7 +570,7 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u
return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort)
} }
sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, 0 /* proxy proto */, dnsName) sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, dnsName)
if !reflect.DeepEqual(cursc, sc) { if !reflect.DeepEqual(cursc, sc) {
if err := e.lc.SetServeConfig(ctx, sc); err != nil { if err := e.lc.SetServeConfig(ctx, sc); err != nil {

@ -860,8 +860,6 @@ type fakeLocalServeClient struct {
setCount int // counts calls to SetServeConfig setCount int // counts calls to SetServeConfig
queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls
prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs
SOMarkInUse bool // fake SO mark in use status
statusWithoutPeers *ipnstate.Status // nil for fakeStatus
} }
// fakeStatus is a fake ipnstate.Status value for tests. // fakeStatus is a fake ipnstate.Status value for tests.
@ -882,10 +880,7 @@ var fakeStatus = &ipnstate.Status{
} }
func (lc *fakeLocalServeClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { func (lc *fakeLocalServeClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) {
if lc.statusWithoutPeers == nil { return fakeStatus, nil
return fakeStatus, nil
}
return lc.statusWithoutPeers, nil
} }
func (lc *fakeLocalServeClient) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { func (lc *fakeLocalServeClient) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) {
@ -938,10 +933,6 @@ func (lc *fakeLocalServeClient) IncrementCounter(ctx context.Context, name strin
return nil // unused in tests return nil // unused in tests
} }
func (lc *fakeLocalServeClient) CheckSOMarkInUse(ctx context.Context) (bool, error) {
return lc.SOMarkInUse, nil
}
// exactError returns an error checker that wants exactly the provided want error. // exactError returns an error checker that wants exactly the provided want error.
// If optName is non-empty, it's used in the error message. // If optName is non-empty, it's used in the error message.
func exactErr(want error, optName ...string) func(error) string { func exactErr(want error, optName ...string) func(error) string {

@ -20,8 +20,6 @@ import (
"os/signal" "os/signal"
"path" "path"
"path/filepath" "path/filepath"
"regexp"
"runtime"
"slices" "slices"
"sort" "sort"
"strconv" "strconv"
@ -34,7 +32,6 @@ import (
"tailscale.com/ipn/ipnstate" "tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/ipproto" "tailscale.com/types/ipproto"
"tailscale.com/util/dnsname"
"tailscale.com/util/mak" "tailscale.com/util/mak"
"tailscale.com/util/prompt" "tailscale.com/util/prompt"
"tailscale.com/util/set" "tailscale.com/util/set"
@ -99,41 +96,6 @@ func (b *bgBoolFlag) String() string {
return strconv.FormatBool(b.Value) return strconv.FormatBool(b.Value)
} }
type acceptAppCapsFlag struct {
Value *[]tailcfg.PeerCapability
}
// An application capability name has the form {domain}/{name}.
// Both parts must use the (simplified) FQDN label character set.
// The "name" can contain forward slashes.
// \pL = Unicode Letter, \pN = Unicode Number, - = Hyphen
var validAppCap = regexp.MustCompile(`^([\pL\pN-]+\.)+[\pL\pN-]+\/[\pL\pN-/]+$`)
// Set appends s to the list of appCaps to accept.
func (u *acceptAppCapsFlag) Set(s string) error {
if s == "" {
return nil
}
appCaps := strings.Split(s, ",")
for _, appCap := range appCaps {
appCap = strings.TrimSpace(appCap)
if !validAppCap.MatchString(appCap) {
return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", appCap)
}
*u.Value = append(*u.Value, tailcfg.PeerCapability(appCap))
}
return nil
}
// String returns the string representation of the slice of appCaps to accept.
func (u *acceptAppCapsFlag) String() string {
s := make([]string, len(*u.Value))
for i, v := range *u.Value {
s[i] = string(v)
}
return strings.Join(s, ",")
}
var serveHelpCommon = strings.TrimSpace(` var serveHelpCommon = strings.TrimSpace(`
<target> can be a file, directory, text, or most commonly the location to a service running on the <target> can be a file, directory, text, or most commonly the location to a service running on the
local machine. The location to the location service can be expressed as a port number (e.g., 3000), local machine. The location to the location service can be expressed as a port number (e.g., 3000),
@ -237,12 +199,10 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command {
fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)")
if subcmd == serve { if subcmd == serve {
fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port")
fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capabilities to forward to the server (specify multiple capabilities with a comma-separated list)")
fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.")
} }
fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port")
fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port")
fs.UintVar(&e.proxyProtocol, "proxy-protocol", 0, "PROXY protocol version (1 or 2) for TCP forwarding") fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.")
fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)")
fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. Refer to docs for more information.") fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. Refer to docs for more information.")
}), }),
@ -295,7 +255,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command {
Name: "get-config", Name: "get-config",
ShortUsage: fmt.Sprintf("tailscale %s get-config <file> [--service=<service>] [--all]", info.Name), ShortUsage: fmt.Sprintf("tailscale %s get-config <file> [--service=<service>] [--all]", info.Name),
ShortHelp: "Get service configuration to save to a file", ShortHelp: "Get service configuration to save to a file",
LongHelp: "Get the configuration for services that this node is currently hosting in a\n" + LongHelp: hidden + "Get the configuration for services that this node is currently hosting in a\n" +
"format that can later be provided to set-config. This can be used to declaratively set\n" + "format that can later be provided to set-config. This can be used to declaratively set\n" +
"configuration for a service host.", "configuration for a service host.",
Exec: e.runServeGetConfig, Exec: e.runServeGetConfig,
@ -308,11 +268,10 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command {
Name: "set-config", Name: "set-config",
ShortUsage: fmt.Sprintf("tailscale %s set-config <file> [--service=<service>] [--all]", info.Name), ShortUsage: fmt.Sprintf("tailscale %s set-config <file> [--service=<service>] [--all]", info.Name),
ShortHelp: "Define service configuration from a file", ShortHelp: "Define service configuration from a file",
LongHelp: "Read the provided configuration file and use it to declaratively set the configuration\n" + LongHelp: hidden + "Read the provided configuration file and use it to declaratively set the configuration\n" +
"for either a single service, or for all services that this node is hosting. If --service is specified,\n" + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" +
"all endpoint handlers for that service are overwritten. If --all is specified, all endpoint handlers for\n" + "all endpoint handlers for that service are overwritten. If --all is specified, all endpoint handlers for\n" +
"all services are overwritten.\n\n" + "all services are overwritten.",
"For information on the file format, see tailscale.com/kb/1589/tailscale-services-configuration-file",
Exec: e.runServeSetConfig, Exec: e.runServeSetConfig,
FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) {
fs.BoolVar(&e.allServices, "all", false, "apply config to all services") fs.BoolVar(&e.allServices, "all", false, "apply config to all services")
@ -416,14 +375,6 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc {
return errHelpFunc(subcmd) return errHelpFunc(subcmd)
} }
if (srvType == serveTypeHTTP || srvType == serveTypeHTTPS) && e.proxyProtocol != 0 {
return fmt.Errorf("PROXY protocol is only supported for TCP forwarding, not HTTP/HTTPS")
}
// Validate PROXY protocol version
if e.proxyProtocol != 0 && e.proxyProtocol != 1 && e.proxyProtocol != 2 {
return fmt.Errorf("invalid PROXY protocol version %d; must be 1 or 2", e.proxyProtocol)
}
sc, err := e.lc.GetServeConfig(ctx) sc, err := e.lc.GetServeConfig(ctx)
if err != nil { if err != nil {
return fmt.Errorf("error getting serve config: %w", err) return fmt.Errorf("error getting serve config: %w", err)
@ -469,19 +420,20 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc {
svcName = e.service svcName = e.service
dnsName = e.service.String() dnsName = e.service.String()
} }
tagged := st.Self.Tags != nil && st.Self.Tags.Len() > 0
if forService && !tagged && !turnOff {
return errors.New("service hosts must be tagged nodes")
}
if !forService && srvType == serveTypeTUN { if !forService && srvType == serveTypeTUN {
return errors.New("tun mode is only supported for services") return errors.New("tun mode is only supported for services")
} }
wantFg := !e.bg.Value && !turnOff wantFg := !e.bg.Value && !turnOff
if wantFg { if wantFg {
// validate the config before creating a WatchIPNBus session
if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil {
return err
}
// if foreground mode, create a WatchIPNBus session // if foreground mode, create a WatchIPNBus session
// and use the nested config for all following operations // and use the nested config for all following operations
// TODO(marwan-at-work): nested-config validations should happen here or previous to this point. // TODO(marwan-at-work): nested-config validations should happen here or previous to this point.
watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState) watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys)
if err != nil { if err != nil {
return err return err
} }
@ -503,6 +455,9 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc {
// only unset serve when trying to unset with type and port flags. // only unset serve when trying to unset with type and port flags.
err = e.unsetServe(sc, dnsName, srvType, srvPort, mount, magicDNSSuffix) err = e.unsetServe(sc, dnsName, srvType, srvPort, mount, magicDNSSuffix)
} else { } else {
if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil {
return err
}
if forService { if forService {
e.addServiceToPrefs(ctx, svcName) e.addServiceToPrefs(ctx, svcName)
} }
@ -510,10 +465,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc {
if len(args) > 0 { if len(args) > 0 {
target = args[0] target = args[0]
} }
if err := e.shouldWarnRemoteDestCompatibility(ctx, target); err != nil { err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix)
return err
}
err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps, int(e.proxyProtocol))
msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort)
} }
if err != nil { if err != nil {
@ -834,7 +786,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er
for name, details := range scf.Services { for name, details := range scf.Services {
for ppr, ep := range details.Endpoints { for ppr, ep := range details.Endpoints {
if ep.Protocol == conffile.ProtoTUN { if ep.Protocol == conffile.ProtoTUN {
err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil, 0 /* proxy protocol */) err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix)
if err != nil { if err != nil {
return err return err
} }
@ -856,7 +808,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er
portStr := fmt.Sprint(destPort) portStr := fmt.Sprint(destPort)
target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr))
} }
err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil, 0 /* proxy protocol */) err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix)
if err != nil { if err != nil {
return fmt.Errorf("service %q: %w", name, err) return fmt.Errorf("service %q: %w", name, err)
} }
@ -899,12 +851,72 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er
return e.lc.SetServeConfig(ctx, sc) return e.lc.SetServeConfig(ctx, sc)
} }
func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability, proxyProtocol int) error { const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration"
// validateConfig checks if the serve config is valid to serve the type wanted on the port.
// dnsName is a FQDN or a serviceName (with `svc:` prefix).
func (e *serveEnv) validateConfig(sc *ipn.ServeConfig, port uint16, wantServe serveType, svcName tailcfg.ServiceName) error {
var tcpHandlerForPort *ipn.TCPPortHandler
if svcName != noService {
svc := sc.Services[svcName]
if svc == nil {
return nil
}
if wantServe == serveTypeTUN && (svc.TCP != nil || svc.Web != nil) {
return errors.New("service already has a TCP or Web handler, cannot serve in TUN mode")
}
if svc.Tun && wantServe != serveTypeTUN {
return errors.New("service is already being served in TUN mode")
}
if svc.TCP[port] == nil {
return nil
}
tcpHandlerForPort = svc.TCP[port]
} else {
sc, isFg := sc.FindConfig(port)
if sc == nil {
return nil
}
if isFg {
return errors.New("foreground already exists under this port")
}
if !e.bg.Value {
return fmt.Errorf(backgroundExistsMsg, infoMap[e.subcmd].Name, wantServe.String(), port)
}
tcpHandlerForPort = sc.TCP[port]
}
existingServe := serveFromPortHandler(tcpHandlerForPort)
if wantServe != existingServe {
target := svcName
if target == noService {
target = "machine"
}
return fmt.Errorf("want to serve %q but port is already serving %q for %q", wantServe, existingServe, target)
}
return nil
}
func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType {
switch {
case tcp.HTTP:
return serveTypeHTTP
case tcp.HTTPS:
return serveTypeHTTPS
case tcp.TerminateTLS != "":
return serveTypeTLSTerminatedTCP
case tcp.TCPForward != "":
return serveTypeTCP
default:
return -1
}
}
func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string) error {
// update serve config based on the type // update serve config based on the type
switch srvType { switch srvType {
case serveTypeHTTPS, serveTypeHTTP: case serveTypeHTTPS, serveTypeHTTP:
useTLS := srvType == serveTypeHTTPS useTLS := srvType == serveTypeHTTPS
err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds, caps) err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds)
if err != nil { if err != nil {
return fmt.Errorf("failed apply web serve: %w", err) return fmt.Errorf("failed apply web serve: %w", err)
} }
@ -912,7 +924,7 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveTy
if e.setPath != "" { if e.setPath != "" {
return fmt.Errorf("cannot mount a path for TCP serve") return fmt.Errorf("cannot mount a path for TCP serve")
} }
err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target, proxyProtocol) err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target)
if err != nil { if err != nil {
return fmt.Errorf("failed to apply TCP serve: %w", err) return fmt.Errorf("failed to apply TCP serve: %w", err)
} }
@ -936,17 +948,16 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveTy
} }
var ( var (
msgFunnelAvailable = "Available on the internet:" msgFunnelAvailable = "Available on the internet:"
msgServeAvailable = "Available within your tailnet:" msgServeAvailable = "Available within your tailnet:"
msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:" msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:"
msgRunningInBackground = "%s started and running in the background." msgRunningInBackground = "%s started and running in the background."
msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system."
msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off"
msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off"
msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off"
msgDisableService = "To remove config for the service, run: tailscale serve clear %s" msgDisableService = "To remove config for the service, run: tailscale serve clear %s"
msgWarnRemoteDestCompatibility = "Warning: %s doesn't support connecting to remote destinations from non-default route, see tailscale.com/kb/1552/tailscale-services for detail." msgToExit = "Press Ctrl+C to exit."
msgToExit = "Press Ctrl+C to exit."
) )
// messageForPort returns a message for the given port based on the // messageForPort returns a message for the given port based on the
@ -1039,9 +1050,6 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN
if tcpHandler.TerminateTLS != "" { if tcpHandler.TerminateTLS != "" {
tlsStatus = "TLS terminated" tlsStatus = "TLS terminated"
} }
if ver := tcpHandler.ProxyProtocol; ver != 0 {
tlsStatus = fmt.Sprintf("%s, PROXY protocol v%d", tlsStatus, ver)
}
output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", host, srvPort, tlsStatus)) output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", host, srvPort, tlsStatus))
for _, a := range ips { for _, a := range ips {
@ -1072,78 +1080,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN
return output.String() return output.String()
} }
// isRemote reports whether the given destination from serve config func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string, mds string) error {
// is a remote destination.
func isRemote(target string) bool {
// target being a port number means it's localhost
if _, err := strconv.ParseUint(target, 10, 16); err == nil {
return false
}
// prepend tmp:// if no scheme is present just to help parsing
if !strings.Contains(target, "://") {
target = "tmp://" + target
}
// make sure we can parse the target, wether it's a full URL or just a host:port
u, err := url.ParseRequestURI(target)
if err != nil {
// If we can't parse the target, it doesn't matter if it's remote or not
return false
}
validHN := dnsname.ValidHostname(u.Hostname()) == nil
validIP := net.ParseIP(u.Hostname()) != nil
if !validHN && !validIP {
return false
}
if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" {
return false
}
return true
}
// shouldWarnRemoteDestCompatibility reports whether we should warn the user
// that their current OS/environment may not be compatible with
// service's proxy destination.
func (e *serveEnv) shouldWarnRemoteDestCompatibility(ctx context.Context, target string) error {
// no target means nothing to check
if target == "" {
return nil
}
if filepath.IsAbs(target) || strings.HasPrefix(target, "text:") {
// local path or text target, nothing to check
return nil
}
// only check for remote destinations
if !isRemote(target) {
return nil
}
// Check if running as Mac extension and warn
if version.IsMacAppStore() || version.IsMacSysExt() {
return fmt.Errorf(msgWarnRemoteDestCompatibility, "the MacOS extension")
}
// Check for linux, if it's running with TS_FORCE_LINUX_BIND_TO_DEVICE=true
// and tailscale bypass mark is not working. If any of these conditions are true, and the dest is
// a remote destination, return true.
if runtime.GOOS == "linux" {
SOMarkInUse, err := e.lc.CheckSOMarkInUse(ctx)
if err != nil {
log.Printf("error checking SO mark in use: %v", err)
return nil
}
if !SOMarkInUse {
return fmt.Errorf(msgWarnRemoteDestCompatibility, "the Linux tailscaled without SO_MARK")
}
}
return nil
}
func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target, mds string, caps []tailcfg.PeerCapability) error {
h := new(ipn.HTTPHandler) h := new(ipn.HTTPHandler)
switch { switch {
case strings.HasPrefix(target, "text:"): case strings.HasPrefix(target, "text:"):
@ -1177,7 +1114,6 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui
return err return err
} }
h.Proxy = t h.Proxy = t
h.AcceptAppCaps = caps
} }
// TODO: validation needs to check nested foreground configs // TODO: validation needs to check nested foreground configs
@ -1191,7 +1127,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui
return nil return nil
} }
func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string, proxyProtocol int) error { func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string) error {
var terminateTLS bool var terminateTLS bool
switch srcType { switch srcType {
case serveTypeTCP: case serveTypeTCP:
@ -1202,8 +1138,6 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se
return fmt.Errorf("invalid TCP target %q", target) return fmt.Errorf("invalid TCP target %q", target)
} }
svcName := tailcfg.AsServiceName(dnsName)
targetURL, err := ipn.ExpandProxyTargetValue(target, []string{"tcp"}, "tcp") targetURL, err := ipn.ExpandProxyTargetValue(target, []string{"tcp"}, "tcp")
if err != nil { if err != nil {
return fmt.Errorf("unable to expand target: %v", err) return fmt.Errorf("unable to expand target: %v", err)
@ -1215,11 +1149,13 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se
} }
// TODO: needs to account for multiple configs from foreground mode // TODO: needs to account for multiple configs from foreground mode
svcName := tailcfg.AsServiceName(dnsName)
if sc.IsServingWeb(srcPort, svcName) { if sc.IsServingWeb(srcPort, svcName) {
return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName)
} }
sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, proxyProtocol, dnsName) sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, dnsName)
return nil return nil
} }

@ -12,7 +12,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"regexp"
"slices" "slices"
"strconv" "strconv"
"strings" "strings"
@ -23,7 +22,6 @@ import (
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/ipn/ipnstate" "tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/views"
) )
func TestServeDevConfigMutations(t *testing.T) { func TestServeDevConfigMutations(t *testing.T) {
@ -35,11 +33,10 @@ func TestServeDevConfigMutations(t *testing.T) {
} }
// group is a group of steps that share the same // group is a group of steps that share the same
// config mutation // config mutation, but always starts from an empty config
type group struct { type group struct {
name string name string
steps []step steps []step
initialState fakeLocalServeClient // use the zero value for empty config
} }
// creaet a temporary directory for path-based destinations // creaet a temporary directory for path-based destinations
@ -220,20 +217,10 @@ func TestServeDevConfigMutations(t *testing.T) {
}}, }},
}, },
{ {
name: "ip_host", name: "invalid_host",
initialState: fakeLocalServeClient{
SOMarkInUse: true,
},
steps: []step{{ steps: []step{{
command: cmd("serve --https=443 --bg http://192.168.1.1:3000"), command: cmd("serve --https=443 --bg http://somehost:3000"), // invalid host
want: &ipn.ServeConfig{ wantErr: anyErr(),
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://192.168.1.1:3000"},
}},
},
},
}}, }},
}, },
{ {
@ -243,16 +230,6 @@ func TestServeDevConfigMutations(t *testing.T) {
wantErr: anyErr(), wantErr: anyErr(),
}}, }},
}, },
{
name: "no_scheme_remote_host_tcp",
initialState: fakeLocalServeClient{
SOMarkInUse: true,
},
steps: []step{{
command: cmd("serve --https=443 --bg 192.168.1.1:3000"),
wantErr: exactErrMsg(errHelp),
}},
},
{ {
name: "turn_off_https", name: "turn_off_https",
steps: []step{ steps: []step{
@ -422,11 +399,15 @@ func TestServeDevConfigMutations(t *testing.T) {
}, },
}}, }},
}, },
{
name: "unknown_host_tcp",
steps: []step{{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:5432"),
wantErr: exactErrMsg(errHelp),
}},
},
{ {
name: "tcp_port_too_low", name: "tcp_port_too_low",
initialState: fakeLocalServeClient{
SOMarkInUse: true,
},
steps: []step{{ steps: []step{{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:0"), command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:0"),
wantErr: exactErrMsg(errHelp), wantErr: exactErrMsg(errHelp),
@ -434,9 +415,6 @@ func TestServeDevConfigMutations(t *testing.T) {
}, },
{ {
name: "tcp_port_too_high", name: "tcp_port_too_high",
initialState: fakeLocalServeClient{
SOMarkInUse: true,
},
steps: []step{{ steps: []step{{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:65536"), command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:65536"),
wantErr: exactErrMsg(errHelp), wantErr: exactErrMsg(errHelp),
@ -551,9 +529,6 @@ func TestServeDevConfigMutations(t *testing.T) {
}, },
{ {
name: "bad_path", name: "bad_path",
initialState: fakeLocalServeClient{
SOMarkInUse: true,
},
steps: []step{{ steps: []step{{
command: cmd("serve --bg --https=443 bad/path"), command: cmd("serve --bg --https=443 bad/path"),
wantErr: exactErrMsg(errHelp), wantErr: exactErrMsg(errHelp),
@ -820,186 +795,36 @@ func TestServeDevConfigMutations(t *testing.T) {
}, },
}, },
{ {
name: "advertise_service", name: "forground_with_bg_conflict",
initialState: fakeLocalServeClient{
statusWithoutPeers: &ipnstate.Status{
BackendState: ipn.Running.String(),
Self: &ipnstate.PeerStatus{
DNSName: "foo.test.ts.net",
CapMap: tailcfg.NodeCapMap{
tailcfg.NodeAttrFunnel: nil,
tailcfg.CapabilityFunnelPorts + "?ports=443,8443": nil,
},
Tags: ptrToReadOnlySlice([]string{"some-tag"}),
},
CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"},
},
SOMarkInUse: true,
},
steps: []step{{
command: cmd("serve --service=svc:foo --http=80 text:foo"),
want: &ipn.ServeConfig{
Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
"svc:foo": {
TCP: map[uint16]*ipn.TCPPortHandler{
80: {HTTP: true},
},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Text: "foo"},
}},
},
},
},
},
}},
},
{
name: "advertise_service_from_untagged_node",
steps: []step{{
command: cmd("serve --service=svc:foo --http=80 text:foo"),
wantErr: anyErr(),
}},
},
{
name: "forward_grant_header",
steps: []step{ steps: []step{
{ {
command: cmd("serve --bg --accept-app-caps=example.com/cap/foo 3000"), command: cmd("serve --bg --http=3000 localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {
Proxy: "http://127.0.0.1:3000",
AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo"},
},
}},
},
},
},
{
command: cmd("serve --bg --accept-app-caps=example.com/cap/foo,example.com/cap/bar 3000"),
want: &ipn.ServeConfig{ want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, TCP: map[uint16]*ipn.TCPPortHandler{3000: {HTTP: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{ Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "foo.test.ts.net:3000": {Handlers: map[string]*ipn.HTTPHandler{
"/": { "/": {Proxy: "http://localhost:3000"},
Proxy: "http://127.0.0.1:3000",
AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"},
},
}},
},
},
},
{
command: cmd("serve --bg --accept-app-caps=example.com/cap/bar 3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {
Proxy: "http://127.0.0.1:3000",
AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/bar"},
},
}}, }},
}, },
}, },
}, },
},
},
{
name: "invalid_accept_caps_invalid_app_cap",
steps: []step{
{
command: cmd("serve --bg --accept-app-caps=example.com/cap/fine,NOTFINE 3000"), // should be {domain.tld}/{name}
wantErr: func(err error) (badErrMsg string) {
if err == nil || !strings.Contains(err.Error(), fmt.Sprintf("%q does not match", "NOTFINE")) {
return fmt.Sprintf("wanted validation error that quotes the non-matching capability (and nothing more) but got %q", err.Error())
}
return ""
},
},
},
},
{
name: "tcp_with_proxy_protocol_v1",
steps: []step{{
command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
8000: {
TCPForward: "localhost:5432",
ProxyProtocol: 1,
},
},
},
}},
},
{
name: "tls_terminated_tcp_with_proxy_protocol_v2",
steps: []step{{
command: cmd("serve --tls-terminated-tcp=443 --proxy-protocol=2 --bg tcp://localhost:5432"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
TCPForward: "localhost:5432",
TerminateTLS: "foo.test.ts.net",
ProxyProtocol: 2,
},
},
},
}},
},
{
name: "tcp_update_to_add_proxy_protocol",
steps: []step{
{
command: cmd("serve --tcp=8000 --bg tcp://localhost:5432"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
8000: {TCPForward: "localhost:5432"},
},
},
},
{ {
command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"), command: cmd("serve --http=3000 localhost:3000"),
want: &ipn.ServeConfig{ wantErr: exactErrMsg(fmt.Errorf(backgroundExistsMsg, "serve", "http", 3000)),
TCP: map[uint16]*ipn.TCPPortHandler{
8000: {
TCPForward: "localhost:5432",
ProxyProtocol: 1,
},
},
},
}, },
}, },
}, },
{
name: "tcp_proxy_protocol_invalid_version",
steps: []step{{
command: cmd("serve --tcp=8000 --proxy-protocol=3 --bg tcp://localhost:5432"),
wantErr: anyErr(),
}},
},
{
name: "proxy_protocol_without_tcp",
steps: []step{{
command: cmd("serve --https=443 --proxy-protocol=1 --bg http://localhost:3000"),
wantErr: anyErr(),
}},
},
} }
for _, group := range groups { for _, group := range groups {
t.Run(group.name, func(t *testing.T) { t.Run(group.name, func(t *testing.T) {
lc := group.initialState lc := &fakeLocalServeClient{}
for i, st := range group.steps { for i, st := range group.steps {
var stderr bytes.Buffer var stderr bytes.Buffer
var stdout bytes.Buffer var stdout bytes.Buffer
var flagOut bytes.Buffer var flagOut bytes.Buffer
e := &serveEnv{ e := &serveEnv{
lc: &lc, lc: lc,
testFlagOut: &flagOut, testFlagOut: &flagOut,
testStdout: &stdout, testStdout: &stdout,
testStderr: &stderr, testStderr: &stderr,
@ -1047,6 +872,190 @@ func TestServeDevConfigMutations(t *testing.T) {
} }
} }
func TestValidateConfig(t *testing.T) {
tests := [...]struct {
name string
desc string
cfg *ipn.ServeConfig
svc tailcfg.ServiceName
servePort uint16
serveType serveType
bg bgBoolFlag
wantErr bool
}{
{
name: "nil_config",
desc: "when config is nil, all requests valid",
cfg: nil,
servePort: 3000,
serveType: serveTypeHTTPS,
},
{
name: "new_bg_tcp",
desc: "no error when config exists but we're adding a new bg tcp port",
cfg: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true},
},
},
bg: bgBoolFlag{true, false},
servePort: 10000,
serveType: serveTypeHTTPS,
},
{
name: "override_bg_tcp",
desc: "no error when overwriting previous port under the same serve type",
cfg: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {TCPForward: "http://localhost:4545"},
},
},
bg: bgBoolFlag{true, false},
servePort: 443,
serveType: serveTypeTCP,
},
{
name: "override_bg_tcp",
desc: "error when overwriting previous port under a different serve type",
cfg: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true},
},
},
bg: bgBoolFlag{true, false},
servePort: 443,
serveType: serveTypeHTTP,
wantErr: true,
},
{
name: "new_fg_port",
desc: "no error when serving a new foreground port",
cfg: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true},
},
Foreground: map[string]*ipn.ServeConfig{
"abc123": {
TCP: map[uint16]*ipn.TCPPortHandler{
3000: {HTTPS: true},
},
},
},
},
servePort: 4040,
serveType: serveTypeTCP,
},
{
name: "same_fg_port",
desc: "error when overwriting a previous fg port",
cfg: &ipn.ServeConfig{
Foreground: map[string]*ipn.ServeConfig{
"abc123": {
TCP: map[uint16]*ipn.TCPPortHandler{
3000: {HTTPS: true},
},
},
},
},
servePort: 3000,
serveType: serveTypeTCP,
wantErr: true,
},
{
name: "new_service_tcp",
desc: "no error when adding a new service port",
cfg: &ipn.ServeConfig{
Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
"svc:foo": {
TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}},
},
},
},
svc: "svc:foo",
servePort: 8080,
serveType: serveTypeTCP,
},
{
name: "override_service_tcp",
desc: "no error when overwriting a previous service port",
cfg: &ipn.ServeConfig{
Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
"svc:foo": {
TCP: map[uint16]*ipn.TCPPortHandler{
443: {TCPForward: "http://localhost:4545"},
},
},
},
},
svc: "svc:foo",
servePort: 443,
serveType: serveTypeTCP,
},
{
name: "override_service_tcp",
desc: "error when overwriting a previous service port with a different serve type",
cfg: &ipn.ServeConfig{
Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
"svc:foo": {
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true},
},
},
},
},
svc: "svc:foo",
servePort: 443,
serveType: serveTypeHTTP,
wantErr: true,
},
{
name: "override_service_tcp",
desc: "error when setting previous tcp service to tun mode",
cfg: &ipn.ServeConfig{
Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
"svc:foo": {
TCP: map[uint16]*ipn.TCPPortHandler{
443: {TCPForward: "http://localhost:4545"},
},
},
},
},
svc: "svc:foo",
serveType: serveTypeTUN,
wantErr: true,
},
{
name: "override_service_tun",
desc: "error when setting previous tun service to tcp forwarder",
cfg: &ipn.ServeConfig{
Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
"svc:foo": {
Tun: true,
},
},
},
svc: "svc:foo",
serveType: serveTypeTCP,
servePort: 443,
wantErr: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
se := serveEnv{bg: tc.bg}
err := se.validateConfig(tc.cfg, tc.servePort, tc.serveType, tc.svc)
if err == nil && tc.wantErr {
t.Fatal("expected an error but got nil")
}
if err != nil && !tc.wantErr {
t.Fatalf("expected no error but got: %v", err)
}
})
}
}
func TestSrcTypeFromFlags(t *testing.T) { func TestSrcTypeFromFlags(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
@ -1121,118 +1130,6 @@ func TestSrcTypeFromFlags(t *testing.T) {
} }
} }
func TestAcceptSetAppCapsFlag(t *testing.T) {
testCases := []struct {
name string
inputs []string
expectErr bool
expectErrToMatch *regexp.Regexp
expectedValue []tailcfg.PeerCapability
}{
{
name: "valid_simple",
inputs: []string{"example.com/name"},
expectErr: false,
expectedValue: []tailcfg.PeerCapability{"example.com/name"},
},
{
name: "valid_unicode",
inputs: []string{"bücher.de/something"},
expectErr: false,
expectedValue: []tailcfg.PeerCapability{"bücher.de/something"},
},
{
name: "more_valid_unicode",
inputs: []string{"example.tw/某某某"},
expectErr: false,
expectedValue: []tailcfg.PeerCapability{"example.tw/某某某"},
},
{
name: "valid_path_slashes",
inputs: []string{"domain.com/path/to/name"},
expectErr: false,
expectedValue: []tailcfg.PeerCapability{"domain.com/path/to/name"},
},
{
name: "valid_multiple_sets",
inputs: []string{"one.com/foo,two.com/bar"},
expectErr: false,
expectedValue: []tailcfg.PeerCapability{"one.com/foo", "two.com/bar"},
},
{
name: "valid_empty_string",
inputs: []string{""},
expectErr: false,
expectedValue: nil, // Empty string should be a no-op and not append anything.
},
{
name: "invalid_path_chars",
inputs: []string{"domain.com/path_with_underscore"},
expectErr: true,
expectErrToMatch: regexp.MustCompile(`"domain.com/path_with_underscore"`),
expectedValue: nil, // Slice should remain empty.
},
{
name: "valid_subdomain",
inputs: []string{"sub.domain.com/name"},
expectErr: false,
expectedValue: []tailcfg.PeerCapability{"sub.domain.com/name"},
},
{
name: "invalid_no_path",
inputs: []string{"domain.com/"},
expectErr: true,
expectErrToMatch: regexp.MustCompile(`"domain.com/"`),
expectedValue: nil,
},
{
name: "invalid_no_domain",
inputs: []string{"/path/only"},
expectErr: true,
expectErrToMatch: regexp.MustCompile(`"/path/only"`),
expectedValue: nil,
},
{
name: "some_invalid_some_valid",
inputs: []string{"one.com/foo,bad/bar,two.com/baz"},
expectErr: true,
expectErrToMatch: regexp.MustCompile(`"bad/bar"`),
expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var v []tailcfg.PeerCapability
flag := &acceptAppCapsFlag{Value: &v}
var err error
for _, s := range tc.inputs {
err = flag.Set(s)
if err != nil {
break
}
}
if tc.expectErr && err == nil {
t.Errorf("expected an error, but got none")
}
if tc.expectErrToMatch != nil {
if !tc.expectErrToMatch.MatchString(err.Error()) {
t.Errorf("expected error to match %q, but was %q", tc.expectErrToMatch, err)
}
}
if !tc.expectErr && err != nil {
t.Errorf("did not expect an error, but got: %v", err)
}
if !reflect.DeepEqual(tc.expectedValue, v) {
t.Errorf("unexpected value, got: %q, want: %q", v, tc.expectedValue)
}
})
}
}
func TestCleanURLPath(t *testing.T) { func TestCleanURLPath(t *testing.T) {
tests := []struct { tests := []struct {
input string input string
@ -1765,7 +1662,7 @@ func TestIsLegacyInvocation(t *testing.T) {
} }
if gotTranslation != tt.translation { if gotTranslation != tt.translation {
t.Fatalf("expected translation to be %q but got %q", tt.translation, gotTranslation) t.Fatalf("expected translaction to be %q but got %q", tt.translation, gotTranslation)
} }
}) })
} }
@ -1775,19 +1672,18 @@ func TestSetServe(t *testing.T) {
e := &serveEnv{} e := &serveEnv{}
magicDNSSuffix := "test.ts.net" magicDNSSuffix := "test.ts.net"
tests := []struct { tests := []struct {
name string name string
desc string desc string
cfg *ipn.ServeConfig cfg *ipn.ServeConfig
st *ipnstate.Status st *ipnstate.Status
dnsName string dnsName string
srvType serveType srvType serveType
srvPort uint16 srvPort uint16
mountPath string mountPath string
target string target string
allowFunnel bool allowFunnel bool
proxyProtocol int expected *ipn.ServeConfig
expected *ipn.ServeConfig expectErr bool
expectErr bool
}{ }{
{ {
name: "add new handler", name: "add new handler",
@ -2070,7 +1966,7 @@ func TestSetServe(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil, tt.proxyProtocol) err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix)
if err != nil && !tt.expectErr { if err != nil && !tt.expectErr {
t.Fatalf("got error: %v; did not expect error.", err) t.Fatalf("got error: %v; did not expect error.", err)
} }
@ -2353,8 +2249,3 @@ func exactErrMsg(want error) func(error) string {
return fmt.Sprintf("\ngot: %v\nwant: %v\n", got, want) return fmt.Sprintf("\ngot: %v\nwant: %v\n", got, want)
} }
} }
func ptrToReadOnlySlice[T any](s []T) *views.Slice[T] {
vs := views.SliceOf(s)
return &vs
}

@ -11,7 +11,6 @@ import (
"net/netip" "net/netip"
"os/exec" "os/exec"
"runtime" "runtime"
"slices"
"strconv" "strconv"
"strings" "strings"
@ -26,7 +25,6 @@ import (
"tailscale.com/types/opt" "tailscale.com/types/opt"
"tailscale.com/types/ptr" "tailscale.com/types/ptr"
"tailscale.com/types/views" "tailscale.com/types/views"
"tailscale.com/util/set"
"tailscale.com/version" "tailscale.com/version"
) )
@ -45,30 +43,28 @@ Only settings explicitly mentioned will be set. There are no default values.`,
} }
type setArgsT struct { type setArgsT struct {
acceptRoutes bool acceptRoutes bool
acceptDNS bool acceptDNS bool
exitNodeIP string exitNodeIP string
exitNodeAllowLANAccess bool exitNodeAllowLANAccess bool
shieldsUp bool shieldsUp bool
runSSH bool runSSH bool
runWebClient bool runWebClient bool
hostname string hostname string
advertiseRoutes string advertiseRoutes string
advertiseDefaultRoute bool advertiseDefaultRoute bool
advertiseConnector bool advertiseConnector bool
opUser string opUser string
acceptedRisks string acceptedRisks string
profileName string profileName string
forceDaemon bool forceDaemon bool
updateCheck bool updateCheck bool
updateApply bool updateApply bool
reportPosture bool reportPosture bool
snat bool snat bool
statefulFiltering bool statefulFiltering bool
sync bool netfilterMode string
netfilterMode string relayServerPort string
relayServerPort string
relayServerStaticEndpoints string
} }
func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet {
@ -89,9 +85,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet {
setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version")
setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information")
setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252")
setf.BoolVar(&setArgs.sync, "sync", false, hidden+"actively sync configuration from the control plane (set to false only for network failure testing)")
setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality")
setf.StringVar(&setArgs.relayServerStaticEndpoints, "relay-server-static-endpoints", "", "static IP:port endpoints to advertise as candidates for relay connections (comma-separated, e.g. \"[2001:db8::1]:40000,192.0.2.1:40000\") or empty string to not advertise any static endpoints")
ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) {
st, err := localClient.Status(context.Background()) st, err := localClient.Status(context.Background())
@ -114,7 +108,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet {
switch goos { switch goos {
case "linux": case "linux":
setf.BoolVar(&setArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") setf.BoolVar(&setArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes")
setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, and so on)") setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)")
setf.StringVar(&setArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") setf.StringVar(&setArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)")
case "windows": case "windows":
setf.BoolVar(&setArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") setf.BoolVar(&setArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)")
@ -155,7 +149,6 @@ func runSet(ctx context.Context, args []string) (retErr error) {
OperatorUser: setArgs.opUser, OperatorUser: setArgs.opUser,
NoSNAT: !setArgs.snat, NoSNAT: !setArgs.snat,
ForceDaemon: setArgs.forceDaemon, ForceDaemon: setArgs.forceDaemon,
Sync: opt.NewBool(setArgs.sync),
AutoUpdate: ipn.AutoUpdatePrefs{ AutoUpdate: ipn.AutoUpdatePrefs{
Check: setArgs.updateCheck, Check: setArgs.updateCheck,
Apply: opt.NewBool(setArgs.updateApply), Apply: opt.NewBool(setArgs.updateApply),
@ -249,22 +242,7 @@ func runSet(ctx context.Context, args []string) (retErr error) {
if err != nil { if err != nil {
return fmt.Errorf("failed to set relay server port: %v", err) return fmt.Errorf("failed to set relay server port: %v", err)
} }
maskedPrefs.Prefs.RelayServerPort = ptr.To(uint16(uport)) maskedPrefs.Prefs.RelayServerPort = ptr.To(int(uport))
}
if setArgs.relayServerStaticEndpoints != "" {
endpointsSet := make(set.Set[netip.AddrPort])
endpointsSplit := strings.Split(setArgs.relayServerStaticEndpoints, ",")
for _, s := range endpointsSplit {
ap, err := netip.ParseAddrPort(s)
if err != nil {
return fmt.Errorf("failed to set relay server static endpoints: %q is not a valid IP:port", s)
}
endpointsSet.Add(ap)
}
endpoints := endpointsSet.Slice()
slices.SortFunc(endpoints, netip.AddrPort.Compare)
maskedPrefs.Prefs.RelayServerStaticEndpoints = endpoints
} }
checkPrefs := curPrefs.Clone() checkPrefs := curPrefs.Clone()

@ -122,7 +122,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet {
switch goos { switch goos {
case "linux": case "linux":
upf.BoolVar(&upArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") upf.BoolVar(&upArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes")
upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, and so on)") upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)")
upf.StringVar(&upArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") upf.StringVar(&upArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)")
case "windows": case "windows":
upf.BoolVar(&upArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") upf.BoolVar(&upArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)")
@ -137,7 +137,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet {
// Some flags are only for "up", not "login". // Some flags are only for "up", not "login".
upf.BoolVar(&upArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") upf.BoolVar(&upArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)")
upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values") upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values")
upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this may bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)") upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this will bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)")
registerAcceptRiskFlag(upf, &upArgs.acceptedRisks) registerAcceptRiskFlag(upf, &upArgs.acceptedRisks)
} }
@ -388,8 +388,7 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus
if !env.upArgs.reset { if !env.upArgs.reset {
applyImplicitPrefs(prefs, curPrefs, env) applyImplicitPrefs(prefs, curPrefs, env)
simpleUp, err = checkForAccidentalSettingReverts(prefs, curPrefs, env) if err := checkForAccidentalSettingReverts(prefs, curPrefs, env); err != nil {
if err != nil {
return false, nil, err return false, nil, err
} }
} }
@ -421,6 +420,11 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus
tagsChanged := !reflect.DeepEqual(curPrefs.AdvertiseTags, prefs.AdvertiseTags) tagsChanged := !reflect.DeepEqual(curPrefs.AdvertiseTags, prefs.AdvertiseTags)
simpleUp = env.flagSet.NFlag() == 0 &&
curPrefs.Persist != nil &&
curPrefs.Persist.UserProfile.LoginName != "" &&
env.backendState != ipn.NeedsLogin.String()
justEdit := env.backendState == ipn.Running.String() && justEdit := env.backendState == ipn.Running.String() &&
!env.upArgs.forceReauth && !env.upArgs.forceReauth &&
env.upArgs.authKeyOrFile == "" && env.upArgs.authKeyOrFile == "" &&
@ -814,7 +818,6 @@ func upWorthyWarning(s string) bool {
strings.Contains(s, healthmsg.WarnAcceptRoutesOff) || strings.Contains(s, healthmsg.WarnAcceptRoutesOff) ||
strings.Contains(s, healthmsg.LockedOut) || strings.Contains(s, healthmsg.LockedOut) ||
strings.Contains(s, healthmsg.WarnExitNodeUsage) || strings.Contains(s, healthmsg.WarnExitNodeUsage) ||
strings.Contains(s, healthmsg.InMemoryTailnetLockState) ||
strings.Contains(strings.ToLower(s), "update available: ") strings.Contains(strings.ToLower(s), "update available: ")
} }
@ -886,8 +889,6 @@ func init() {
addPrefFlagMapping("advertise-connector", "AppConnector") addPrefFlagMapping("advertise-connector", "AppConnector")
addPrefFlagMapping("report-posture", "PostureChecking") addPrefFlagMapping("report-posture", "PostureChecking")
addPrefFlagMapping("relay-server-port", "RelayServerPort") addPrefFlagMapping("relay-server-port", "RelayServerPort")
addPrefFlagMapping("sync", "Sync")
addPrefFlagMapping("relay-server-static-endpoints", "RelayServerStaticEndpoints")
} }
func addPrefFlagMapping(flagName string, prefNames ...string) { func addPrefFlagMapping(flagName string, prefNames ...string) {
@ -923,7 +924,7 @@ func updateMaskedPrefsFromUpOrSetFlag(mp *ipn.MaskedPrefs, flagName string) {
if prefs, ok := prefsOfFlag[flagName]; ok { if prefs, ok := prefsOfFlag[flagName]; ok {
for _, pref := range prefs { for _, pref := range prefs {
f := reflect.ValueOf(mp).Elem() f := reflect.ValueOf(mp).Elem()
for name := range strings.SplitSeq(pref, ".") { for _, name := range strings.Split(pref, ".") {
f = f.FieldByName(name + "Set") f = f.FieldByName(name + "Set")
} }
f.SetBool(true) f.SetBool(true)
@ -965,10 +966,10 @@ type upCheckEnv struct {
// //
// mp is the mask of settings actually set, where mp.Prefs is the new // mp is the mask of settings actually set, where mp.Prefs is the new
// preferences to set, including any values set from implicit flags. // preferences to set, including any values set from implicit flags.
func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, err error) { func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) error {
if curPrefs.ControlURL == "" { if curPrefs.ControlURL == "" {
// Don't validate things on initial "up" before a control URL has been set. // Don't validate things on initial "up" before a control URL has been set.
return false, nil return nil
} }
flagIsSet := map[string]bool{} flagIsSet := map[string]bool{}
@ -976,13 +977,10 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck
flagIsSet[f.Name] = true flagIsSet[f.Name] = true
}) })
if len(flagIsSet) == 0 && if len(flagIsSet) == 0 {
curPrefs.Persist != nil &&
curPrefs.Persist.UserProfile.LoginName != "" &&
env.backendState != ipn.NeedsLogin.String() {
// A bare "tailscale up" is a special case to just // A bare "tailscale up" is a special case to just
// mean bringing the network up without any changes. // mean bringing the network up without any changes.
return true, nil return nil
} }
// flagsCur is what flags we'd need to use to keep the exact // flagsCur is what flags we'd need to use to keep the exact
@ -1024,7 +1022,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck
missing = append(missing, fmtFlagValueArg(flagName, valCur)) missing = append(missing, fmtFlagValueArg(flagName, valCur))
} }
if len(missing) == 0 { if len(missing) == 0 {
return false, nil return nil
} }
// Some previously provided flags are missing. This run of 'tailscale // Some previously provided flags are missing. This run of 'tailscale
@ -1057,7 +1055,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck
fmt.Fprintf(&sb, " %s", a) fmt.Fprintf(&sb, " %s", a)
} }
sb.WriteString("\n\n") sb.WriteString("\n\n")
return false, errors.New(sb.String()) return errors.New(sb.String())
} }
// applyImplicitPrefs mutates prefs to add implicit preferences for the user operator. // applyImplicitPrefs mutates prefs to add implicit preferences for the user operator.

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save