mirror of https://github.com/tailscale/tailscale/
Compare commits
8 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
1b41fdeddb | 1 year ago |
|
|
3037dc793c | 1 year ago |
|
|
6e0f168db0 | 1 year ago |
|
|
3e3d5d8c68 | 1 year ago |
|
|
c80eb698d5 | 1 year ago |
|
|
1aef3e83b8 | 1 year ago |
|
|
2690b4762f | 1 year ago |
|
|
0267fe83b2 | 1 year ago |
@ -1,17 +0,0 @@
|
|||||||
PRs welcome! But please file bugs first and explain the problem or
|
|
||||||
motivation. For new or changed functionality, strike up a discussion
|
|
||||||
and get agreement on the design/solution before spending too much time writing
|
|
||||||
code.
|
|
||||||
|
|
||||||
Commit messages should [reference
|
|
||||||
bugs](https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls).
|
|
||||||
|
|
||||||
We require [Developer Certificate of
|
|
||||||
Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin) (DCO)
|
|
||||||
`Signed-off-by` lines in commits. (`git commit -s`)
|
|
||||||
|
|
||||||
Please squash your code review edits & force push. Multiple commits in
|
|
||||||
a PR are fine, but only if they're each logically separate and all tests pass
|
|
||||||
at each stage. No fixup commits.
|
|
||||||
|
|
||||||
See [commit-messages.md](docs/commit-messages.md) (or skim `git log`) for our commit message style.
|
|
||||||
@ -1,54 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
#
|
|
||||||
# This script sets up cigocacher, but should never fail the build if unsuccessful.
|
|
||||||
# It expects to run on a GitHub-hosted runner, and connects to cigocached over a
|
|
||||||
# private Azure network that is configured at the runner group level in GitHub.
|
|
||||||
#
|
|
||||||
# Usage: ./action.sh
|
|
||||||
# Inputs:
|
|
||||||
# URL: The cigocached server URL.
|
|
||||||
# HOST: The cigocached server host to dial.
|
|
||||||
# Outputs:
|
|
||||||
# success: Whether cigocacher was set up successfully.
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
if [ -z "${GITHUB_ACTIONS:-}" ]; then
|
|
||||||
echo "This script is intended to run within GitHub Actions"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "${URL:-}" ]; then
|
|
||||||
echo "No cigocached URL is set, skipping cigocacher setup"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
GOPATH=$(command -v go || true)
|
|
||||||
if [ -z "${GOPATH}" ]; then
|
|
||||||
if [ ! -f "tool/go" ]; then
|
|
||||||
echo "Go not available, unable to proceed"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
GOPATH="./tool/go"
|
|
||||||
fi
|
|
||||||
|
|
||||||
BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(${GOPATH} env GOEXE)"
|
|
||||||
if [ -d "cmd/cigocacher" ]; then
|
|
||||||
echo "cmd/cigocacher found locally, building from local source"
|
|
||||||
"${GOPATH}" build -o "${BIN_PATH}" ./cmd/cigocacher
|
|
||||||
else
|
|
||||||
echo "cmd/cigocacher not found locally, fetching from tailscale.com/cmd/cigocacher"
|
|
||||||
"${GOPATH}" build -o "${BIN_PATH}" tailscale.com/cmd/cigocacher
|
|
||||||
fi
|
|
||||||
|
|
||||||
CIGOCACHER_TOKEN="$("${BIN_PATH}" --auth --cigocached-url "${URL}" --cigocached-host "${HOST}" )"
|
|
||||||
if [ -z "${CIGOCACHER_TOKEN:-}" ]; then
|
|
||||||
echo "Failed to fetch cigocacher token, skipping cigocacher setup"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Fetched cigocacher token successfully"
|
|
||||||
echo "::add-mask::${CIGOCACHER_TOKEN}"
|
|
||||||
|
|
||||||
echo "GOCACHEPROG=${BIN_PATH} --cache-dir ${CACHE_DIR} --cigocached-url ${URL} --cigocached-host ${HOST} --token ${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}"
|
|
||||||
echo "success=true" >> "${GITHUB_OUTPUT}"
|
|
||||||
@ -1,35 +0,0 @@
|
|||||||
name: go-cache
|
|
||||||
description: Set up build to use cigocacher
|
|
||||||
|
|
||||||
inputs:
|
|
||||||
cigocached-url:
|
|
||||||
description: URL of the cigocached server
|
|
||||||
required: true
|
|
||||||
cigocached-host:
|
|
||||||
description: Host to dial for the cigocached server
|
|
||||||
required: true
|
|
||||||
checkout-path:
|
|
||||||
description: Path to cloned repository
|
|
||||||
required: true
|
|
||||||
cache-dir:
|
|
||||||
description: Directory to use for caching
|
|
||||||
required: true
|
|
||||||
|
|
||||||
outputs:
|
|
||||||
success:
|
|
||||||
description: Whether cigocacher was set up successfully
|
|
||||||
value: ${{ steps.setup.outputs.success }}
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: composite
|
|
||||||
steps:
|
|
||||||
- name: Setup cigocacher
|
|
||||||
id: setup
|
|
||||||
shell: bash
|
|
||||||
env:
|
|
||||||
URL: ${{ inputs.cigocached-url }}
|
|
||||||
HOST: ${{ inputs.cigocached-host }}
|
|
||||||
CACHE_DIR: ${{ inputs.cache-dir }}
|
|
||||||
working-directory: ${{ inputs.checkout-path }}
|
|
||||||
# https://github.com/orgs/community/discussions/25910
|
|
||||||
run: $GITHUB_ACTION_PATH/action.sh
|
|
||||||
@ -1,73 +0,0 @@
|
|||||||
name: Build cigocacher
|
|
||||||
|
|
||||||
on:
|
|
||||||
# Released on-demand. The commit will be used as part of the tag, so generally
|
|
||||||
# prefer to release from main where the commit is stable in linear history.
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
GOOS: ["linux", "darwin", "windows"]
|
|
||||||
GOARCH: ["amd64", "arm64"]
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
env:
|
|
||||||
GOOS: "${{ matrix.GOOS }}"
|
|
||||||
GOARCH: "${{ matrix.GOARCH }}"
|
|
||||||
CGO_ENABLED: "0"
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
OUT="cigocacher$(./tool/go env GOEXE)"
|
|
||||||
./tool/go build -o "${OUT}" ./cmd/cigocacher/
|
|
||||||
tar -zcf cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz "${OUT}"
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
|
||||||
with:
|
|
||||||
name: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}
|
|
||||||
path: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz
|
|
||||||
|
|
||||||
release:
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
needs: build
|
|
||||||
permissions:
|
|
||||||
contents: write
|
|
||||||
steps:
|
|
||||||
- name: Download all artifacts
|
|
||||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
|
||||||
with:
|
|
||||||
pattern: 'cigocacher-*'
|
|
||||||
merge-multiple: true
|
|
||||||
# This step is a simplified version of actions/create-release and
|
|
||||||
# actions/upload-release-asset, which are archived and unmaintained.
|
|
||||||
- name: Create release
|
|
||||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
const fs = require('fs');
|
|
||||||
const path = require('path');
|
|
||||||
|
|
||||||
const { data: release } = await github.rest.repos.createRelease({
|
|
||||||
owner: context.repo.owner,
|
|
||||||
repo: context.repo.repo,
|
|
||||||
tag_name: `cmd/cigocacher/${{ github.sha }}`,
|
|
||||||
name: `cigocacher-${{ github.sha }}`,
|
|
||||||
draft: false,
|
|
||||||
prerelease: true,
|
|
||||||
target_commitish: `${{ github.sha }}`
|
|
||||||
});
|
|
||||||
|
|
||||||
const files = fs.readdirSync('.').filter(f => f.endsWith('.tar.gz'));
|
|
||||||
|
|
||||||
for (const file of files) {
|
|
||||||
await github.rest.repos.uploadReleaseAsset({
|
|
||||||
owner: context.repo.owner,
|
|
||||||
repo: context.repo.repo,
|
|
||||||
release_id: release.id,
|
|
||||||
name: file,
|
|
||||||
data: fs.readFileSync(file)
|
|
||||||
});
|
|
||||||
console.log(`Uploaded ${file}`);
|
|
||||||
}
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
name: "Validate Docker base image"
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- "Dockerfile.base"
|
|
||||||
- ".github/workflows/docker-base.yml"
|
|
||||||
jobs:
|
|
||||||
build-and-test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
- name: "build and test"
|
|
||||||
run: |
|
|
||||||
set -e
|
|
||||||
IMG="test-base:$(head -c 8 /dev/urandom | xxd -p)"
|
|
||||||
docker build -t "$IMG" -f Dockerfile.base .
|
|
||||||
|
|
||||||
iptables_version=$(docker run --rm "$IMG" iptables --version)
|
|
||||||
if [[ "$iptables_version" != *"(legacy)"* ]]; then
|
|
||||||
echo "ERROR: Docker base image should contain legacy iptables; found ${iptables_version}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
ip6tables_version=$(docker run --rm "$IMG" ip6tables --version)
|
|
||||||
if [[ "$ip6tables_version" != *"(legacy)"* ]]; then
|
|
||||||
echo "ERROR: Docker base image should contain legacy ip6tables; found ${ip6tables_version}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
@ -1,27 +0,0 @@
|
|||||||
# Run some natlab integration tests.
|
|
||||||
# See https://github.com/tailscale/tailscale/issues/13038
|
|
||||||
name: "natlab-integrationtest"
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- "tstest/integration/nat/nat_test.go"
|
|
||||||
jobs:
|
|
||||||
natlab-integrationtest:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Check out code
|
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
- name: Install qemu
|
|
||||||
run: |
|
|
||||||
sudo rm /var/lib/man-db/auto-update
|
|
||||||
sudo apt-get -y update
|
|
||||||
sudo apt-get -y remove man-db
|
|
||||||
sudo apt-get install -y qemu-system-x86 qemu-utils
|
|
||||||
- name: Run natlab integration tests
|
|
||||||
run: |
|
|
||||||
./tool/go test -v -run=^TestEasyEasy$ -timeout=3m -count=1 ./tstest/integration/nat --run-vm-tests
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
# Pin images used in github actions to a hash instead of a version tag.
|
|
||||||
name: pin-github-actions
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
|
||||||
- ".github/workflows/**"
|
|
||||||
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
pull-requests: read
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
run:
|
|
||||||
name: pin-github-actions
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
- name: pin
|
|
||||||
run: make pin-github-actions
|
|
||||||
- name: check for changed workflow files
|
|
||||||
run: git diff --no-ext-diff --exit-code .github/workflows || (echo "Some github actions versions need pinning, run make pin-github-actions."; exit 1)
|
|
||||||
@ -1,32 +0,0 @@
|
|||||||
name: request-dataplane-review
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
types: [ opened, synchronize, reopened, ready_for_review ]
|
|
||||||
paths:
|
|
||||||
- ".github/workflows/request-dataplane-review.yml"
|
|
||||||
- "**/*derp*"
|
|
||||||
- "**/derp*/**"
|
|
||||||
- "!**/depaware.txt"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
request-dataplane-review:
|
|
||||||
if: github.event.pull_request.draft == false
|
|
||||||
name: Request Dataplane Review
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Check out code
|
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
- name: Get access token
|
|
||||||
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
|
||||||
id: generate-token
|
|
||||||
with:
|
|
||||||
# Get token for app: https://github.com/apps/change-visibility-bot
|
|
||||||
app-id: ${{ secrets.VISIBILITY_BOT_APP_ID }}
|
|
||||||
private-key: ${{ secrets.VISIBILITY_BOT_APP_PRIVATE_KEY }}
|
|
||||||
- name: Add reviewers
|
|
||||||
env:
|
|
||||||
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
|
|
||||||
url: ${{ github.event.pull_request.html_url }}
|
|
||||||
run: |
|
|
||||||
gh pr edit "$url" --add-reviewer tailscale/dataplane
|
|
||||||
@ -1,38 +0,0 @@
|
|||||||
name: tailscale.com/cmd/vet
|
|
||||||
|
|
||||||
env:
|
|
||||||
HOME: ${{ github.workspace }}
|
|
||||||
# GOMODCACHE is the same definition on all OSes. Within the workspace, we use
|
|
||||||
# toplevel directories "src" (for the checked out source code), and "gomodcache"
|
|
||||||
# and other caches as siblings to follow.
|
|
||||||
GOMODCACHE: ${{ github.workspace }}/gomodcache
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
- "release-branch/*"
|
|
||||||
paths:
|
|
||||||
- "**.go"
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- "**.go"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
vet:
|
|
||||||
runs-on: [ self-hosted, linux ]
|
|
||||||
timeout-minutes: 5
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Check out code
|
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
with:
|
|
||||||
path: src
|
|
||||||
|
|
||||||
- name: Build 'go vet' tool
|
|
||||||
working-directory: src
|
|
||||||
run: ./tool/go build -o /tmp/vettool tailscale.com/cmd/vet
|
|
||||||
|
|
||||||
- name: Run 'go vet'
|
|
||||||
working-directory: src
|
|
||||||
run: ./tool/go vet -vettool=/tmp/vettool tailscale.com/...
|
|
||||||
@ -1,110 +1,104 @@
|
|||||||
version: "2"
|
|
||||||
# Configuration for how we run golangci-lint
|
|
||||||
# Timeout of 5m was the default in v1.
|
|
||||||
run:
|
|
||||||
timeout: 5m
|
|
||||||
linters:
|
linters:
|
||||||
# Don't enable any linters by default; just the ones that we explicitly
|
# Don't enable any linters by default; just the ones that we explicitly
|
||||||
# enable in the list below.
|
# enable in the list below.
|
||||||
default: none
|
disable-all: true
|
||||||
enable:
|
enable:
|
||||||
- bidichk
|
- bidichk
|
||||||
|
- gofmt
|
||||||
|
- goimports
|
||||||
- govet
|
- govet
|
||||||
- misspell
|
- misspell
|
||||||
- revive
|
- revive
|
||||||
settings:
|
|
||||||
|
# Configuration for how we run golangci-lint
|
||||||
|
run:
|
||||||
|
timeout: 5m
|
||||||
|
|
||||||
|
issues:
|
||||||
|
# Excluding configuration per-path, per-linter, per-text and per-source
|
||||||
|
exclude-rules:
|
||||||
|
# These are forks of an upstream package and thus are exempt from stylistic
|
||||||
|
# changes that would make pulling in upstream changes harder.
|
||||||
|
- path: tempfork/.*\.go
|
||||||
|
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
|
||||||
|
- path: util/singleflight/.*\.go
|
||||||
|
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
|
||||||
|
|
||||||
|
# Per-linter settings are contained in this top-level key
|
||||||
|
linters-settings:
|
||||||
|
# Enable all rules by default; we don't use invisible unicode runes.
|
||||||
|
bidichk:
|
||||||
|
|
||||||
|
gofmt:
|
||||||
|
rewrite-rules:
|
||||||
|
- pattern: 'interface{}'
|
||||||
|
replacement: 'any'
|
||||||
|
|
||||||
|
goimports:
|
||||||
|
|
||||||
|
govet:
|
||||||
# Matches what we use in corp as of 2023-12-07
|
# Matches what we use in corp as of 2023-12-07
|
||||||
govet:
|
enable:
|
||||||
enable:
|
- asmdecl
|
||||||
- asmdecl
|
- assign
|
||||||
- assign
|
- atomic
|
||||||
- atomic
|
- bools
|
||||||
- bools
|
- buildtag
|
||||||
- buildtag
|
- cgocall
|
||||||
- cgocall
|
- copylocks
|
||||||
- copylocks
|
- deepequalerrors
|
||||||
- deepequalerrors
|
- errorsas
|
||||||
- errorsas
|
- framepointer
|
||||||
- framepointer
|
- httpresponse
|
||||||
- httpresponse
|
- ifaceassert
|
||||||
- ifaceassert
|
- loopclosure
|
||||||
- loopclosure
|
- lostcancel
|
||||||
- lostcancel
|
- nilfunc
|
||||||
- nilfunc
|
- nilness
|
||||||
- nilness
|
- printf
|
||||||
- printf
|
- reflectvaluecompare
|
||||||
- reflectvaluecompare
|
- shift
|
||||||
- shift
|
- sigchanyzer
|
||||||
- sigchanyzer
|
- sortslice
|
||||||
- sortslice
|
- stdmethods
|
||||||
- stdmethods
|
- stringintconv
|
||||||
- stringintconv
|
- structtag
|
||||||
- structtag
|
- testinggoroutine
|
||||||
- testinggoroutine
|
- tests
|
||||||
- tests
|
- unmarshal
|
||||||
- unmarshal
|
- unreachable
|
||||||
- unreachable
|
- unsafeptr
|
||||||
- unsafeptr
|
- unusedresult
|
||||||
- unusedresult
|
settings:
|
||||||
settings:
|
printf:
|
||||||
printf:
|
# List of print function names to check (in addition to default)
|
||||||
# List of print function names to check (in addition to default)
|
funcs:
|
||||||
funcs:
|
- github.com/tailscale/tailscale/types/logger.Discard
|
||||||
- github.com/tailscale/tailscale/types/logger.Discard
|
# NOTE(andrew-d): this doesn't currently work because the printf
|
||||||
# NOTE(andrew-d): this doesn't currently work because the printf
|
# analyzer doesn't support type declarations
|
||||||
# analyzer doesn't support type declarations
|
#- github.com/tailscale/tailscale/types/logger.Logf
|
||||||
#- github.com/tailscale/tailscale/types/logger.Logf
|
|
||||||
revive:
|
misspell:
|
||||||
enable-all-rules: false
|
|
||||||
rules:
|
revive:
|
||||||
- name: atomic
|
enable-all-rules: false
|
||||||
- name: context-keys-type
|
ignore-generated-header: true
|
||||||
- name: defer
|
|
||||||
arguments: [[
|
|
||||||
# Calling 'recover' at the time a defer is registered (i.e. "defer recover()") has no effect.
|
|
||||||
"immediate-recover",
|
|
||||||
# Calling 'recover' outside of a deferred function has no effect
|
|
||||||
"recover",
|
|
||||||
# Returning values from a deferred function has no effect
|
|
||||||
"return",
|
|
||||||
]]
|
|
||||||
- name: duplicated-imports
|
|
||||||
- name: errorf
|
|
||||||
- name: string-of-int
|
|
||||||
- name: time-equal
|
|
||||||
- name: unconditional-recursion
|
|
||||||
- name: useless-break
|
|
||||||
- name: waitgroup-by-value
|
|
||||||
exclusions:
|
|
||||||
generated: lax
|
|
||||||
presets:
|
|
||||||
- comments
|
|
||||||
- common-false-positives
|
|
||||||
- legacy
|
|
||||||
- std-error-handling
|
|
||||||
rules:
|
rules:
|
||||||
# These are forks of an upstream package and thus are exempt from stylistic
|
- name: atomic
|
||||||
# changes that would make pulling in upstream changes harder.
|
- name: context-keys-type
|
||||||
- path: tempfork/.*\.go
|
- name: defer
|
||||||
text: File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`
|
arguments: [[
|
||||||
- path: util/singleflight/.*\.go
|
# Calling 'recover' at the time a defer is registered (i.e. "defer recover()") has no effect.
|
||||||
text: File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`
|
"immediate-recover",
|
||||||
paths:
|
# Calling 'recover' outside of a deferred function has no effect
|
||||||
- third_party$
|
"recover",
|
||||||
- builtin$
|
# Returning values from a deferred function has no effect
|
||||||
- examples$
|
"return",
|
||||||
formatters:
|
]]
|
||||||
enable:
|
- name: duplicated-imports
|
||||||
- gofmt
|
- name: errorf
|
||||||
- goimports
|
- name: string-of-int
|
||||||
settings:
|
- name: time-equal
|
||||||
gofmt:
|
- name: unconditional-recursion
|
||||||
rewrite-rules:
|
- name: useless-break
|
||||||
- pattern: interface{}
|
- name: waitgroup-by-value
|
||||||
replacement: any
|
|
||||||
exclusions:
|
|
||||||
generated: lax
|
|
||||||
paths:
|
|
||||||
- third_party$
|
|
||||||
- builtin$
|
|
||||||
- examples$
|
|
||||||
|
|||||||
@ -1 +1 @@
|
|||||||
3.22
|
3.18
|
||||||
@ -1,103 +1,135 @@
|
|||||||
# Tailscale Community Code of Conduct
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
## Our Pledge
|
## Our Pledge
|
||||||
|
|
||||||
We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community.
|
We as members, contributors, and leaders pledge to make participation
|
||||||
Unacceptable, harmful and inappropriate behavior will not be tolerated.
|
in our community a harassment-free experience for everyone, regardless
|
||||||
|
of age, body size, visible or invisible disability, ethnicity, sex
|
||||||
|
characteristics, gender identity and expression, level of experience,
|
||||||
|
education, socio-economic status, nationality, personal appearance,
|
||||||
|
race, religion, or sexual identity and orientation.
|
||||||
|
|
||||||
|
We pledge to act and interact in ways that contribute to an open,
|
||||||
|
welcoming, diverse, inclusive, and healthy community.
|
||||||
|
|
||||||
## Our Standards
|
## Our Standards
|
||||||
|
|
||||||
Examples of behavior that contributes to a positive environment for our community include:
|
Examples of behavior that contributes to a positive environment for
|
||||||
|
our community include:
|
||||||
|
|
||||||
- Demonstrating empathy and kindness toward other people.
|
* Demonstrating empathy and kindness toward other people
|
||||||
- Being respectful of differing opinions, viewpoints, and experiences.
|
* Being respectful of differing opinions, viewpoints, and experiences
|
||||||
- Giving and gracefully accepting constructive feedback.
|
* Giving and gracefully accepting constructive feedback
|
||||||
- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience.
|
* Accepting responsibility and apologizing to those affected by our
|
||||||
- Focusing on what is best not just for us as individuals, but for the overall community.
|
mistakes, and learning from the experience
|
||||||
|
* Focusing on what is best not just for us as individuals, but for the
|
||||||
|
overall community
|
||||||
|
|
||||||
Examples of unacceptable behavior include without limitation:
|
Examples of unacceptable behavior include:
|
||||||
|
|
||||||
- The use of language, imagery or emojis (collectively "content") that is racist, sexist, homophobic, transphobic, or otherwise harassing or discriminatory based on any protected characteristic.
|
* The use of sexualized language or imagery, and sexual attention or
|
||||||
- The use of sexualized content and sexual attention or advances of any kind.
|
advances of any kind
|
||||||
- The use of violent, intimidating or bullying content.
|
* Trolling, insulting or derogatory comments, and personal or
|
||||||
- Trolling, concern trolling, insulting or derogatory comments, and personal or political attacks.
|
political attacks
|
||||||
- Public or private harassment.
|
* Public or private harassment
|
||||||
- Publishing others' personal information, such as a photo, physical address, email address, online profile information, or other personal information, without their explicit permission or with the intent to bully or harass the other person.
|
* Publishing others' private information, such as a physical or email
|
||||||
- Posting deep fake or other AI generated content about or involving another person without the explicit permission.
|
address, without their explicit permission
|
||||||
- Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages.
|
* Other conduct which could reasonably be considered inappropriate in
|
||||||
- Phishing or any similar activity.
|
a professional setting
|
||||||
- Distributing or promoting malware.
|
|
||||||
- The use of any coded or suggestive content to hide or provoke otherwise unacceptable behavior.
|
|
||||||
- Other conduct which could reasonably be considered harmful, illegal, or inappropriate in a professional setting.
|
|
||||||
|
|
||||||
Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
|
## Enforcement Responsibilities
|
||||||
|
|
||||||
## Reporting Incidents
|
Community leaders are responsible for clarifying and enforcing our
|
||||||
|
standards of acceptable behavior and will take appropriate and fair
|
||||||
|
corrective action in response to any behavior that they deem
|
||||||
|
inappropriate, threatening, offensive, or harmful.
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to Tailscale directly via <info@tailscale.com>, or to the community leaders or moderators via DM or similar.
|
Community leaders have the right and responsibility to remove, edit,
|
||||||
All complaints will be reviewed and investigated promptly and fairly.
|
or reject comments, commits, code, wiki edits, issues, and other
|
||||||
We will respect the privacy and safety of the reporter of any issues.
|
contributions that are not aligned to this Code of Conduct, and will
|
||||||
|
communicate reasons for moderation decisions when appropriate.
|
||||||
|
|
||||||
Please note that this community is not moderated by staff 24/7, and we do not have, and do not undertake, any obligation to prescreen, monitor, edit, or remove any content or data, or to actively seek facts or circumstances indicating illegal activity.
|
## Scope
|
||||||
While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours.
|
|
||||||
If you encounter any issues, report them using the appropriate channels.
|
|
||||||
|
|
||||||
## Enforcement Guidelines
|
This Code of Conduct applies within all community spaces, and also
|
||||||
|
applies when an individual is officially representing the community in
|
||||||
|
public spaces. Examples of representing our community include using an
|
||||||
|
official e-mail address, posting via an official social media account,
|
||||||
|
or acting as an appointed representative at an online or offline
|
||||||
|
event.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
|
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||||
|
may be reported to the community leaders responsible for enforcement
|
||||||
|
at [info@tailscale.com](mailto:info@tailscale.com). All complaints
|
||||||
|
will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
Community leaders and moderators have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Community Code of Conduct.
|
All community leaders are obligated to respect the privacy and
|
||||||
Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you.
|
security of the reporter of any incident.
|
||||||
We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole.
|
|
||||||
|
## Enforcement Guidelines
|
||||||
|
|
||||||
Community leaders will follow these community enforcement guidelines in determining the consequences for any action they deem in violation of this Code of Conduct,
|
Community leaders will follow these Community Impact Guidelines in
|
||||||
and retain full discretion to apply the enforcement guidelines as necessary depending on the circumstances:
|
determining the consequences for any action they deem in violation of
|
||||||
|
this Code of Conduct:
|
||||||
|
|
||||||
### 1. Correction
|
### 1. Correction
|
||||||
|
|
||||||
Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
|
**Community Impact**: Use of inappropriate language or other behavior
|
||||||
|
deemed unprofessional or unwelcome in the community.
|
||||||
|
|
||||||
Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate.
|
**Consequence**: A private, written warning from community leaders,
|
||||||
A public apology may be requested.
|
providing clarity around the nature of the violation and an
|
||||||
|
explanation of why the behavior was inappropriate. A public apology
|
||||||
|
may be requested.
|
||||||
|
|
||||||
### 2. Warning
|
### 2. Warning
|
||||||
|
|
||||||
Community Impact: A violation through a single incident or series of actions.
|
**Community Impact**: A violation through a single incident or series
|
||||||
|
of actions.
|
||||||
|
|
||||||
Consequence: A warning with consequences for continued behavior.
|
**Consequence**: A warning with consequences for continued
|
||||||
No interaction with the people involved, including unsolicited interaction with those enforcing this Community Code of Conduct, for a specified period of time.
|
behavior. No interaction with the people involved, including
|
||||||
This includes avoiding interactions in community spaces as well as external channels like social media.
|
unsolicited interaction with those enforcing the Code of Conduct, for
|
||||||
Violating these terms may lead to a temporary or permanent ban.
|
a specified period of time. This includes avoiding interactions in
|
||||||
|
community spaces as well as external channels like social
|
||||||
|
media. Violating these terms may lead to a temporary or permanent ban.
|
||||||
|
|
||||||
### 3. Temporary Ban
|
### 3. Temporary Ban
|
||||||
|
|
||||||
Community Impact: A serious violation of community standards, including sustained inappropriate behavior.
|
**Community Impact**: A serious violation of community standards,
|
||||||
|
including sustained inappropriate behavior.
|
||||||
|
|
||||||
Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time.
|
**Consequence**: A temporary ban from any sort of interaction or
|
||||||
No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
|
public communication with the community for a specified period of
|
||||||
|
time. No public or private interaction with the people involved,
|
||||||
|
including unsolicited interaction with those enforcing the Code of
|
||||||
|
Conduct, is allowed during this period. Violating these terms may lead
|
||||||
|
to a permanent ban.
|
||||||
|
|
||||||
### 4. Permanent Ban
|
### 4. Permanent Ban
|
||||||
|
|
||||||
Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
|
**Community Impact**: Demonstrating a pattern of violation of
|
||||||
|
community standards, including sustained inappropriate behavior,
|
||||||
|
harassment of an individual, or aggression toward or disparagement of
|
||||||
|
classes of individuals.
|
||||||
|
|
||||||
Consequence: A permanent ban from any sort of public interaction within the community.
|
**Consequence**: A permanent ban from any sort of public interaction
|
||||||
|
within the community.
|
||||||
## Acceptable Use Policy
|
|
||||||
|
|
||||||
Violation of this Community Code of Conduct may also violate the Tailscale Acceptable Use Policy, which may result in suspension or termination of your Tailscale account.
|
|
||||||
For more information, please see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
|
|
||||||
|
|
||||||
## Privacy
|
|
||||||
|
|
||||||
Please see the Tailscale [Privacy Policy](https://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information.
|
|
||||||
|
|
||||||
## Attribution
|
## Attribution
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at <https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>.
|
This Code of Conduct is adapted from the [Contributor
|
||||||
|
Covenant][homepage], version 2.0, available at
|
||||||
|
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||||
|
|
||||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
|
Community Impact Guidelines were inspired by [Mozilla's code of
|
||||||
|
conduct enforcement ladder](https://github.com/mozilla/diversity).
|
||||||
|
|
||||||
[homepage]: https://www.contributor-covenant.org
|
[homepage]: https://www.contributor-covenant.org
|
||||||
|
|
||||||
For answers to common questions about this code of conduct, see the FAQ at <https://www.contributor-covenant.org/faq>.
|
For answers to common questions about this code of conduct, see the
|
||||||
Translations are available at <https://www.contributor-covenant.org/translations>.
|
FAQ at https://www.contributor-covenant.org/faq. Translations are
|
||||||
|
available at https://www.contributor-covenant.org/translations.
|
||||||
|
|
||||||
|
|||||||
@ -1,12 +1,5 @@
|
|||||||
# Copyright (c) Tailscale Inc & AUTHORS
|
# Copyright (c) Tailscale Inc & AUTHORS
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
FROM alpine:3.22
|
FROM alpine:3.18
|
||||||
RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils
|
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils
|
||||||
# Alpine 3.19 replaced legacy iptables with nftables based implementation.
|
|
||||||
# Tailscale is used on some hosts that don't support nftables, such as Synology
|
|
||||||
# NAS, so link iptables back to legacy version. Hosts that don't require legacy
|
|
||||||
# iptables should be able to use Tailscale in nftables mode. See
|
|
||||||
# https://github.com/tailscale/tailscale/issues/17854
|
|
||||||
RUN rm /usr/sbin/iptables && ln -s /usr/sbin/iptables-legacy /usr/sbin/iptables
|
|
||||||
RUN rm /usr/sbin/ip6tables && ln -s /usr/sbin/ip6tables-legacy /usr/sbin/ip6tables
|
|
||||||
|
|||||||
@ -1 +1 @@
|
|||||||
1.95.0
|
1.78.3
|
||||||
|
|||||||
@ -1,110 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
package appc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/netip"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"tailscale.com/tailcfg"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Conn25 holds the developing state for the as yet nascent next generation app connector.
|
|
||||||
// There is currently (2025-12-08) no actual app connecting functionality.
|
|
||||||
type Conn25 struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
transitIPs map[tailcfg.NodeID]map[netip.Addr]netip.Addr
|
|
||||||
}
|
|
||||||
|
|
||||||
const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest"
|
|
||||||
|
|
||||||
// HandleConnectorTransitIPRequest creates a ConnectorTransitIPResponse in response to a ConnectorTransitIPRequest.
|
|
||||||
// It updates the connectors mapping of TransitIP->DestinationIP per peer (tailcfg.NodeID).
|
|
||||||
// If a peer has stored this mapping in the connector Conn25 will route traffic to TransitIPs to DestinationIPs for that peer.
|
|
||||||
func (c *Conn25) HandleConnectorTransitIPRequest(nid tailcfg.NodeID, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse {
|
|
||||||
resp := ConnectorTransitIPResponse{}
|
|
||||||
seen := map[netip.Addr]bool{}
|
|
||||||
for _, each := range ctipr.TransitIPs {
|
|
||||||
if seen[each.TransitIP] {
|
|
||||||
resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{
|
|
||||||
Code: OtherFailure,
|
|
||||||
Message: dupeTransitIPMessage,
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tipresp := c.handleTransitIPRequest(nid, each)
|
|
||||||
seen[each.TransitIP] = true
|
|
||||||
resp.TransitIPs = append(resp.TransitIPs, tipresp)
|
|
||||||
}
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn25) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPRequest) TransitIPResponse {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
if c.transitIPs == nil {
|
|
||||||
c.transitIPs = make(map[tailcfg.NodeID]map[netip.Addr]netip.Addr)
|
|
||||||
}
|
|
||||||
peerMap, ok := c.transitIPs[nid]
|
|
||||||
if !ok {
|
|
||||||
peerMap = make(map[netip.Addr]netip.Addr)
|
|
||||||
c.transitIPs[nid] = peerMap
|
|
||||||
}
|
|
||||||
peerMap[tipr.TransitIP] = tipr.DestinationIP
|
|
||||||
return TransitIPResponse{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn25) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
return c.transitIPs[nid][tip]
|
|
||||||
}
|
|
||||||
|
|
||||||
// TransitIPRequest details a single TransitIP allocation request from a client to a
|
|
||||||
// connector.
|
|
||||||
type TransitIPRequest struct {
|
|
||||||
// TransitIP is the intermediate destination IP that will be received at this
|
|
||||||
// connector and will be replaced by DestinationIP when performing DNAT.
|
|
||||||
TransitIP netip.Addr `json:"transitIP,omitzero"`
|
|
||||||
|
|
||||||
// DestinationIP is the final destination IP that connections to the TransitIP
|
|
||||||
// should be mapped to when performing DNAT.
|
|
||||||
DestinationIP netip.Addr `json:"destinationIP,omitzero"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnectorTransitIPRequest is the request body for a PeerAPI request to
|
|
||||||
// /connector/transit-ip and can include zero or more TransitIP allocation requests.
|
|
||||||
type ConnectorTransitIPRequest struct {
|
|
||||||
// TransitIPs is the list of requested mappings.
|
|
||||||
TransitIPs []TransitIPRequest `json:"transitIPs,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TransitIPResponseCode appears in TransitIPResponse and signifies success or failure status.
|
|
||||||
type TransitIPResponseCode int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// OK indicates that the mapping was created as requested.
|
|
||||||
OK TransitIPResponseCode = 0
|
|
||||||
|
|
||||||
// OtherFailure indicates that the mapping failed for a reason that does not have
|
|
||||||
// another relevant [TransitIPResponsecode].
|
|
||||||
OtherFailure TransitIPResponseCode = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
// TransitIPResponse is the response to a TransitIPRequest
|
|
||||||
type TransitIPResponse struct {
|
|
||||||
// Code is an error code indicating success or failure of the [TransitIPRequest].
|
|
||||||
Code TransitIPResponseCode `json:"code,omitzero"`
|
|
||||||
// Message is an error message explaining what happened, suitable for logging but
|
|
||||||
// not necessarily suitable for displaying in a UI to non-technical users. It
|
|
||||||
// should be empty when [Code] is [OK].
|
|
||||||
Message string `json:"message,omitzero"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnectorTransitIPResponse is the response to a ConnectorTransitIPRequest
|
|
||||||
type ConnectorTransitIPResponse struct {
|
|
||||||
// TransitIPs is the list of outcomes for each requested mapping. Elements
|
|
||||||
// correspond to the order of [ConnectorTransitIPRequest.TransitIPs].
|
|
||||||
TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"`
|
|
||||||
}
|
|
||||||
@ -1,188 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
package appc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/netip"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"tailscale.com/tailcfg"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a
|
|
||||||
// ConnectorTransitIPRequest with 0 TransitIPRequests, we respond with a
|
|
||||||
// ConnectorTransitIPResponse with 0 TransitIPResponses.
|
|
||||||
func TestHandleConnectorTransitIPRequestZeroLength(t *testing.T) {
|
|
||||||
c := &Conn25{}
|
|
||||||
req := ConnectorTransitIPRequest{}
|
|
||||||
nid := tailcfg.NodeID(1)
|
|
||||||
|
|
||||||
resp := c.HandleConnectorTransitIPRequest(nid, req)
|
|
||||||
if len(resp.TransitIPs) != 0 {
|
|
||||||
t.Fatalf("n TransitIPs in response: %d, want 0", len(resp.TransitIPs))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestHandleConnectorTransitIPRequestStoresAddr tests that if sent a
|
|
||||||
// request with a transit addr and a destination addr we store that mapping
|
|
||||||
// and can retrieve it. If sent another req with a different dst for that transit addr
|
|
||||||
// we store that instead.
|
|
||||||
func TestHandleConnectorTransitIPRequestStoresAddr(t *testing.T) {
|
|
||||||
c := &Conn25{}
|
|
||||||
nid := tailcfg.NodeID(1)
|
|
||||||
tip := netip.MustParseAddr("0.0.0.1")
|
|
||||||
dip := netip.MustParseAddr("1.2.3.4")
|
|
||||||
dip2 := netip.MustParseAddr("1.2.3.5")
|
|
||||||
mr := func(t, d netip.Addr) ConnectorTransitIPRequest {
|
|
||||||
return ConnectorTransitIPRequest{
|
|
||||||
TransitIPs: []TransitIPRequest{
|
|
||||||
{TransitIP: t, DestinationIP: d},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resp := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip))
|
|
||||||
if len(resp.TransitIPs) != 1 {
|
|
||||||
t.Fatalf("n TransitIPs in response: %d, want 1", len(resp.TransitIPs))
|
|
||||||
}
|
|
||||||
got := resp.TransitIPs[0].Code
|
|
||||||
if got != TransitIPResponseCode(0) {
|
|
||||||
t.Fatalf("TransitIP Code: %d, want 0", got)
|
|
||||||
}
|
|
||||||
gotAddr := c.transitIPTarget(nid, tip)
|
|
||||||
if gotAddr != dip {
|
|
||||||
t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip)
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapping can be overwritten
|
|
||||||
resp2 := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip2))
|
|
||||||
if len(resp2.TransitIPs) != 1 {
|
|
||||||
t.Fatalf("n TransitIPs in response: %d, want 1", len(resp2.TransitIPs))
|
|
||||||
}
|
|
||||||
got2 := resp.TransitIPs[0].Code
|
|
||||||
if got2 != TransitIPResponseCode(0) {
|
|
||||||
t.Fatalf("TransitIP Code: %d, want 0", got2)
|
|
||||||
}
|
|
||||||
gotAddr2 := c.transitIPTarget(nid, tip)
|
|
||||||
if gotAddr2 != dip2 {
|
|
||||||
t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestHandleConnectorTransitIPRequestMultipleTIP tests that we can
|
|
||||||
// get a req with multiple mappings and we store them all. Including
|
|
||||||
// multiple transit addrs for the same destination.
|
|
||||||
func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) {
|
|
||||||
c := &Conn25{}
|
|
||||||
nid := tailcfg.NodeID(1)
|
|
||||||
tip := netip.MustParseAddr("0.0.0.1")
|
|
||||||
tip2 := netip.MustParseAddr("0.0.0.2")
|
|
||||||
tip3 := netip.MustParseAddr("0.0.0.3")
|
|
||||||
dip := netip.MustParseAddr("1.2.3.4")
|
|
||||||
dip2 := netip.MustParseAddr("1.2.3.5")
|
|
||||||
req := ConnectorTransitIPRequest{
|
|
||||||
TransitIPs: []TransitIPRequest{
|
|
||||||
{TransitIP: tip, DestinationIP: dip},
|
|
||||||
{TransitIP: tip2, DestinationIP: dip2},
|
|
||||||
// can store same dst addr for multiple transit addrs
|
|
||||||
{TransitIP: tip3, DestinationIP: dip},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
resp := c.HandleConnectorTransitIPRequest(nid, req)
|
|
||||||
if len(resp.TransitIPs) != 3 {
|
|
||||||
t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 3; i++ {
|
|
||||||
got := resp.TransitIPs[i].Code
|
|
||||||
if got != TransitIPResponseCode(0) {
|
|
||||||
t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
gotAddr1 := c.transitIPTarget(nid, tip)
|
|
||||||
if gotAddr1 != dip {
|
|
||||||
t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip)
|
|
||||||
}
|
|
||||||
gotAddr2 := c.transitIPTarget(nid, tip2)
|
|
||||||
if gotAddr2 != dip2 {
|
|
||||||
t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip2)
|
|
||||||
}
|
|
||||||
gotAddr3 := c.transitIPTarget(nid, tip3)
|
|
||||||
if gotAddr3 != dip {
|
|
||||||
t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip3, gotAddr3, dip)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestHandleConnectorTransitIPRequestSameTIP tests that if we get
|
|
||||||
// a req that has more than one TransitIPRequest for the same transit addr
|
|
||||||
// only the first is stored, and the subsequent ones get an error code and
|
|
||||||
// message in the response.
|
|
||||||
func TestHandleConnectorTransitIPRequestSameTIP(t *testing.T) {
|
|
||||||
c := &Conn25{}
|
|
||||||
nid := tailcfg.NodeID(1)
|
|
||||||
tip := netip.MustParseAddr("0.0.0.1")
|
|
||||||
tip2 := netip.MustParseAddr("0.0.0.2")
|
|
||||||
dip := netip.MustParseAddr("1.2.3.4")
|
|
||||||
dip2 := netip.MustParseAddr("1.2.3.5")
|
|
||||||
dip3 := netip.MustParseAddr("1.2.3.6")
|
|
||||||
req := ConnectorTransitIPRequest{
|
|
||||||
TransitIPs: []TransitIPRequest{
|
|
||||||
{TransitIP: tip, DestinationIP: dip},
|
|
||||||
// cannot have dupe TransitIPs in one ConnectorTransitIPRequest
|
|
||||||
{TransitIP: tip, DestinationIP: dip2},
|
|
||||||
{TransitIP: tip2, DestinationIP: dip3},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
resp := c.HandleConnectorTransitIPRequest(nid, req)
|
|
||||||
if len(resp.TransitIPs) != 3 {
|
|
||||||
t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
|
|
||||||
}
|
|
||||||
|
|
||||||
got := resp.TransitIPs[0].Code
|
|
||||||
if got != TransitIPResponseCode(0) {
|
|
||||||
t.Fatalf("i=0 TransitIP Code: %d, want 0", got)
|
|
||||||
}
|
|
||||||
msg := resp.TransitIPs[0].Message
|
|
||||||
if msg != "" {
|
|
||||||
t.Fatalf("i=0 TransitIP Message: \"%s\", want \"%s\"", msg, "")
|
|
||||||
}
|
|
||||||
got1 := resp.TransitIPs[1].Code
|
|
||||||
if got1 != TransitIPResponseCode(1) {
|
|
||||||
t.Fatalf("i=1 TransitIP Code: %d, want 1", got1)
|
|
||||||
}
|
|
||||||
msg1 := resp.TransitIPs[1].Message
|
|
||||||
if msg1 != dupeTransitIPMessage {
|
|
||||||
t.Fatalf("i=1 TransitIP Message: \"%s\", want \"%s\"", msg1, dupeTransitIPMessage)
|
|
||||||
}
|
|
||||||
got2 := resp.TransitIPs[2].Code
|
|
||||||
if got2 != TransitIPResponseCode(0) {
|
|
||||||
t.Fatalf("i=2 TransitIP Code: %d, want 0", got2)
|
|
||||||
}
|
|
||||||
msg2 := resp.TransitIPs[2].Message
|
|
||||||
if msg2 != "" {
|
|
||||||
t.Fatalf("i=2 TransitIP Message: \"%s\", want \"%s\"", msg, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
gotAddr1 := c.transitIPTarget(nid, tip)
|
|
||||||
if gotAddr1 != dip {
|
|
||||||
t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip)
|
|
||||||
}
|
|
||||||
gotAddr2 := c.transitIPTarget(nid, tip2)
|
|
||||||
if gotAddr2 != dip3 {
|
|
||||||
t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip3)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestGetDstIPUnknownTIP tests that unknown transit addresses can be looked up without problem.
|
|
||||||
func TestTransitIPTargetUnknownTIP(t *testing.T) {
|
|
||||||
c := &Conn25{}
|
|
||||||
nid := tailcfg.NodeID(1)
|
|
||||||
tip := netip.MustParseAddr("0.0.0.1")
|
|
||||||
got := c.transitIPTarget(nid, tip)
|
|
||||||
want := netip.Addr{}
|
|
||||||
if got != want {
|
|
||||||
t.Fatalf("Unknown transit addr, want: %v, got %v", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,61 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
package appc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net/netip"
|
|
||||||
|
|
||||||
"go4.org/netipx"
|
|
||||||
)
|
|
||||||
|
|
||||||
// errPoolExhausted is returned when there are no more addresses to iterate over.
|
|
||||||
var errPoolExhausted = errors.New("ip pool exhausted")
|
|
||||||
|
|
||||||
// ippool allows for iteration over all the addresses within a netipx.IPSet.
|
|
||||||
// netipx.IPSet has a Ranges call that returns the "minimum and sorted set of IP ranges that covers [the set]".
|
|
||||||
// netipx.IPRange is "an inclusive range of IP addresses from the same address family.". So we can iterate over
|
|
||||||
// all the addresses in the set by keeping a track of the last address we returned, calling Next on the last address
|
|
||||||
// to get the new one, and if we run off the edge of the current range, starting on the next one.
|
|
||||||
type ippool struct {
|
|
||||||
// ranges defines the addresses in the pool
|
|
||||||
ranges []netipx.IPRange
|
|
||||||
// last is internal tracking of which the last address provided was.
|
|
||||||
last netip.Addr
|
|
||||||
// rangeIdx is internal tracking of which netipx.IPRange from the IPSet we are currently on.
|
|
||||||
rangeIdx int
|
|
||||||
}
|
|
||||||
|
|
||||||
func newIPPool(ipset *netipx.IPSet) *ippool {
|
|
||||||
if ipset == nil {
|
|
||||||
return &ippool{}
|
|
||||||
}
|
|
||||||
return &ippool{ranges: ipset.Ranges()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// next returns the next address from the set, or errPoolExhausted if we have
|
|
||||||
// iterated over the whole set.
|
|
||||||
func (ipp *ippool) next() (netip.Addr, error) {
|
|
||||||
if ipp.rangeIdx >= len(ipp.ranges) {
|
|
||||||
// ipset is empty or we have iterated off the end
|
|
||||||
return netip.Addr{}, errPoolExhausted
|
|
||||||
}
|
|
||||||
if !ipp.last.IsValid() {
|
|
||||||
// not initialized yet
|
|
||||||
ipp.last = ipp.ranges[0].From()
|
|
||||||
return ipp.last, nil
|
|
||||||
}
|
|
||||||
currRange := ipp.ranges[ipp.rangeIdx]
|
|
||||||
if ipp.last == currRange.To() {
|
|
||||||
// then we need to move to the next range
|
|
||||||
ipp.rangeIdx++
|
|
||||||
if ipp.rangeIdx >= len(ipp.ranges) {
|
|
||||||
return netip.Addr{}, errPoolExhausted
|
|
||||||
}
|
|
||||||
ipp.last = ipp.ranges[ipp.rangeIdx].From()
|
|
||||||
return ipp.last, nil
|
|
||||||
}
|
|
||||||
ipp.last = ipp.last.Next()
|
|
||||||
return ipp.last, nil
|
|
||||||
}
|
|
||||||
@ -1,60 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
package appc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net/netip"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"go4.org/netipx"
|
|
||||||
"tailscale.com/util/must"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNext(t *testing.T) {
|
|
||||||
a := ippool{}
|
|
||||||
_, err := a.next()
|
|
||||||
if !errors.Is(err, errPoolExhausted) {
|
|
||||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var isb netipx.IPSetBuilder
|
|
||||||
ipset := must.Get(isb.IPSet())
|
|
||||||
b := newIPPool(ipset)
|
|
||||||
_, err = b.next()
|
|
||||||
if !errors.Is(err, errPoolExhausted) {
|
|
||||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("192.168.0.0"), netip.MustParseAddr("192.168.0.2")))
|
|
||||||
isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("200.0.0.0"), netip.MustParseAddr("200.0.0.0")))
|
|
||||||
isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("201.0.0.0"), netip.MustParseAddr("201.0.0.1")))
|
|
||||||
ipset = must.Get(isb.IPSet())
|
|
||||||
c := newIPPool(ipset)
|
|
||||||
expected := []string{
|
|
||||||
"192.168.0.0",
|
|
||||||
"192.168.0.1",
|
|
||||||
"192.168.0.2",
|
|
||||||
"200.0.0.0",
|
|
||||||
"201.0.0.0",
|
|
||||||
"201.0.0.1",
|
|
||||||
}
|
|
||||||
for i, want := range expected {
|
|
||||||
addr, err := c.next()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if addr != netip.MustParseAddr(want) {
|
|
||||||
t.Fatalf("next call %d want: %s, got: %v", i, want, addr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, err = c.next()
|
|
||||||
if !errors.Is(err, errPoolExhausted) {
|
|
||||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
|
||||||
}
|
|
||||||
_, err = c.next()
|
|
||||||
if !errors.Is(err, errPoolExhausted) {
|
|
||||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,132 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
//go:build !ts_omit_appconnectors
|
|
||||||
|
|
||||||
package appc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/netip"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/net/dns/dnsmessage"
|
|
||||||
"tailscale.com/util/mak"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS
|
|
||||||
// response is being returned over the PeerAPI. The response is parsed and
|
|
||||||
// matched against the configured domains, if matched the routeAdvertiser is
|
|
||||||
// advised to advertise the discovered route.
|
|
||||||
func (e *AppConnector) ObserveDNSResponse(res []byte) error {
|
|
||||||
var p dnsmessage.Parser
|
|
||||||
if _, err := p.Start(res); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.SkipAllQuestions(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// cnameChain tracks a chain of CNAMEs for a given query in order to reverse
|
|
||||||
// a CNAME chain back to the original query for flattening. The keys are
|
|
||||||
// CNAME record targets, and the value is the name the record answers, so
|
|
||||||
// for www.example.com CNAME example.com, the map would contain
|
|
||||||
// ["example.com"] = "www.example.com".
|
|
||||||
var cnameChain map[string]string
|
|
||||||
|
|
||||||
// addressRecords is a list of address records found in the response.
|
|
||||||
var addressRecords map[string][]netip.Addr
|
|
||||||
|
|
||||||
for {
|
|
||||||
h, err := p.AnswerHeader()
|
|
||||||
if err == dnsmessage.ErrSectionDone {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.Class != dnsmessage.ClassINET {
|
|
||||||
if err := p.SkipAnswer(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch h.Type {
|
|
||||||
case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA:
|
|
||||||
default:
|
|
||||||
if err := p.SkipAnswer(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".")
|
|
||||||
if len(domain) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.Type == dnsmessage.TypeCNAME {
|
|
||||||
res, err := p.CNAMEResource()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".")
|
|
||||||
if len(cname) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
mak.Set(&cnameChain, cname, domain)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch h.Type {
|
|
||||||
case dnsmessage.TypeA:
|
|
||||||
r, err := p.AResource()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
addr := netip.AddrFrom4(r.A)
|
|
||||||
mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
|
|
||||||
case dnsmessage.TypeAAAA:
|
|
||||||
r, err := p.AAAAResource()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
addr := netip.AddrFrom16(r.AAAA)
|
|
||||||
mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
|
|
||||||
default:
|
|
||||||
if err := p.SkipAnswer(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
e.mu.Lock()
|
|
||||||
defer e.mu.Unlock()
|
|
||||||
|
|
||||||
for domain, addrs := range addressRecords {
|
|
||||||
domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain)
|
|
||||||
|
|
||||||
// domain and none of the CNAMEs in the chain are routed
|
|
||||||
if !isRouted {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// advertise each address we have learned for the routed domain, that
|
|
||||||
// was not already known.
|
|
||||||
var toAdvertise []netip.Prefix
|
|
||||||
for _, addr := range addrs {
|
|
||||||
if !e.isAddrKnownLocked(domain, addr) {
|
|
||||||
toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(toAdvertise) > 0 {
|
|
||||||
e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise)
|
|
||||||
e.scheduleAdvertisement(domain, toAdvertise...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@ -1,8 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build ts_omit_appconnectors

package appc

func (e *AppConnector) ObserveDNSResponse(res []byte) error { return nil }
@ -1,14 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !windows

package atomicfile

import (
	"os"
)

func rename(srcFile, destFile string) error {
	return os.Rename(srcFile, destFile)
}
@ -1,33 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package atomicfile

import (
	"os"

	"golang.org/x/sys/windows"
)

func rename(srcFile, destFile string) error {
	// Use replaceFile when possible to preserve the original file's attributes and ACLs.
	if err := replaceFile(destFile, srcFile); err == nil || err != windows.ERROR_FILE_NOT_FOUND {
		return err
	}
	// destFile doesn't exist. Just do a normal rename.
	return os.Rename(srcFile, destFile)
}

func replaceFile(destFile, srcFile string) error {
	destFile16, err := windows.UTF16PtrFromString(destFile)
	if err != nil {
		return err
	}

	srcFile16, err := windows.UTF16PtrFromString(srcFile)
	if err != nil {
		return err
	}

	return replaceFileW(destFile16, srcFile16, nil, 0, nil, nil)
}
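For context, the rename above is the last step of atomicfile.WriteFile, which stages data in a temporary file and then swaps it into place; on Windows the ReplaceFileW path keeps the destination's existing attributes and ACLs, and the ERROR_FILE_NOT_FOUND fallback covers the very first write. A minimal usage sketch (file name and payload are made up):

package main

import (
	"log"

	"tailscale.com/atomicfile"
)

func main() {
	// First write: the destination does not exist yet, so on Windows the
	// ReplaceFileW call fails with ERROR_FILE_NOT_FOUND and rename falls
	// back to a plain os.Rename.
	if err := atomicfile.WriteFile("settings.json", []byte(`{"v":1}`), 0o600); err != nil {
		log.Fatal(err)
	}
	// Subsequent writes replace the file in place and preserve any custom
	// DACL that was applied to settings.json in the meantime.
	if err := atomicfile.WriteFile("settings.json", []byte(`{"v":2}`), 0o600); err != nil {
		log.Fatal(err)
	}
}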
@ -1,146 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
package atomicfile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _SECURITY_RESOURCE_MANAGER_AUTHORITY = windows.SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 9}}
|
|
||||||
|
|
||||||
// makeRandomSID generates a SID derived from a v4 GUID.
|
|
||||||
// This is basically the same algorithm used by browser sandboxes for generating
|
|
||||||
// random SIDs.
|
|
||||||
func makeRandomSID() (*windows.SID, error) {
|
|
||||||
guid, err := windows.GenerateGUID()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rids := *((*[4]uint32)(unsafe.Pointer(&guid)))
|
|
||||||
|
|
||||||
var pSID *windows.SID
|
|
||||||
if err := windows.AllocateAndInitializeSid(&_SECURITY_RESOURCE_MANAGER_AUTHORITY, 4, rids[0], rids[1], rids[2], rids[3], 0, 0, 0, 0, &pSID); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer windows.FreeSid(pSID)
|
|
||||||
|
|
||||||
// Make a copy that lives on the Go heap
|
|
||||||
return pSID.Copy()
|
|
||||||
}
|
|
||||||
|
|
||||||
func getExistingFileSD(name string) (*windows.SECURITY_DESCRIPTOR, error) {
|
|
||||||
const infoFlags = windows.DACL_SECURITY_INFORMATION
|
|
||||||
return windows.GetNamedSecurityInfo(name, windows.SE_FILE_OBJECT, infoFlags)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getExistingFileDACL(name string) (*windows.ACL, error) {
|
|
||||||
sd, err := getExistingFileSD(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
dacl, _, err := sd.DACL()
|
|
||||||
return dacl, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func addDenyACEForRandomSID(dacl *windows.ACL) (*windows.ACL, error) {
|
|
||||||
randomSID, err := makeRandomSID()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
randomSIDTrustee := windows.TRUSTEE{nil, windows.NO_MULTIPLE_TRUSTEE,
|
|
||||||
windows.TRUSTEE_IS_SID, windows.TRUSTEE_IS_UNKNOWN,
|
|
||||||
windows.TrusteeValueFromSID(randomSID)}
|
|
||||||
|
|
||||||
entries := []windows.EXPLICIT_ACCESS{
|
|
||||||
{
|
|
||||||
windows.GENERIC_ALL,
|
|
||||||
windows.DENY_ACCESS,
|
|
||||||
windows.NO_INHERITANCE,
|
|
||||||
randomSIDTrustee,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
return windows.ACLFromEntries(entries, dacl)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setExistingFileDACL(name string, dacl *windows.ACL) error {
|
|
||||||
return windows.SetNamedSecurityInfo(name, windows.SE_FILE_OBJECT,
|
|
||||||
windows.DACL_SECURITY_INFORMATION, nil, nil, dacl, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeOrigFileWithCustomDACL creates a new, temporary file with a custom
|
|
||||||
// DACL that we can check for later. It returns the name of the temporary
|
|
||||||
// file and the security descriptor for the file in SDDL format.
|
|
||||||
func makeOrigFileWithCustomDACL() (name, sddl string, err error) {
|
|
||||||
f, err := os.CreateTemp("", "foo*.tmp")
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
name = f.Name()
|
|
||||||
if err := f.Close(); err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
f = nil
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
os.Remove(name)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
dacl, err := getExistingFileDACL(name)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a harmless, deny-only ACE for a random SID that isn't used for anything
|
|
||||||
// (but that we can check for later).
|
|
||||||
dacl, err = addDenyACEForRandomSID(dacl)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := setExistingFileDACL(name, dacl); err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
sd, err := getExistingFileSD(name)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return name, sd.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPreserveSecurityInfo(t *testing.T) {
|
|
||||||
// Make a test file with a custom ACL.
|
|
||||||
origFileName, want, err := makeOrigFileWithCustomDACL()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("makeOrigFileWithCustomDACL returned %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() {
|
|
||||||
os.Remove(origFileName)
|
|
||||||
})
|
|
||||||
|
|
||||||
if err := WriteFile(origFileName, []byte{}, 0); err != nil {
|
|
||||||
t.Fatalf("WriteFile returned %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We expect origFileName's security descriptor to be unchanged despite
|
|
||||||
// the WriteFile call.
|
|
||||||
sd, err := getExistingFileSD(origFileName)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("getExistingFileSD(%q) returned %v", origFileName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if got := sd.String(); got != want {
|
|
||||||
t.Errorf("security descriptor comparison failed: got %q, want %q", got, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,8 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package atomicfile

//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go

//sys replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) [int32(failretval)==0] = kernel32.ReplaceFileW
@ -1,52 +0,0 @@
|
|||||||
// Code generated by 'go generate'; DO NOT EDIT.
|
|
||||||
|
|
||||||
package atomicfile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ unsafe.Pointer
|
|
||||||
|
|
||||||
// Do the interface allocations only once for common
|
|
||||||
// Errno values.
|
|
||||||
const (
|
|
||||||
errnoERROR_IO_PENDING = 997
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
|
||||||
errERROR_EINVAL error = syscall.EINVAL
|
|
||||||
)
|
|
||||||
|
|
||||||
// errnoErr returns common boxed Errno values, to prevent
|
|
||||||
// allocations at runtime.
|
|
||||||
func errnoErr(e syscall.Errno) error {
|
|
||||||
switch e {
|
|
||||||
case 0:
|
|
||||||
return errERROR_EINVAL
|
|
||||||
case errnoERROR_IO_PENDING:
|
|
||||||
return errERROR_IO_PENDING
|
|
||||||
}
|
|
||||||
// TODO: add more here, after collecting data on the common
|
|
||||||
// error values see on Windows. (perhaps when running
|
|
||||||
// all.bat?)
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
|
||||||
|
|
||||||
procReplaceFileW = modkernel32.NewProc("ReplaceFileW")
|
|
||||||
)
|
|
||||||
|
|
||||||
func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) {
|
|
||||||
r1, _, e1 := syscall.SyscallN(procReplaceFileW.Addr(), uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved))
|
|
||||||
if int32(r1) == 0 {
|
|
||||||
err = errnoErr(e1)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
@ -1,151 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
//go:build !js && !ts_omit_acme
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"go4.org/mem"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SetDNS adds a DNS TXT record for the given domain name, containing
|
|
||||||
// the provided TXT value. The intended use case is answering
|
|
||||||
// LetsEncrypt/ACME dns-01 challenges.
|
|
||||||
//
|
|
||||||
// The control plane will only permit SetDNS requests with very
|
|
||||||
// specific names and values. The name should be
|
|
||||||
// "_acme-challenge." + your node's MagicDNS name. It's expected that
|
|
||||||
// clients cache the certs from LetsEncrypt (or whichever CA is
|
|
||||||
// providing them) and only request new ones as needed; the control plane
|
|
||||||
// rate limits SetDNS requests.
|
|
||||||
//
|
|
||||||
// This is a low-level interface; it's expected that most Tailscale
|
|
||||||
// users use a higher level interface to getting/using TLS
|
|
||||||
// certificates.
|
|
||||||
func (lc *Client) SetDNS(ctx context.Context, name, value string) error {
|
|
||||||
v := url.Values{}
|
|
||||||
v.Set("name", name)
|
|
||||||
v.Set("value", value)
|
|
||||||
_, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// CertPair returns a cert and private key for the provided DNS domain.
|
|
||||||
//
|
|
||||||
// It returns a cached certificate from disk if it's still valid.
|
|
||||||
//
|
|
||||||
// Deprecated: use [Client.CertPair].
|
|
||||||
func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
|
|
||||||
return defaultClient.CertPair(ctx, domain)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CertPair returns a cert and private key for the provided DNS domain.
|
|
||||||
//
|
|
||||||
// It returns a cached certificate from disk if it's still valid.
|
|
||||||
//
|
|
||||||
// API maturity: this is considered a stable API.
|
|
||||||
func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
|
|
||||||
return lc.CertPairWithValidity(ctx, domain, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CertPairWithValidity returns a cert and private key for the provided DNS
|
|
||||||
// domain.
|
|
||||||
//
|
|
||||||
// It returns a cached certificate from disk if it's still valid.
|
|
||||||
// When minValidity is non-zero, the returned certificate will be valid for at
|
|
||||||
// least the given duration, if permitted by the CA. If the certificate is
|
|
||||||
// valid, but for less than minValidity, it will be synchronously renewed.
|
|
||||||
//
|
|
||||||
// API maturity: this is considered a stable API.
|
|
||||||
func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) {
|
|
||||||
res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
// with ?type=pair, the response PEM is first the one private
|
|
||||||
// key PEM block, then the cert PEM blocks.
|
|
||||||
i := mem.Index(mem.B(res), mem.S("--\n--"))
|
|
||||||
if i == -1 {
|
|
||||||
return nil, nil, fmt.Errorf("unexpected output: no delimiter")
|
|
||||||
}
|
|
||||||
i += len("--\n")
|
|
||||||
keyPEM, certPEM = res[:i], res[i:]
|
|
||||||
if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) {
|
|
||||||
return nil, nil, fmt.Errorf("unexpected output: key in cert")
|
|
||||||
}
|
|
||||||
return certPEM, keyPEM, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi.
|
|
||||||
//
|
|
||||||
// It returns a cached certificate from disk if it's still valid.
|
|
||||||
//
|
|
||||||
// It's the right signature to use as the value of
|
|
||||||
// [tls.Config.GetCertificate].
|
|
||||||
//
|
|
||||||
// Deprecated: use [Client.GetCertificate].
|
|
||||||
func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
|
||||||
return defaultClient.GetCertificate(hi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi.
|
|
||||||
//
|
|
||||||
// It returns a cached certificate from disk if it's still valid.
|
|
||||||
//
|
|
||||||
// It's the right signature to use as the value of
|
|
||||||
// [tls.Config.GetCertificate].
|
|
||||||
//
|
|
||||||
// API maturity: this is considered a stable API.
|
|
||||||
func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
|
||||||
if hi == nil || hi.ServerName == "" {
|
|
||||||
return nil, errors.New("no SNI ServerName")
|
|
||||||
}
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
name := hi.ServerName
|
|
||||||
if !strings.Contains(name, ".") {
|
|
||||||
if v, ok := lc.ExpandSNIName(ctx, name); ok {
|
|
||||||
name = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
certPEM, keyPEM, err := lc.CertPair(ctx, name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
cert, err := tls.X509KeyPair(certPEM, keyPEM)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &cert, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExpandSNIName expands bare label name into the most likely actual TLS cert name.
|
|
||||||
//
|
|
||||||
// Deprecated: use [Client.ExpandSNIName].
|
|
||||||
func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) {
|
|
||||||
return defaultClient.ExpandSNIName(ctx, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExpandSNIName expands bare label name into the most likely actual TLS cert name.
|
|
||||||
func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) {
|
|
||||||
st, err := lc.StatusWithoutPeers(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
for _, d := range st.CertDomains {
|
|
||||||
if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' {
|
|
||||||
return d, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
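Because Client.GetCertificate above has the same signature as tls.Config.GetCertificate, it can be plugged straight into a standard library HTTPS server. A minimal sketch, assuming the tailnet has HTTPS certificates enabled and tailscaled is running locally:

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"tailscale.com/client/local"
)

func main() {
	var lc local.Client // talks to the local tailscaled over LocalAPI

	srv := &http.Server{
		Addr: ":443",
		TLSConfig: &tls.Config{
			// Certificates are fetched (and cached) by tailscaled via the
			// CertPair/GetCertificate helpers shown above.
			GetCertificate: lc.GetCertificate,
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello over HTTPS\n"))
		}),
	}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}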
@ -1,84 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !ts_omit_debugportmapper

package local

import (
	"cmp"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/netip"
	"net/url"
	"strconv"
	"time"

	"tailscale.com/client/tailscale/apitype"
)

// DebugPortmapOpts contains options for the [Client.DebugPortmap] command.
type DebugPortmapOpts struct {
	// Duration is how long the mapping should be created for. It defaults
	// to 5 seconds if not set.
	Duration time.Duration

	// Type is the kind of portmap to debug. The empty string instructs the
	// portmap client to perform all known types. Other valid options are
	// "pmp", "pcp", and "upnp".
	Type string

	// GatewayAddr specifies the gateway address used during portmapping.
	// If set, SelfAddr must also be set. If unset, it will be
	// autodetected.
	GatewayAddr netip.Addr

	// SelfAddr specifies the self (source) address used during portmapping.
	// If set, GatewayAddr must also be set. If unset, it will be
	// autodetected.
	SelfAddr netip.Addr

	// LogHTTP instructs the debug-portmap endpoint to print all HTTP
	// requests and responses made to the logs.
	LogHTTP bool
}

// DebugPortmap invokes the debug-portmap endpoint, and returns an
// io.ReadCloser that can be used to read the logs that are printed during this
// process.
//
// opts can be nil; if so, default values will be used.
func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) {
	vals := make(url.Values)
	if opts == nil {
		opts = &DebugPortmapOpts{}
	}

	vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String())
	vals.Set("type", opts.Type)
	vals.Set("log_http", strconv.FormatBool(opts.LogHTTP))

	if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() {
		return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is")
	} else if opts.GatewayAddr.IsValid() {
		vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr))
	}

	req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil)
	if err != nil {
		return nil, err
	}
	res, err := lc.doLocalRequestNiceError(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode != 200 {
		body, _ := io.ReadAll(res.Body)
		res.Body.Close()
		return nil, fmt.Errorf("HTTP %s: %s", res.Status, body)
	}

	return res.Body, nil
}
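A minimal sketch of driving DebugPortmap from a tool and streaming its log output; the ten-second duration and the "pmp" type are arbitrary choices for illustration:

package main

import (
	"context"
	"io"
	"log"
	"os"
	"time"

	"tailscale.com/client/local"
)

func main() {
	var lc local.Client
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Probe only NAT-PMP for ten seconds; an empty Type would try all known protocols.
	rc, err := lc.DebugPortmap(ctx, &local.DebugPortmapOpts{
		Duration: 10 * time.Second,
		Type:     "pmp",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // stream the portmapper's logs as they are produced
}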
@ -1,55 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !ts_omit_serve

package local

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"tailscale.com/ipn"
)

// GetServeConfig returns the current serve config.
//
// If the serve config is empty, an empty ipn.ServeConfig (carrying only the
// ETag) is returned.
func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) {
	body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil)
	if err != nil {
		return nil, fmt.Errorf("getting serve config: %w", err)
	}
	sc, err := getServeConfigFromJSON(body)
	if err != nil {
		return nil, err
	}
	if sc == nil {
		sc = new(ipn.ServeConfig)
	}
	sc.ETag = h.Get("Etag")
	return sc, nil
}

func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) {
	if err := json.Unmarshal(body, &sc); err != nil {
		return nil, err
	}
	return sc, nil
}

// SetServeConfig sets or replaces the serving settings.
// If config is nil, settings are cleared and serving is disabled.
func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error {
	h := make(http.Header)
	if config != nil {
		h.Set("If-Match", config.ETag)
	}
	_, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h)
	if err != nil {
		return fmt.Errorf("sending serve config: %w", err)
	}
	return nil
}
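GetServeConfig and SetServeConfig form a read-modify-write pair: the ETag captured on GET is replayed as If-Match on POST, so concurrent editors cannot silently overwrite each other. A minimal sketch of that round trip (the actual mutation of the config is elided):

package main

import (
	"context"
	"log"

	"tailscale.com/client/local"
)

func main() {
	var lc local.Client
	ctx := context.Background()

	sc, err := lc.GetServeConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// ... mutate sc here (add or remove handlers) ...

	// sc still carries the ETag from the GET above, so SetServeConfig sends
	// it as If-Match and the write fails if someone else changed the config
	// in between.
	if err := lc.SetServeConfig(ctx, sc); err != nil {
		log.Fatal(err)
	}
}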
@ -1,40 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !ts_omit_syspolicy

package local

import (
	"context"
	"net/http"

	"tailscale.com/util/syspolicy/setting"
)

// GetEffectivePolicy returns the effective policy for the specified scope.
func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) {
	scopeID, err := scope.MarshalText()
	if err != nil {
		return nil, err
	}
	body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID))
	if err != nil {
		return nil, err
	}
	return decodeJSON[*setting.Snapshot](body)
}

// ReloadEffectivePolicy reloads the effective policy for the specified scope
// by reading and merging policy settings from all applicable policy sources.
func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) {
	scopeID, err := scope.MarshalText()
	if err != nil {
		return nil, err
	}
	body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody)
	if err != nil {
		return nil, err
	}
	return decodeJSON[*setting.Snapshot](body)
}
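A minimal sketch of reading the merged policy via GetEffectivePolicy; it assumes the setting package's DeviceScope value for the device-wide scope:

package main

import (
	"context"
	"log"

	"tailscale.com/client/local"
	"tailscale.com/util/syspolicy/setting"
)

func main() {
	var lc local.Client
	ctx := context.Background()

	// Fetch the merged device-wide policy snapshot from tailscaled.
	snap, err := lc.GetEffectivePolicy(ctx, setting.DeviceScope)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("effective policy: %v", snap)
}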
@ -1,204 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
//go:build !ts_omit_tailnetlock
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"tailscale.com/ipn/ipnstate"
|
|
||||||
"tailscale.com/tka"
|
|
||||||
"tailscale.com/types/key"
|
|
||||||
"tailscale.com/types/tkatype"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NetworkLockStatus fetches information about the tailnet key authority, if one is configured.
|
|
||||||
func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) {
|
|
||||||
body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return decodeJSON[*ipnstate.NetworkLockStatus](body)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockInit initializes the tailnet key authority.
|
|
||||||
//
|
|
||||||
// TODO(tom): Plumb through disablement secrets.
|
|
||||||
func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
type initRequest struct {
|
|
||||||
Keys []tka.Key
|
|
||||||
DisablementValues [][]byte
|
|
||||||
SupportDisablement []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return decodeJSON[*ipnstate.NetworkLockStatus](body)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockWrapPreauthKey wraps a pre-auth key with information to
|
|
||||||
// enable unattended bringup in the locked tailnet.
|
|
||||||
func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) {
|
|
||||||
encodedPrivate, err := tkaKey.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
var b bytes.Buffer
|
|
||||||
type wrapRequest struct {
|
|
||||||
TSKey string
|
|
||||||
TKAKey string // key.NLPrivate.MarshalText
|
|
||||||
}
|
|
||||||
if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return string(body), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockModify adds and/or removes key(s) to the tailnet key authority.
|
|
||||||
func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error {
|
|
||||||
var b bytes.Buffer
|
|
||||||
type modifyRequest struct {
|
|
||||||
AddKeys []tka.Key
|
|
||||||
RemoveKeys []tka.Key
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil {
|
|
||||||
return fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockSign signs the specified node-key and transmits that signature to the control plane.
|
|
||||||
// rotationPublic, if specified, must be an ed25519 public key.
|
|
||||||
func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error {
|
|
||||||
var b bytes.Buffer
|
|
||||||
type signRequest struct {
|
|
||||||
NodeKey key.NodePublic
|
|
||||||
RotationPublic []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil {
|
|
||||||
return fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockAffectedSigs returns all signatures signed by the specified keyID.
|
|
||||||
func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) {
|
|
||||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return decodeJSON[[]tkatype.MarshaledSignature](body)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockLog returns up to maxEntries number of changes to network-lock state.
|
|
||||||
func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) {
|
|
||||||
v := url.Values{}
|
|
||||||
v.Set("limit", fmt.Sprint(maxEntries))
|
|
||||||
body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error %w: %s", err, body)
|
|
||||||
}
|
|
||||||
return decodeJSON[[]ipnstate.NetworkLockUpdate](body)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockForceLocalDisable forcibly shuts down network lock on this node.
|
|
||||||
func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error {
|
|
||||||
// This endpoint expects an empty JSON stanza as the payload.
|
|
||||||
var b bytes.Buffer
|
|
||||||
if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil {
|
|
||||||
return fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained
|
|
||||||
// in url and returns information extracted from it.
|
|
||||||
func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) {
|
|
||||||
vr := struct {
|
|
||||||
URL string
|
|
||||||
}{url}
|
|
||||||
|
|
||||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("sending verify-deeplink: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return decodeJSON[*tka.DeeplinkValidationResult](body)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise.
|
|
||||||
func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) {
|
|
||||||
vr := struct {
|
|
||||||
Keys []tkatype.KeyID
|
|
||||||
ForkFrom string
|
|
||||||
}{removeKeys, forkFrom.String()}
|
|
||||||
|
|
||||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("sending generate-recovery-aum: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key.
|
|
||||||
func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) {
|
|
||||||
r := bytes.NewReader(aum.Serialize())
|
|
||||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane.
|
|
||||||
func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error {
|
|
||||||
r := bytes.NewReader(aum.Serialize())
|
|
||||||
_, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("sending cosign-recovery-aum: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkLockDisable shuts down network-lock across the tailnet.
|
|
||||||
func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error {
|
|
||||||
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil {
|
|
||||||
return fmt.Errorf("error: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
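A minimal sketch of querying tailnet lock state via NetworkLockStatus; the Enabled and NodeKeySigned fields of ipnstate.NetworkLockStatus are assumed here for illustration:

package main

import (
	"context"
	"log"

	"tailscale.com/client/local"
)

func main() {
	var lc local.Client
	st, err := lc.NetworkLockStatus(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if !st.Enabled {
		log.Println("tailnet lock is not enabled")
		return
	}
	log.Printf("tailnet lock enabled; this node's key signed: %v", st.NodeKeySigned)
}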
@ -1,327 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
//go:build cgo || !darwin
|
|
||||||
|
|
||||||
package systray
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"image"
|
|
||||||
"image/color"
|
|
||||||
"image/png"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"fyne.io/systray"
|
|
||||||
ico "github.com/Kodeworks/golang-image-ico"
|
|
||||||
"github.com/fogleman/gg"
|
|
||||||
)
|
|
||||||
|
|
||||||
// tsLogo represents the Tailscale logo displayed as the systray icon.
|
|
||||||
type tsLogo struct {
|
|
||||||
// dots represents the state of the 3x3 dot grid in the logo.
|
|
||||||
// A 0 represents a gray dot, any other value is a white dot.
|
|
||||||
dots [9]byte
|
|
||||||
|
|
||||||
// dotMask returns an image mask to be used when rendering the logo dots.
|
|
||||||
dotMask func(dc *gg.Context, borderUnits int, radius int) *image.Alpha
|
|
||||||
|
|
||||||
// overlay is called after the dots are rendered to draw an additional overlay.
|
|
||||||
overlay func(dc *gg.Context, borderUnits int, radius int)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// disconnected is all gray dots
|
|
||||||
disconnected = tsLogo{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
}}
|
|
||||||
|
|
||||||
// connected is the normal Tailscale logo
|
|
||||||
connected = tsLogo{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
1, 1, 1,
|
|
||||||
0, 1, 0,
|
|
||||||
}}
|
|
||||||
|
|
||||||
// loading is a special tsLogo value that is not meant to be rendered directly,
|
|
||||||
// but indicates that the loading animation should be shown.
|
|
||||||
loading = tsLogo{dots: [9]byte{'l', 'o', 'a', 'd', 'i', 'n', 'g'}}
|
|
||||||
|
|
||||||
// loadingIcons are shown in sequence as an animated loading icon.
|
|
||||||
loadingLogos = []tsLogo{
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 1, 1,
|
|
||||||
1, 0, 1,
|
|
||||||
0, 0, 1,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 1, 1,
|
|
||||||
0, 0, 1,
|
|
||||||
0, 1, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 1, 1,
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 1,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 1,
|
|
||||||
0, 1, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 1, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 1,
|
|
||||||
0, 0, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 1,
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
1, 0, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
0, 0, 0,
|
|
||||||
1, 1, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
1, 0, 0,
|
|
||||||
1, 1, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
1, 1, 0,
|
|
||||||
0, 1, 0,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
1, 1, 0,
|
|
||||||
0, 1, 1,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
1, 1, 1,
|
|
||||||
0, 0, 1,
|
|
||||||
}},
|
|
||||||
{dots: [9]byte{
|
|
||||||
0, 1, 0,
|
|
||||||
0, 1, 1,
|
|
||||||
1, 0, 1,
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
|
|
||||||
// exitNodeOnline is the Tailscale logo with an additional arrow overlay in the corner.
|
|
||||||
exitNodeOnline = tsLogo{
|
|
||||||
dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
1, 1, 1,
|
|
||||||
0, 1, 0,
|
|
||||||
},
|
|
||||||
// draw an arrow mask in the bottom right corner with a reasonably thick line width.
|
|
||||||
dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha {
|
|
||||||
bu, r := float64(borderUnits), float64(radius)
|
|
||||||
|
|
||||||
x1 := r * (bu + 3.5)
|
|
||||||
y := r * (bu + 7)
|
|
||||||
x2 := x1 + (r * 5)
|
|
||||||
|
|
||||||
mc := gg.NewContext(dc.Width(), dc.Height())
|
|
||||||
mc.DrawLine(x1, y, x2, y) // arrow center line
|
|
||||||
mc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) // top of arrow tip
|
|
||||||
mc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) // bottom of arrow tip
|
|
||||||
mc.SetLineWidth(r * 3)
|
|
||||||
mc.Stroke()
|
|
||||||
return mc.AsMask()
|
|
||||||
},
|
|
||||||
// draw an arrow in the bottom right corner over the masked area.
|
|
||||||
overlay: func(dc *gg.Context, borderUnits int, radius int) {
|
|
||||||
bu, r := float64(borderUnits), float64(radius)
|
|
||||||
|
|
||||||
x1 := r * (bu + 3.5)
|
|
||||||
y := r * (bu + 7)
|
|
||||||
x2 := x1 + (r * 5)
|
|
||||||
|
|
||||||
dc.DrawLine(x1, y, x2, y) // arrow center line
|
|
||||||
dc.DrawLine(x2-(1.5*r), y-(1.5*r), x2, y) // top of arrow tip
|
|
||||||
dc.DrawLine(x2-(1.5*r), y+(1.5*r), x2, y) // bottom of arrow tip
|
|
||||||
dc.SetColor(fg)
|
|
||||||
dc.SetLineWidth(r)
|
|
||||||
dc.Stroke()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// exitNodeOffline is the Tailscale logo with a red "x" in the corner.
|
|
||||||
exitNodeOffline = tsLogo{
|
|
||||||
dots: [9]byte{
|
|
||||||
0, 0, 0,
|
|
||||||
1, 1, 1,
|
|
||||||
0, 1, 0,
|
|
||||||
},
|
|
||||||
// Draw a square that hides the four dots in the bottom right corner,
|
|
||||||
dotMask: func(dc *gg.Context, borderUnits int, radius int) *image.Alpha {
|
|
||||||
bu, r := float64(borderUnits), float64(radius)
|
|
||||||
x := r * (bu + 3)
|
|
||||||
|
|
||||||
mc := gg.NewContext(dc.Width(), dc.Height())
|
|
||||||
mc.DrawRectangle(x, x, r*6, r*6)
|
|
||||||
mc.Fill()
|
|
||||||
return mc.AsMask()
|
|
||||||
},
|
|
||||||
// draw a red "x" over the bottom right corner.
|
|
||||||
overlay: func(dc *gg.Context, borderUnits int, radius int) {
|
|
||||||
bu, r := float64(borderUnits), float64(radius)
|
|
||||||
|
|
||||||
x1 := r * (bu + 4)
|
|
||||||
x2 := x1 + (r * 3.5)
|
|
||||||
dc.DrawLine(x1, x1, x2, x2) // top-left to bottom-right stroke
|
|
||||||
dc.DrawLine(x1, x2, x2, x1) // bottom-left to top-right stroke
|
|
||||||
dc.SetColor(red)
|
|
||||||
dc.SetLineWidth(r)
|
|
||||||
dc.Stroke()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
bg = color.NRGBA{0, 0, 0, 255}
|
|
||||||
fg = color.NRGBA{255, 255, 255, 255}
|
|
||||||
gray = color.NRGBA{255, 255, 255, 102}
|
|
||||||
red = color.NRGBA{229, 111, 74, 255}
|
|
||||||
)
|
|
||||||
|
|
||||||
// render returns a PNG image of the logo.
|
|
||||||
func (logo tsLogo) render() *bytes.Buffer {
|
|
||||||
const borderUnits = 1
|
|
||||||
return logo.renderWithBorder(borderUnits)
|
|
||||||
}
|
|
||||||
|
|
||||||
// renderWithBorder returns a PNG image of the logo with the specified border width.
|
|
||||||
// One border unit is equal to the radius of a tailscale logo dot.
|
|
||||||
func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer {
|
|
||||||
const radius = 25
|
|
||||||
dim := radius * (8 + borderUnits*2)
|
|
||||||
|
|
||||||
dc := gg.NewContext(dim, dim)
|
|
||||||
dc.DrawRectangle(0, 0, float64(dim), float64(dim))
|
|
||||||
dc.SetColor(bg)
|
|
||||||
dc.Fill()
|
|
||||||
|
|
||||||
if logo.dotMask != nil {
|
|
||||||
mask := logo.dotMask(dc, borderUnits, radius)
|
|
||||||
dc.SetMask(mask)
|
|
||||||
dc.InvertMask()
|
|
||||||
}
|
|
||||||
|
|
||||||
for y := 0; y < 3; y++ {
|
|
||||||
for x := 0; x < 3; x++ {
|
|
||||||
px := (borderUnits + 1 + 3*x) * radius
|
|
||||||
py := (borderUnits + 1 + 3*y) * radius
|
|
||||||
col := fg
|
|
||||||
if logo.dots[y*3+x] == 0 {
|
|
||||||
col = gray
|
|
||||||
}
|
|
||||||
dc.DrawCircle(float64(px), float64(py), radius)
|
|
||||||
dc.SetColor(col)
|
|
||||||
dc.Fill()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if logo.overlay != nil {
|
|
||||||
dc.ResetClip()
|
|
||||||
logo.overlay(dc, borderUnits, radius)
|
|
||||||
}
|
|
||||||
|
|
||||||
b := bytes.NewBuffer(nil)
|
|
||||||
|
|
||||||
// Encode as ICO format on Windows, PNG on all other platforms.
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
_ = ico.Encode(b, dc.Image())
|
|
||||||
} else {
|
|
||||||
_ = png.Encode(b, dc.Image())
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// setAppIcon renders logo and sets it as the systray icon.
|
|
||||||
func setAppIcon(icon tsLogo) {
|
|
||||||
if icon.dots == loading.dots {
|
|
||||||
startLoadingAnimation()
|
|
||||||
} else {
|
|
||||||
stopLoadingAnimation()
|
|
||||||
systray.SetIcon(icon.render().Bytes())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
loadingMu sync.Mutex // protects loadingCancel
|
|
||||||
|
|
||||||
// loadingCancel stops the loading animation in the systray icon.
|
|
||||||
// This is nil if the animation is not currently active.
|
|
||||||
loadingCancel func()
|
|
||||||
)
|
|
||||||
|
|
||||||
// startLoadingAnimation starts the animated loading icon in the system tray.
|
|
||||||
// The animation continues until [stopLoadingAnimation] is called.
|
|
||||||
// If the loading animation is already active, this func does nothing.
|
|
||||||
func startLoadingAnimation() {
|
|
||||||
loadingMu.Lock()
|
|
||||||
defer loadingMu.Unlock()
|
|
||||||
|
|
||||||
if loadingCancel != nil {
|
|
||||||
// loading icon already displayed
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
ctx, loadingCancel = context.WithCancel(ctx)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
t := time.NewTicker(500 * time.Millisecond)
|
|
||||||
var i int
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-t.C:
|
|
||||||
systray.SetIcon(loadingLogos[i].render().Bytes())
|
|
||||||
i++
|
|
||||||
if i >= len(loadingLogos) {
|
|
||||||
i = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// stopLoadingAnimation stops the animated loading icon in the system tray.
|
|
||||||
// If the loading animation is not currently active, this func does nothing.
|
|
||||||
func stopLoadingAnimation() {
|
|
||||||
loadingMu.Lock()
|
|
||||||
defer loadingMu.Unlock()
|
|
||||||
|
|
||||||
if loadingCancel != nil {
|
|
||||||
loadingCancel()
|
|
||||||
loadingCancel = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,76 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build cgo || !darwin

// Package systray provides a minimal Tailscale systray application.
package systray

import (
	"bufio"
	"bytes"
	_ "embed"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

//go:embed tailscale-systray.service
var embedSystemd string

func InstallStartupScript(initSystem string) error {
	switch initSystem {
	case "systemd":
		return installSystemd()
	default:
		return fmt.Errorf("unsupported init system '%s'", initSystem)
	}
}

func installSystemd() error {
	// Find the path to tailscale, just in case it's not where the example file
	// has it placed, and replace that before writing the file.
	tailscaleBin, err := exec.LookPath("tailscale")
	if err != nil {
		return fmt.Errorf("failed to find tailscale binary: %w", err)
	}

	var output bytes.Buffer
	scanner := bufio.NewScanner(strings.NewReader(embedSystemd))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "ExecStart=") {
			line = fmt.Sprintf("ExecStart=%s systray", tailscaleBin)
		}
		output.WriteString(line + "\n")
	}

	configDir, err := os.UserConfigDir()
	if err != nil {
		homeDir, err := os.UserHomeDir()
		if err != nil {
			return fmt.Errorf("unable to locate user home: %w", err)
		}
		configDir = filepath.Join(homeDir, ".config")
	}

	systemdDir := filepath.Join(configDir, "systemd", "user")
	if err := os.MkdirAll(systemdDir, 0o755); err != nil {
		return fmt.Errorf("failed creating systemd user dir: %w", err)
	}

	serviceFile := filepath.Join(systemdDir, "tailscale-systray.service")

	if err := os.WriteFile(serviceFile, output.Bytes(), 0o755); err != nil {
		return fmt.Errorf("failed writing systemd user service: %w", err)
	}

	fmt.Printf("Successfully installed systemd service to: %s\n", serviceFile)
	fmt.Println("To enable and start the service, run:")
	fmt.Println(" systemctl --user daemon-reload")
	fmt.Println(" systemctl --user enable --now tailscale-systray")

	return nil
}
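A minimal sketch of how a caller might invoke InstallStartupScript; the import path shown is hypothetical, since this diff does not name the package's module path:

package main

import (
	"log"

	systray "tailscale.com/client/systray" // assumed import path; not stated in this diff
)

func main() {
	// Writes ~/.config/systemd/user/tailscale-systray.service (or the
	// XDG-configured equivalent); any other init system returns an error.
	if err := systray.InstallStartupScript("systemd"); err != nil {
		log.Fatal(err)
	}
}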
@ -1,801 +0,0 @@
|
|||||||
// Copyright (c) Tailscale Inc & AUTHORS
|
|
||||||
// SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
|
|
||||||
//go:build cgo || !darwin
|
|
||||||
|
|
||||||
// Package systray provides a minimal Tailscale systray application.
|
|
||||||
package systray
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"image"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"runtime"
|
|
||||||
"slices"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"fyne.io/systray"
|
|
||||||
ico "github.com/Kodeworks/golang-image-ico"
|
|
||||||
"github.com/atotto/clipboard"
|
|
||||||
dbus "github.com/godbus/dbus/v5"
|
|
||||||
"github.com/toqueteos/webbrowser"
|
|
||||||
"tailscale.com/client/local"
|
|
||||||
"tailscale.com/ipn"
|
|
||||||
"tailscale.com/ipn/ipnstate"
|
|
||||||
"tailscale.com/tailcfg"
|
|
||||||
"tailscale.com/util/slicesx"
|
|
||||||
"tailscale.com/util/stringsx"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// newMenuDelay is the amount of time to sleep after creating a new menu,
|
|
||||||
// but before adding items to it. This works around a bug in some dbus implementations.
|
|
||||||
newMenuDelay time.Duration
|
|
||||||
|
|
||||||
// if true, treat all mullvad exit node countries as single-city.
|
|
||||||
// Instead of rendering a submenu with cities, just select the highest-priority peer.
|
|
||||||
hideMullvadCities bool
|
|
||||||
)
|
|
||||||
|
|
||||||
// Run starts the systray menu and blocks until the menu exits.
|
|
||||||
// If client is nil, a default local.Client is used.
|
|
||||||
func (menu *Menu) Run(client *local.Client) {
|
|
||||||
if client == nil {
|
|
||||||
client = &local.Client{}
|
|
||||||
}
|
|
||||||
menu.lc = client
|
|
||||||
menu.updateState()
|
|
||||||
|
|
||||||
// exit cleanly on SIGINT and SIGTERM
|
|
||||||
go func() {
|
|
||||||
interrupt := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
|
|
||||||
select {
|
|
||||||
case <-interrupt:
|
|
||||||
menu.onExit()
|
|
||||||
case <-menu.bgCtx.Done():
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go menu.lc.SetGauge(menu.bgCtx, "systray_running", 1)
|
|
||||||
defer menu.lc.SetGauge(menu.bgCtx, "systray_running", 0)
|
|
||||||
|
|
||||||
systray.Run(menu.onReady, menu.onExit)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Menu represents the systray menu, its items, and the current Tailscale state.
|
|
||||||
type Menu struct {
|
|
||||||
mu sync.Mutex // protects the entire Menu
|
|
||||||
|
|
||||||
lc *local.Client
|
|
||||||
status *ipnstate.Status
|
|
||||||
curProfile ipn.LoginProfile
|
|
||||||
allProfiles []ipn.LoginProfile
|
|
||||||
|
|
||||||
// readonly is whether the systray app is running in read-only mode.
|
|
||||||
// This is set if LocalAPI returns a permission error,
|
|
||||||
// typically because the user needs to run `tailscale set --operator=$USER`.
|
|
||||||
readonly bool
|
|
||||||
|
|
||||||
bgCtx context.Context // ctx for background tasks not involving menu item clicks
|
|
||||||
bgCancel context.CancelFunc
|
|
||||||
|
|
||||||
// Top-level menu items
|
|
||||||
connect *systray.MenuItem
|
|
||||||
disconnect *systray.MenuItem
|
|
||||||
self *systray.MenuItem
|
|
||||||
exitNodes *systray.MenuItem
|
|
||||||
more *systray.MenuItem
|
|
||||||
rebuildMenu *systray.MenuItem
|
|
||||||
quit *systray.MenuItem
|
|
||||||
|
|
||||||
rebuildCh chan struct{} // triggers a menu rebuild
|
|
||||||
accountsCh chan ipn.ProfileID
|
|
||||||
exitNodeCh chan tailcfg.StableNodeID // ID of selected exit node
|
|
||||||
|
|
||||||
eventCancel context.CancelFunc // cancel eventLoop
|
|
||||||
|
|
||||||
notificationIcon *os.File // icon used for desktop notifications
|
|
||||||
}
|
|
||||||
|
|
||||||
func (menu *Menu) init() {
|
|
||||||
if menu.bgCtx != nil {
|
|
||||||
// already initialized
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
menu.rebuildCh = make(chan struct{}, 1)
|
|
||||||
menu.accountsCh = make(chan ipn.ProfileID)
|
|
||||||
menu.exitNodeCh = make(chan tailcfg.StableNodeID)
|
|
||||||
|
|
||||||
// dbus wants a file path for notification icons, so copy to a temp file.
|
|
||||||
menu.notificationIcon, _ = os.CreateTemp("", "tailscale-systray.png")
|
|
||||||
io.Copy(menu.notificationIcon, connected.renderWithBorder(3))
|
|
||||||
|
|
||||||
menu.bgCtx, menu.bgCancel = context.WithCancel(context.Background())
|
|
||||||
go menu.watchIPNBus()
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
if runtime.GOOS != "linux" {
|
|
||||||
// so far, these tweaks are only needed on Linux
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
desktop := strings.ToLower(os.Getenv("XDG_CURRENT_DESKTOP"))
|
|
||||||
switch desktop {
|
|
||||||
case "gnome", "ubuntu:gnome":
|
|
||||||
// GNOME expands submenus downward in the main menu, rather than flyouts to the side.
|
|
||||||
// Either as a result of that or another limitation, there seems to be a maximum depth of submenus.
|
|
||||||
// Mullvad countries that have a city submenu are not being rendered, and so can't be selected.
|
|
||||||
// Handle this by simply treating all mullvad countries as single-city and select the best peer.
|
|
||||||
hideMullvadCities = true
|
|
||||||
case "kde":
|
|
||||||
// KDE doesn't need a delay, and actually won't render submenus
|
|
||||||
// if we delay for more than about 400µs.
|
|
||||||
newMenuDelay = 0
|
|
||||||
default:
|
|
||||||
// Add a slight delay to ensure the menu is created before adding items.
|
|
||||||
//
|
|
||||||
// Systray implementations that use libdbusmenu sometimes process messages out of order,
|
|
||||||
// resulting in errors such as:
|
|
||||||
// (waybar:153009): LIBDBUSMENU-GTK-WARNING **: 18:07:11.551: Children but no menu, someone's been naughty with their 'children-display' property: 'submenu'
|
|
||||||
//
|
|
||||||
// See also: https://github.com/fyne-io/systray/issues/12
|
|
||||||
newMenuDelay = 10 * time.Millisecond
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// onReady is called by the systray package when the menu is ready to be built.
|
|
||||||
func (menu *Menu) onReady() {
|
|
||||||
log.Printf("starting")
|
|
||||||
if os.Getuid() == 0 || os.Getuid() != os.Geteuid() || os.Getenv("SUDO_USER") != "" || os.Getenv("DOAS_USER") != "" {
|
|
||||||
fmt.Fprintln(os.Stderr, `
|
|
||||||
It appears that you might be running the systray with sudo/doas.
|
|
||||||
This can lead to issues with D-Bus, and should be avoided.
|
|
||||||
|
|
||||||
The systray application should be run with the same user as your desktop session.
|
|
||||||
This usually means that you should run the application like:
|
|
||||||
|
|
||||||
tailscale systray
|
|
||||||
|
|
||||||
See https://tailscale.com/kb/1597/linux-systray for more information.`)
|
|
||||||
}
|
|
||||||
setAppIcon(disconnected)
|
|
||||||
menu.rebuild()
|
|
||||||
|
|
||||||
menu.mu.Lock()
|
|
||||||
if menu.readonly {
|
|
||||||
fmt.Fprintln(os.Stderr, `
|
|
||||||
No permission to manage Tailscale. Set operator by running:
|
|
||||||
|
|
||||||
sudo tailscale set --operator=$USER
|
|
||||||
|
|
||||||
See https://tailscale.com/s/cli-operator for more information.`)
|
|
||||||
}
|
|
||||||
menu.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateState updates the Menu state from the Tailscale local client.
|
|
||||||
func (menu *Menu) updateState() {
|
|
||||||
menu.mu.Lock()
|
|
||||||
defer menu.mu.Unlock()
|
|
||||||
menu.init()
|
|
||||||
|
|
||||||
menu.readonly = false
|
|
||||||
|
|
||||||
var err error
|
|
||||||
menu.status, err = menu.lc.Status(menu.bgCtx)
|
|
||||||
if err != nil {
|
|
||||||
log.Print(err)
|
|
||||||
}
|
|
||||||
menu.curProfile, menu.allProfiles, err = menu.lc.ProfileStatus(menu.bgCtx)
|
|
||||||
if err != nil {
|
|
||||||
if local.IsAccessDeniedError(err) {
|
|
||||||
menu.readonly = true
|
|
||||||
}
|
|
||||||
log.Print(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// rebuild the systray menu based on the current Tailscale state.
|
|
||||||
//
|
|
||||||
// We currently rebuild the entire menu because it is not easy to update the existing menu.
|
|
||||||
// You cannot iterate over the items in a menu, nor can you remove some items like separators.
|
|
||||||
// So for now we rebuild the whole thing, and can optimize this later if needed.
|
|
||||||
func (menu *Menu) rebuild() {
|
|
	menu.mu.Lock()
	defer menu.mu.Unlock()
	menu.init()

	if menu.eventCancel != nil {
		menu.eventCancel()
	}
	ctx := context.Background()
	ctx, menu.eventCancel = context.WithCancel(ctx)

	systray.ResetMenu()

	if menu.readonly {
		const readonlyMsg = "No permission to manage Tailscale.\nSee tailscale.com/s/cli-operator"
		m := systray.AddMenuItem(readonlyMsg, "")
		onClick(ctx, m, func(_ context.Context) {
			webbrowser.Open("https://tailscale.com/s/cli-operator")
		})
		systray.AddSeparator()
	}

	menu.connect = systray.AddMenuItem("Connect", "")
	menu.disconnect = systray.AddMenuItem("Disconnect", "")
	menu.disconnect.Hide()
	systray.AddSeparator()

	// delay to prevent race setting icon on first start
	time.Sleep(newMenuDelay)

	// Set systray menu icon and title.
	// Also adjust connect/disconnect menu items if needed.
	var backendState string
	if menu.status != nil {
		backendState = menu.status.BackendState
	}
	switch backendState {
	case ipn.Running.String():
		if menu.status.ExitNodeStatus != nil && !menu.status.ExitNodeStatus.ID.IsZero() {
			if menu.status.ExitNodeStatus.Online {
				setTooltip("Using exit node")
				setAppIcon(exitNodeOnline)
			} else {
				setTooltip("Exit node offline")
				setAppIcon(exitNodeOffline)
			}
		} else {
			setTooltip(fmt.Sprintf("Connected to %s", menu.status.CurrentTailnet.Name))
			setAppIcon(connected)
		}
		menu.connect.SetTitle("Connected")
		menu.connect.Disable()
		menu.disconnect.Show()
		menu.disconnect.Enable()
	case ipn.Starting.String():
		setTooltip("Connecting")
		setAppIcon(loading)
	default:
		setTooltip("Disconnected")
		setAppIcon(disconnected)
	}

	if menu.readonly {
		menu.connect.Disable()
		menu.disconnect.Disable()
	}

	account := "Account"
	if pt := profileTitle(menu.curProfile); pt != "" {
		account = pt
	}
	if !menu.readonly {
		accounts := systray.AddMenuItem(account, "")
		setRemoteIcon(accounts, menu.curProfile.UserProfile.ProfilePicURL)
		time.Sleep(newMenuDelay)
		for _, profile := range menu.allProfiles {
			title := profileTitle(profile)
			var item *systray.MenuItem
			if profile.ID == menu.curProfile.ID {
				item = accounts.AddSubMenuItemCheckbox(title, "", true)
			} else {
				item = accounts.AddSubMenuItem(title, "")
			}
			setRemoteIcon(item, profile.UserProfile.ProfilePicURL)
			onClick(ctx, item, func(ctx context.Context) {
				select {
				case <-ctx.Done():
				case menu.accountsCh <- profile.ID:
				}
			})
		}
	}

	if menu.status != nil && menu.status.Self != nil && len(menu.status.Self.TailscaleIPs) > 0 {
		title := fmt.Sprintf("This Device: %s (%s)", menu.status.Self.HostName, menu.status.Self.TailscaleIPs[0])
		menu.self = systray.AddMenuItem(title, "")
	} else {
		menu.self = systray.AddMenuItem("This Device: not connected", "")
		menu.self.Disable()
	}
	systray.AddSeparator()

	if !menu.readonly {
		menu.rebuildExitNodeMenu(ctx)
	}

	menu.more = systray.AddMenuItem("More settings", "")
	if menu.status != nil && menu.status.BackendState == "Running" {
		// web client is only available if backend is running
		onClick(ctx, menu.more, func(_ context.Context) {
			webbrowser.Open("http://100.100.100.100/")
		})
	} else {
		menu.more.Disable()
	}

	// TODO(#15528): this menu item shouldn't be necessary at all,
	// but is at least more discoverable than having users switch profiles or exit nodes.
	menu.rebuildMenu = systray.AddMenuItem("Rebuild menu", "Fix missing menu items")
	onClick(ctx, menu.rebuildMenu, func(ctx context.Context) {
		select {
		case <-ctx.Done():
		case menu.rebuildCh <- struct{}{}:
		}
	})
	menu.rebuildMenu.Enable()

	menu.quit = systray.AddMenuItem("Quit", "Quit the app")
	menu.quit.Enable()

	go menu.eventLoop(ctx)
}

// profileTitle returns the title string for a profile menu item.
func profileTitle(profile ipn.LoginProfile) string {
	title := profile.Name
	if profile.NetworkProfile.DomainName != "" {
		if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
			// windows and mac don't support multi-line menu
			title += " (" + profile.NetworkProfile.DisplayNameOrDefault() + ")"
		} else {
			title += "\n" + profile.NetworkProfile.DisplayNameOrDefault()
		}
	}
	return title
}

var (
	cacheMu   sync.Mutex
	httpCache = map[string][]byte{} // URL => response body
)

// setRemoteIcon sets the icon for menu to the specified remote image.
// Remote images are fetched as needed and cached.
func setRemoteIcon(menu *systray.MenuItem, urlStr string) {
	if menu == nil || urlStr == "" {
		return
	}

	cacheMu.Lock()
	defer cacheMu.Unlock()
	b, ok := httpCache[urlStr]
	if !ok {
		resp, err := http.Get(urlStr)
		if err == nil && resp.StatusCode == http.StatusOK {
			b, _ = io.ReadAll(resp.Body)

			// Convert image to ICO format on Windows
			if runtime.GOOS == "windows" {
				im, _, err := image.Decode(bytes.NewReader(b))
				if err != nil {
					return
				}
				buf := bytes.NewBuffer(nil)
				if err := ico.Encode(buf, im); err != nil {
					return
				}
				b = buf.Bytes()
			}

			httpCache[urlStr] = b
			resp.Body.Close()
		}
	}

	if len(b) > 0 {
		menu.SetIcon(b)
	}
}

// setTooltip sets the tooltip text for the systray icon.
func setTooltip(text string) {
	if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
		systray.SetTooltip(text)
	} else {
		// on Linux, SetTitle actually sets the tooltip
		systray.SetTitle(text)
	}
}

// eventLoop is the main event loop for handling click events on menu items
// and responding to Tailscale state changes.
// This method does not return until ctx.Done is closed.
func (menu *Menu) eventLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-menu.rebuildCh:
			menu.updateState()
			menu.rebuild()
		case <-menu.connect.ClickedCh:
			_, err := menu.lc.EditPrefs(ctx, &ipn.MaskedPrefs{
				Prefs: ipn.Prefs{
					WantRunning: true,
				},
				WantRunningSet: true,
			})
			if err != nil {
				log.Printf("error connecting: %v", err)
			}

		case <-menu.disconnect.ClickedCh:
			_, err := menu.lc.EditPrefs(ctx, &ipn.MaskedPrefs{
				Prefs: ipn.Prefs{
					WantRunning: false,
				},
				WantRunningSet: true,
			})
			if err != nil {
				log.Printf("error disconnecting: %v", err)
			}

		case <-menu.self.ClickedCh:
			menu.copyTailscaleIP(menu.status.Self)

		case id := <-menu.accountsCh:
			if err := menu.lc.SwitchProfile(ctx, id); err != nil {
				log.Printf("error switching to profile ID %v: %v", id, err)
			}

		case exitNode := <-menu.exitNodeCh:
			if exitNode.IsZero() {
				log.Print("disable exit node")
				if err := menu.lc.SetUseExitNode(ctx, false); err != nil {
					log.Printf("error disabling exit node: %v", err)
				}
			} else {
				log.Printf("enable exit node: %v", exitNode)
				mp := &ipn.MaskedPrefs{
					Prefs: ipn.Prefs{
						ExitNodeID: exitNode,
					},
					ExitNodeIDSet: true,
				}
				if _, err := menu.lc.EditPrefs(ctx, mp); err != nil {
					log.Printf("error setting exit node: %v", err)
				}
			}

		case <-menu.quit.ClickedCh:
			systray.Quit()
		}
	}
}

// onClick registers a click handler for a menu item.
func onClick(ctx context.Context, item *systray.MenuItem, fn func(ctx context.Context)) {
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-item.ClickedCh:
				fn(ctx)
			}
		}
	}()
}

// watchIPNBus subscribes to the tailscale event bus and sends state updates to chState.
// This method does not return.
func (menu *Menu) watchIPNBus() {
	for {
		if err := menu.watchIPNBusInner(); err != nil {
			log.Println(err)
			if errors.Is(err, context.Canceled) {
				// If the context got canceled, we will never be able to
				// reconnect to IPN bus, so exit the process.
				log.Fatalf("watchIPNBus: %v", err)
			}
		}
		// If our watch connection breaks, wait a bit before reconnecting. No
		// reason to spam the logs if e.g. tailscaled is restarting or goes
		// down.
		time.Sleep(3 * time.Second)
	}
}

func (menu *Menu) watchIPNBusInner() error {
	watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, 0)
	if err != nil {
		return fmt.Errorf("watching ipn bus: %w", err)
	}
	defer watcher.Close()
	for {
		select {
		case <-menu.bgCtx.Done():
			return nil
		default:
			n, err := watcher.Next()
			if err != nil {
				return fmt.Errorf("ipnbus error: %w", err)
			}
			var rebuild bool
			if n.State != nil {
				log.Printf("new state: %v", n.State)
				rebuild = true
			}
			if n.Prefs != nil {
				rebuild = true
			}
			if rebuild {
				menu.rebuildCh <- struct{}{}
			}
		}
	}
}

// copyTailscaleIP copies the first Tailscale IP of the given device to the clipboard
// and sends a notification with the copied value.
func (menu *Menu) copyTailscaleIP(device *ipnstate.PeerStatus) {
	if device == nil || len(device.TailscaleIPs) == 0 {
		return
	}
	name := strings.Split(device.DNSName, ".")[0]
	ip := device.TailscaleIPs[0].String()
	err := clipboard.WriteAll(ip)
	if err != nil {
		log.Printf("clipboard error: %v", err)
	} else {
		menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip)
	}
}

// sendNotification sends a desktop notification with the given title and content.
func (menu *Menu) sendNotification(title, content string) {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Printf("dbus: %v", err)
		return
	}
	timeout := 3 * time.Second
	obj := conn.Object("org.freedesktop.Notifications", "/org/freedesktop/Notifications")
	call := obj.Call("org.freedesktop.Notifications.Notify", 0, "Tailscale", uint32(0),
		menu.notificationIcon.Name(), title, content, []string{}, map[string]dbus.Variant{}, int32(timeout.Milliseconds()))
	if call.Err != nil {
		log.Printf("dbus: %v", call.Err)
	}
}

func (menu *Menu) rebuildExitNodeMenu(ctx context.Context) {
	if menu.status == nil {
		return
	}

	status := menu.status
	menu.exitNodes = systray.AddMenuItem("Exit Nodes", "")
	time.Sleep(newMenuDelay)

	// register a click handler for a menu item to set nodeID as the exit node.
	setExitNodeOnClick := func(item *systray.MenuItem, nodeID tailcfg.StableNodeID) {
		onClick(ctx, item, func(ctx context.Context) {
			select {
			case <-ctx.Done():
			case menu.exitNodeCh <- nodeID:
			}
		})
	}

	noExitNodeMenu := menu.exitNodes.AddSubMenuItemCheckbox("None", "", status.ExitNodeStatus == nil)
	setExitNodeOnClick(noExitNodeMenu, "")

	// Show recommended exit node if available.
	if status.Self.CapMap.Contains(tailcfg.NodeAttrSuggestExitNodeUI) {
		sugg, err := menu.lc.SuggestExitNode(ctx)
		if err == nil {
			title := "Recommended: "
			if loc := sugg.Location; loc.Valid() && loc.Country() != "" {
				flag := countryFlag(loc.CountryCode())
				title += fmt.Sprintf("%s %s: %s", flag, loc.Country(), loc.City())
			} else {
				title += strings.Split(sugg.Name, ".")[0]
			}
			menu.exitNodes.AddSeparator()
			rm := menu.exitNodes.AddSubMenuItemCheckbox(title, "", false)
			setExitNodeOnClick(rm, sugg.ID)
			if status.ExitNodeStatus != nil && sugg.ID == status.ExitNodeStatus.ID {
				rm.Check()
			}
		}
	}

	// Add tailnet exit nodes if present.
	var tailnetExitNodes []*ipnstate.PeerStatus
	for _, ps := range status.Peer {
		if ps.ExitNodeOption && ps.Location == nil {
			tailnetExitNodes = append(tailnetExitNodes, ps)
		}
	}
	if len(tailnetExitNodes) > 0 {
		menu.exitNodes.AddSeparator()
		menu.exitNodes.AddSubMenuItem("Tailnet Exit Nodes", "").Disable()
		for _, ps := range status.Peer {
			if !ps.ExitNodeOption || ps.Location != nil {
				continue
			}
			name := strings.Split(ps.DNSName, ".")[0]
			if !ps.Online {
				name += " (offline)"
			}
			sm := menu.exitNodes.AddSubMenuItemCheckbox(name, "", false)
			if !ps.Online {
				sm.Disable()
			}
			if status.ExitNodeStatus != nil && ps.ID == status.ExitNodeStatus.ID {
				sm.Check()
			}
			setExitNodeOnClick(sm, ps.ID)
		}
	}

	// Add mullvad exit nodes if present.
	var mullvadExitNodes mullvadPeers
	if status.Self.CapMap.Contains("mullvad") {
		mullvadExitNodes = newMullvadPeers(status)
	}
	if len(mullvadExitNodes.countries) > 0 {
		menu.exitNodes.AddSeparator()
		menu.exitNodes.AddSubMenuItem("Location-based Exit Nodes", "").Disable()
		mullvadMenu := menu.exitNodes.AddSubMenuItemCheckbox("Mullvad VPN", "", false)

		for _, country := range mullvadExitNodes.sortedCountries() {
			flag := countryFlag(country.code)
			countryMenu := mullvadMenu.AddSubMenuItemCheckbox(flag+" "+country.name, "", false)

			// single-city country, no submenu
			if len(country.cities) == 1 || hideMullvadCities {
				setExitNodeOnClick(countryMenu, country.best.ID)
				if status.ExitNodeStatus != nil {
					for _, city := range country.cities {
						for _, ps := range city.peers {
							if status.ExitNodeStatus.ID == ps.ID {
								mullvadMenu.Check()
								countryMenu.Check()
							}
						}
					}
				}
				continue
			}

			// multi-city country, build submenu with "best available" option and cities.
			time.Sleep(newMenuDelay)
			bm := countryMenu.AddSubMenuItemCheckbox("Best Available", "", false)
			setExitNodeOnClick(bm, country.best.ID)
			countryMenu.AddSeparator()

			for _, city := range country.sortedCities() {
				cityMenu := countryMenu.AddSubMenuItemCheckbox(city.name, "", false)
				setExitNodeOnClick(cityMenu, city.best.ID)
				if status.ExitNodeStatus != nil {
					for _, ps := range city.peers {
						if status.ExitNodeStatus.ID == ps.ID {
							mullvadMenu.Check()
							countryMenu.Check()
							cityMenu.Check()
						}
					}
				}
			}
		}
	}

	// TODO: "Allow Local Network Access" and "Run Exit Node" menu items
}

// mullvadPeers contains all mullvad peer nodes, sorted by country and city.
type mullvadPeers struct {
	countries map[string]*mvCountry // country code (uppercase) => country
}

// sortedCountries returns countries containing mullvad nodes, sorted by name.
func (mp mullvadPeers) sortedCountries() []*mvCountry {
	countries := slicesx.MapValues(mp.countries)
	slices.SortFunc(countries, func(a, b *mvCountry) int {
		return stringsx.CompareFold(a.name, b.name)
	})
	return countries
}

type mvCountry struct {
	code   string
	name   string
	best   *ipnstate.PeerStatus // highest priority peer in the country
	cities map[string]*mvCity   // city code => city
}

// sortedCities returns cities containing mullvad nodes, sorted by name.
func (mc *mvCountry) sortedCities() []*mvCity {
	cities := slicesx.MapValues(mc.cities)
	slices.SortFunc(cities, func(a, b *mvCity) int {
		return stringsx.CompareFold(a.name, b.name)
	})
	return cities
}

// countryFlag takes a 2-character ASCII string and returns the corresponding emoji flag.
// It returns the empty string on error.
func countryFlag(code string) string {
	if len(code) != 2 {
		return ""
	}
	runes := make([]rune, 0, 2)
	for i := range 2 {
		b := code[i] | 32 // lowercase
		if b < 'a' || b > 'z' {
			return ""
		}
		// https://en.wikipedia.org/wiki/Regional_indicator_symbol
		runes = append(runes, 0x1F1E6+rune(b-'a'))
	}
	return string(runes)
}
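For reference, a minimal sketch (not part of the original change) of what countryFlag produces, assuming it sits in the same package as the function above with fmt imported; the inputs are arbitrary examples:

func demoCountryFlag() {
	fmt.Println(countryFlag("US"))  // two regional indicators, renders as the US flag
	fmt.Println(countryFlag("de"))  // lowercase also works: b |= 32 normalizes case
	fmt.Println(countryFlag("U1"))  // "" because '1' is not an ASCII letter
	fmt.Println(countryFlag("USA")) // "" because the code must be exactly 2 bytes
}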
type mvCity struct {
	name  string
	best  *ipnstate.PeerStatus // highest priority peer in the city
	peers []*ipnstate.PeerStatus
}

func newMullvadPeers(status *ipnstate.Status) mullvadPeers {
	countries := make(map[string]*mvCountry)
	for _, ps := range status.Peer {
		if !ps.ExitNodeOption || ps.Location == nil {
			continue
		}
		loc := ps.Location
		country, ok := countries[loc.CountryCode]
		if !ok {
			country = &mvCountry{
				code:   loc.CountryCode,
				name:   loc.Country,
				cities: make(map[string]*mvCity),
			}
			countries[loc.CountryCode] = country
		}
		city, ok := countries[loc.CountryCode].cities[loc.CityCode]
		if !ok {
			city = &mvCity{
				name: loc.City,
			}
			countries[loc.CountryCode].cities[loc.CityCode] = city
		}
		city.peers = append(city.peers, ps)
		if city.best == nil || ps.Location.Priority > city.best.Location.Priority {
			city.best = ps
		}
		if country.best == nil || ps.Location.Priority > country.best.Location.Priority {
			country.best = ps
		}
	}
	return mullvadPeers{countries}
}

// onExit is called by the systray package when the menu is exiting.
func (menu *Menu) onExit() {
	log.Printf("exiting")
	if menu.bgCancel != nil {
		menu.bgCancel()
	}
	if menu.eventCancel != nil {
		menu.eventCancel()
	}

	os.Remove(menu.notificationIcon.Name())
}
@ -1,10 +0,0 @@
[Unit]
Description=Tailscale System Tray
After=graphical.target

[Service]
Type=simple
ExecStart=/usr/bin/tailscale systray

[Install]
WantedBy=default.target
@ -1,34 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !js && !ts_omit_acme

package tailscale

import (
	"context"
	"crypto/tls"

	"tailscale.com/client/local"
)

// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate].
func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
	return local.GetCertificate(hi)
}

// CertPair is an alias for [tailscale.com/client/local.CertPair].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair].
func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
	return local.CertPair(ctx, domain)
}

// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName].
func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) {
	return local.ExpandSNIName(ctx, name)
}

File diff suppressed because it is too large
@ -1,79 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package tailscale

import (
	"context"

	"tailscale.com/client/local"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/ipn/ipnstate"
)

// ErrPeerNotFound is an alias for [tailscale.com/client/local.ErrPeerNotFound].
//
// Deprecated: import [tailscale.com/client/local] instead.
var ErrPeerNotFound = local.ErrPeerNotFound

// LocalClient is an alias for [tailscale.com/client/local.Client].
//
// Deprecated: import [tailscale.com/client/local] instead.
type LocalClient = local.Client

// IPNBusWatcher is an alias for [tailscale.com/client/local.IPNBusWatcher].
//
// Deprecated: import [tailscale.com/client/local] instead.
type IPNBusWatcher = local.IPNBusWatcher

// BugReportOpts is an alias for [tailscale.com/client/local.BugReportOpts].
//
// Deprecated: import [tailscale.com/client/local] instead.
type BugReportOpts = local.BugReportOpts

// PingOpts is an alias for [tailscale.com/client/local.PingOpts].
//
// Deprecated: import [tailscale.com/client/local] instead.
type PingOpts = local.PingOpts

// SetVersionMismatchHandler is an alias for [tailscale.com/client/local.SetVersionMismatchHandler].
//
// Deprecated: import [tailscale.com/client/local] instead.
func SetVersionMismatchHandler(f func(clientVer, serverVer string)) {
	local.SetVersionMismatchHandler(f)
}

// IsAccessDeniedError is an alias for [tailscale.com/client/local.IsAccessDeniedError].
//
// Deprecated: import [tailscale.com/client/local] instead.
func IsAccessDeniedError(err error) bool {
	return local.IsAccessDeniedError(err)
}

// IsPreconditionsFailedError is an alias for [tailscale.com/client/local.IsPreconditionsFailedError].
//
// Deprecated: import [tailscale.com/client/local] instead.
func IsPreconditionsFailedError(err error) bool {
	return local.IsPreconditionsFailedError(err)
}

// WhoIs is an alias for [tailscale.com/client/local.WhoIs].
//
// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.WhoIs].
func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) {
	return local.WhoIs(ctx, remoteAddr)
}

// Status is an alias for [tailscale.com/client/local.Status].
//
// Deprecated: import [tailscale.com/client/local] instead.
func Status(ctx context.Context) (*ipnstate.Status, error) {
	return local.Status(ctx)
}

// StatusWithoutPeers is an alias for [tailscale.com/client/local.StatusWithoutPeers].
//
// Deprecated: import [tailscale.com/client/local] instead.
func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) {
	return local.StatusWithoutPeers(ctx)
}
@ -1,86 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package tailscale

import (
	"net/url"
	"testing"
)

func TestClientBuildURL(t *testing.T) {
	c := Client{BaseURL: "http://127.0.0.1:1234"}
	for _, tt := range []struct {
		desc     string
		elements []any
		want     string
	}{
		{
			desc:     "single-element",
			elements: []any{"devices"},
			want:     "http://127.0.0.1:1234/api/v2/devices",
		},
		{
			desc:     "multiple-elements",
			elements: []any{"tailnet", "example.com"},
			want:     "http://127.0.0.1:1234/api/v2/tailnet/example.com",
		},
		{
			desc:     "escape-element",
			elements: []any{"tailnet", "example dot com?foo=bar"},
			want:     `http://127.0.0.1:1234/api/v2/tailnet/example%20dot%20com%3Ffoo=bar`,
		},
		{
			desc:     "url.Values",
			elements: []any{"tailnet", "example.com", "acl", url.Values{"details": {"1"}}},
			want:     `http://127.0.0.1:1234/api/v2/tailnet/example.com/acl?details=1`,
		},
	} {
		t.Run(tt.desc, func(t *testing.T) {
			got := c.BuildURL(tt.elements...)
			if got != tt.want {
				t.Errorf("got %q, want %q", got, tt.want)
			}
		})
	}
}

func TestClientBuildTailnetURL(t *testing.T) {
	c := Client{
		BaseURL: "http://127.0.0.1:1234",
		tailnet: "example.com",
	}
	for _, tt := range []struct {
		desc     string
		elements []any
		want     string
	}{
		{
			desc:     "single-element",
			elements: []any{"devices"},
			want:     "http://127.0.0.1:1234/api/v2/tailnet/example.com/devices",
		},
		{
			desc:     "multiple-elements",
			elements: []any{"devices", 123},
			want:     "http://127.0.0.1:1234/api/v2/tailnet/example.com/devices/123",
		},
		{
			desc:     "escape-element",
			elements: []any{"foo bar?baz=qux"},
			want:     `http://127.0.0.1:1234/api/v2/tailnet/example.com/foo%20bar%3Fbaz=qux`,
		},
		{
			desc:     "url.Values",
			elements: []any{"acl", url.Values{"details": {"1"}}},
			want:     `http://127.0.0.1:1234/api/v2/tailnet/example.com/acl?details=1`,
		},
	} {
		t.Run(tt.desc, func(t *testing.T) {
			got := c.BuildTailnetURL(tt.elements...)
			if got != tt.want {
				t.Errorf("got %q, want %q", got, tt.want)
			}
		})
	}
}

File diff suppressed because it is too large
@ -1,372 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// cigocacher is an opinionated-to-Tailscale client for gocached. It connects
// at a URL like "https://ci-gocached-azure-1.corp.ts.net:31364", but that is
// stored in a GitHub actions variable so that its hostname can be updated for
// all branches at the same time in sync with the actual infrastructure.
//
// It authenticates using GitHub OIDC tokens, and all HTTP errors are ignored
// so that its failure mode is just that builds get slower and fall back to
// disk-only cache.
package main

import (
	"bytes"
	"context"
	jsonv1 "encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime/debug"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	"github.com/bradfitz/go-tool-cache/cacheproc"
	"github.com/bradfitz/go-tool-cache/cachers"
)

func main() {
	var (
		version     = flag.Bool("version", false, "print version and exit")
		auth        = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output")
		stats       = flag.Bool("stats", false, "fetch and print cigocached stats and exit")
		token       = flag.String("token", "", "the cigocached access token to use, as created using --auth")
		srvURL      = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). Empty means to not use one.")
		srvHostDial = flag.String("cigocached-host", "", "optional cigocached host to dial instead of the host in the provided --cigocached-url. Useful for public TLS certs on private addresses.")
		dir         = flag.String("cache-dir", "", "cache directory; empty means automatic")
		verbose     = flag.Bool("verbose", false, "enable verbose logging")
	)
	flag.Parse()

	if *version {
		info, ok := debug.ReadBuildInfo()
		if !ok {
			log.Fatal("no build info")
		}
		var (
			rev   string
			dirty bool
		)
		for _, s := range info.Settings {
			switch s.Key {
			case "vcs.revision":
				rev = s.Value
			case "vcs.modified":
				dirty, _ = strconv.ParseBool(s.Value)
			}
		}
		if dirty {
			rev += "-dirty"
		}
		fmt.Println(rev)
		return
	}

	var srvHost string
	if *srvHostDial != "" && *srvURL != "" {
		u, err := url.Parse(*srvURL)
		if err != nil {
			log.Fatal(err)
		}
		srvHost = u.Hostname()
	}

	if *auth {
		if *srvURL == "" {
			log.Print("--cigocached-url is empty, skipping auth")
			return
		}
		tk, err := fetchAccessToken(httpClient(srvHost, *srvHostDial), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL"), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN"), *srvURL)
		if err != nil {
			log.Printf("error fetching access token, skipping auth: %v", err)
			return
		}
		fmt.Println(tk)
		return
	}

	if *stats {
		if *srvURL == "" {
			log.Fatal("--cigocached-url is empty; cannot fetch stats")
		}
		tk := *token
		if tk == "" {
			log.Fatal("--token is empty; cannot fetch stats")
		}
		c := &gocachedClient{
			baseURL:     *srvURL,
			cl:          httpClient(srvHost, *srvHostDial),
			accessToken: tk,
			verbose:     *verbose,
		}
		stats, err := c.fetchStats()
		if err != nil {
			log.Fatalf("error fetching gocached stats: %v", err)
		}
		fmt.Println(stats)
		return
	}

	if *dir == "" {
		d, err := os.UserCacheDir()
		if err != nil {
			log.Fatal(err)
		}
		*dir = filepath.Join(d, "go-cacher")
		log.Printf("Defaulting to cache dir %v ...", *dir)
	}
	if err := os.MkdirAll(*dir, 0750); err != nil {
		log.Fatal(err)
	}

	c := &cigocacher{
		disk: &cachers.DiskCache{
			Dir:     *dir,
			Verbose: *verbose,
		},
		verbose: *verbose,
	}
	if *srvURL != "" {
		if *verbose {
			log.Printf("Using cigocached at %s", *srvURL)
		}
		c.gocached = &gocachedClient{
			baseURL:     *srvURL,
			cl:          httpClient(srvHost, *srvHostDial),
			accessToken: *token,
			verbose:     *verbose,
		}
	}
	var p *cacheproc.Process
	p = &cacheproc.Process{
		Close: func() error {
			if c.verbose {
				log.Printf("gocacheprog: closing; %d gets (%d hits, %d misses, %d errors); %d puts (%d errors)",
					p.Gets.Load(), p.GetHits.Load(), p.GetMisses.Load(), p.GetErrors.Load(), p.Puts.Load(), p.PutErrors.Load())
			}
			return c.close()
		},
		Get: c.get,
		Put: c.put,
	}

	if err := p.Run(); err != nil {
		log.Fatal(err)
	}
}

func httpClient(srvHost, srvHostDial string) *http.Client {
	if srvHost == "" || srvHostDial == "" {
		return http.DefaultClient
	}
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				if host, port, err := net.SplitHostPort(addr); err == nil && host == srvHost {
					// This allows us to serve a publicly trusted TLS cert
					// while also minimising latency by explicitly using a
					// private network address.
					addr = net.JoinHostPort(srvHostDial, port)
				}
				var d net.Dialer
				return d.DialContext(ctx, network, addr)
			},
		},
	}
}
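A hedged usage sketch of the dial rewrite above (not part of the original change); the hostnames and port are made-up examples. TLS verification still happens against the public name, only the TCP dial target changes:

func demoHTTPClient() {
	// Hypothetical names: public cert name vs. private address to dial.
	cl := httpClient("ci-gocached.example.ts.net", "10.20.30.40")
	resp, err := cl.Get("https://ci-gocached.example.ts.net:31364/session/stats")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}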
type cigocacher struct {
	disk     *cachers.DiskCache
	gocached *gocachedClient
	verbose  bool

	getNanos atomic.Int64 // total nanoseconds spent in gets
	putNanos atomic.Int64 // total nanoseconds spent in puts

	getHTTP       atomic.Int64 // HTTP get requests made
	getHTTPBytes  atomic.Int64 // HTTP get bytes transferred
	getHTTPHits   atomic.Int64 // HTTP get hits
	getHTTPMisses atomic.Int64 // HTTP get misses
	getHTTPErrors atomic.Int64 // HTTP get errors ignored on best-effort basis
	getHTTPNanos  atomic.Int64 // total nanoseconds spent in HTTP gets
	putHTTP       atomic.Int64 // HTTP put requests made
	putHTTPBytes  atomic.Int64 // HTTP put bytes transferred
	putHTTPErrors atomic.Int64 // HTTP put errors ignored on best-effort basis
	putHTTPNanos  atomic.Int64 // total nanoseconds spent in HTTP puts
}

func (c *cigocacher) get(ctx context.Context, actionID string) (outputID, diskPath string, err error) {
	t0 := time.Now()
	defer func() {
		c.getNanos.Add(time.Since(t0).Nanoseconds())
	}()
	if c.gocached == nil {
		return c.disk.Get(ctx, actionID)
	}

	outputID, diskPath, err = c.disk.Get(ctx, actionID)
	if err == nil && outputID != "" {
		return outputID, diskPath, nil
	}

	c.getHTTP.Add(1)
	t0HTTP := time.Now()
	defer func() {
		c.getHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds())
	}()
	outputID, res, err := c.gocached.get(ctx, actionID)
	if err != nil {
		c.getHTTPErrors.Add(1)
		return "", "", nil
	}
	if outputID == "" || res == nil {
		c.getHTTPMisses.Add(1)
		return "", "", nil
	}

	defer res.Body.Close()

	diskPath, err = put(c.disk, actionID, outputID, res.ContentLength, res.Body)
	if err != nil {
		return "", "", fmt.Errorf("error filling disk cache from HTTP: %w", err)
	}

	c.getHTTPHits.Add(1)
	c.getHTTPBytes.Add(res.ContentLength)
	return outputID, diskPath, nil
}

func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size int64, r io.Reader) (diskPath string, err error) {
	t0 := time.Now()
	defer func() {
		c.putNanos.Add(time.Since(t0).Nanoseconds())
	}()
	if c.gocached == nil {
		return put(c.disk, actionID, outputID, size, r)
	}

	c.putHTTP.Add(1)
	var diskReader, httpReader io.Reader
	tee := &bestEffortTeeReader{r: r}
	if size == 0 {
		// Special case the empty file so NewRequest sets "Content-Length: 0",
		// as opposed to thinking we didn't set it and not being able to sniff its size
		// from the type.
		diskReader, httpReader = bytes.NewReader(nil), bytes.NewReader(nil)
	} else {
		pr, pw := io.Pipe()
		defer pw.Close()
		// The diskReader is in the driving seat. We will try to forward data
		// to httpReader as well, but only best-effort.
		diskReader = tee
		tee.w = pw
		httpReader = pr
	}
	httpErrCh := make(chan error)
	go func() {
		t0HTTP := time.Now()
		defer func() {
			c.putHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds())
		}()
		httpErrCh <- c.gocached.put(ctx, actionID, outputID, size, httpReader)
	}()

	diskPath, err = put(c.disk, actionID, outputID, size, diskReader)
	if err != nil {
		return "", fmt.Errorf("error writing to disk cache: %w", errors.Join(err, tee.err))
	}

	select {
	case err := <-httpErrCh:
		if err != nil {
			c.putHTTPErrors.Add(1)
		} else {
			c.putHTTPBytes.Add(size)
		}
	case <-ctx.Done():
	}

	return diskPath, nil
}

func (c *cigocacher) close() error {
	if !c.verbose || c.gocached == nil {
		return nil
	}

	log.Printf("cigocacher HTTP stats: %d gets (%.1fMiB, %.2fs, %d hits, %d misses, %d errors ignored); %d puts (%.1fMiB, %.2fs, %d errors ignored)",
		c.getHTTP.Load(), float64(c.getHTTPBytes.Load())/float64(1<<20), float64(c.getHTTPNanos.Load())/float64(time.Second), c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(),
		c.putHTTP.Load(), float64(c.putHTTPBytes.Load())/float64(1<<20), float64(c.putHTTPNanos.Load())/float64(time.Second), c.putHTTPErrors.Load())

	stats, err := c.gocached.fetchStats()
	if err != nil {
		log.Printf("error fetching gocached stats: %v", err)
	} else {
		log.Printf("gocached session stats: %s", stats)
	}

	return nil
}

func fetchAccessToken(cl *http.Client, idTokenURL, idTokenRequestToken, gocachedURL string) (string, error) {
	req, err := http.NewRequest("GET", idTokenURL+"&audience=gocached", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", "Bearer "+idTokenRequestToken)
	resp, err := cl.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	type idTokenResp struct {
		Value string `json:"value"`
	}
	var idToken idTokenResp
	if err := jsonv1.NewDecoder(resp.Body).Decode(&idToken); err != nil {
		return "", err
	}

	req, _ = http.NewRequest("POST", gocachedURL+"/auth/exchange-token", strings.NewReader(`{"jwt":"`+idToken.Value+`"}`))
	req.Header.Set("Content-Type", "application/json")
	resp, err = cl.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	type accessTokenResp struct {
		AccessToken string `json:"access_token"`
	}
	var accessToken accessTokenResp
	if err := jsonv1.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
		return "", err
	}

	return accessToken.AccessToken, nil
}

type bestEffortTeeReader struct {
	r   io.Reader
	w   io.WriteCloser
	err error
}

func (t *bestEffortTeeReader) Read(p []byte) (int, error) {
	n, err := t.r.Read(p)
	if n > 0 && t.w != nil {
		if _, err := t.w.Write(p[:n]); err != nil {
			t.err = errors.Join(err, t.w.Close())
			t.w = nil
		}
	}
	return n, err
}
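A minimal sketch (not from the original change) of the best-effort semantics above: if the tee target dies, the primary read path keeps going and the write error is only recorded. It assumes the same package and imports as main.go:

func demoBestEffortTee() {
	pr, pw := io.Pipe()
	pr.CloseWithError(errors.New("upload side gone")) // simulate the HTTP half failing
	tee := &bestEffortTeeReader{r: strings.NewReader("cache object bytes"), w: pw}
	b, err := io.ReadAll(tee) // the disk path still gets every byte, err is nil
	log.Printf("read %d bytes, read err=%v, tee err=%v", len(b), err, tee.err)
}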
@ -1,88 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"time"

	"github.com/bradfitz/go-tool-cache/cachers"
)

// indexEntry is the metadata that DiskCache stores on disk for an ActionID.
type indexEntry struct {
	Version   int    `json:"v"`
	OutputID  string `json:"o"`
	Size      int64  `json:"n"`
	TimeNanos int64  `json:"t"`
}

func validHex(x string) bool {
	if len(x) < 4 || len(x) > 100 {
		return false
	}
	for _, b := range x {
		if b >= '0' && b <= '9' || b >= 'a' && b <= 'f' {
			continue
		}
		return false
	}
	return true
}

// put is like dc.Put but refactored to support safe concurrent writes on Windows.
// TODO(tomhjp): upstream these changes to go-tool-cache once they look stable.
func put(dc *cachers.DiskCache, actionID, outputID string, size int64, body io.Reader) (diskPath string, _ error) {
	if len(actionID) < 4 || len(outputID) < 4 {
		return "", fmt.Errorf("actionID and outputID must be at least 4 characters long")
	}
	if !validHex(actionID) {
		log.Printf("diskcache: got invalid actionID %q", actionID)
		return "", errors.New("actionID must be hex")
	}
	if !validHex(outputID) {
		log.Printf("diskcache: got invalid outputID %q", outputID)
		return "", errors.New("outputID must be hex")
	}

	actionFile := dc.ActionFilename(actionID)
	outputFile := dc.OutputFilename(outputID)
	actionDir := filepath.Dir(actionFile)
	outputDir := filepath.Dir(outputFile)

	if err := os.MkdirAll(actionDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create action directory: %w", err)
	}
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create output directory: %w", err)
	}

	wrote, err := writeOutputFile(outputFile, body, size, outputID)
	if err != nil {
		return "", err
	}
	if wrote != size {
		return "", fmt.Errorf("wrote %d bytes, expected %d", wrote, size)
	}

	ij, err := json.Marshal(indexEntry{
		Version:   1,
		OutputID:  outputID,
		Size:      size,
		TimeNanos: time.Now().UnixNano(),
	})
	if err != nil {
		return "", err
	}
	if err := writeActionFile(dc.ActionFilename(actionID), ij); err != nil {
		return "", fmt.Errorf("atomic write failed: %w", err)
	}
	return outputFile, nil
}
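A rough smoke-test sketch for the put helper above (not part of the original change), under the assumption that cachers.DiskCache only needs Dir set, as in main.go, and that bytes, crypto/sha256, and encoding/hex are also imported; the IDs are derived from the content purely for the demo:

func demoPut() {
	blob := []byte("hello cache")
	sum := sha256.Sum256(blob)
	outputID := hex.EncodeToString(sum[:])
	actionID := outputID // any well-formed hex ID works for this demo

	dir, _ := os.MkdirTemp("", "cigocacher-demo")
	dc := &cachers.DiskCache{Dir: dir}
	diskPath, err := put(dc, actionID, outputID, int64(len(blob)), bytes.NewReader(blob))
	log.Printf("stored at %s, err=%v", diskPath, err)
}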
@ -1,44 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !windows

package main

import (
	"bytes"
	"io"
	"os"
	"path/filepath"
)

func writeActionFile(dest string, b []byte) error {
	_, err := writeAtomic(dest, bytes.NewReader(b))
	return err
}

func writeOutputFile(dest string, r io.Reader, _ int64, _ string) (int64, error) {
	return writeAtomic(dest, r)
}

func writeAtomic(dest string, r io.Reader) (int64, error) {
	tf, err := os.CreateTemp(filepath.Dir(dest), filepath.Base(dest)+".*")
	if err != nil {
		return 0, err
	}
	size, err := io.Copy(tf, r)
	if err != nil {
		tf.Close()
		os.Remove(tf.Name())
		return 0, err
	}
	if err := tf.Close(); err != nil {
		os.Remove(tf.Name())
		return 0, err
	}
	if err := os.Rename(tf.Name(), dest); err != nil {
		os.Remove(tf.Name())
		return 0, err
	}
	return size, nil
}
@ -1,102 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package main

import (
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"os"
)

// The functions in this file are based on go's own cache in
// cmd/go/internal/cache/cache.go, particularly putIndexEntry and copyFile.

// writeActionFile writes the indexEntry metadata for an ActionID to disk. It
// may be called for the same actionID concurrently from multiple processes,
// and the outputID for a specific actionID may change from time to time due
// to non-deterministic builds. It makes a best-effort to delete the file if
// anything goes wrong.
func writeActionFile(dest string, b []byte) (retErr error) {
	f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0o666)
	if err != nil {
		return err
	}
	defer func() {
		cerr := f.Close()
		if retErr != nil || cerr != nil {
			retErr = errors.Join(retErr, cerr, os.Remove(dest))
		}
	}()

	_, err = f.Write(b)
	if err != nil {
		return err
	}

	// Truncate the file only *after* writing it.
	// (This should be a no-op, but truncate just in case of previous corruption.)
	//
	// This differs from os.WriteFile, which truncates to 0 *before* writing
	// via os.O_TRUNC. Truncating only after writing ensures that a second write
	// of the same content to the same file is idempotent, and does not - even
	// temporarily! - undo the effect of the first write.
	return f.Truncate(int64(len(b)))
}

// writeOutputFile writes content to be cached to disk. The outputID is the
// sha256 hash of the content, and each file should only be written ~once,
// assuming no sha256 hash collisions. It may be written multiple times if
// concurrent processes are both populating the same output. The file is opened
// with FILE_SHARE_READ|FILE_SHARE_WRITE, which means both processes can write
// the same contents concurrently without conflict.
//
// It makes a best effort to clean up if anything goes wrong, but the file may
// be left in an inconsistent state in the event of disk-related errors such as
// another process taking file locks, or power loss etc.
func writeOutputFile(dest string, r io.Reader, size int64, outputID string) (_ int64, retErr error) {
	info, err := os.Stat(dest)
	if err == nil && info.Size() == size {
		// Already exists, check the hash.
		if f, err := os.Open(dest); err == nil {
			h := sha256.New()
			io.Copy(h, f)
			f.Close()
			if fmt.Sprintf("%x", h.Sum(nil)) == outputID {
				// Still drain the reader to ensure associated resources are released.
				return io.Copy(io.Discard, r)
			}
		}
	}

	// Didn't successfully find the pre-existing file, write it.
	mode := os.O_WRONLY | os.O_CREATE
	if err == nil && info.Size() > size {
		mode |= os.O_TRUNC // Should never happen, but self-heal.
	}
	f, err := os.OpenFile(dest, mode, 0644)
	if err != nil {
		return 0, fmt.Errorf("failed to open output file %q: %w", dest, err)
	}
	defer func() {
		cerr := f.Close()
		if retErr != nil || cerr != nil {
			retErr = errors.Join(retErr, cerr, os.Remove(dest))
		}
	}()

	// Copy file to f, but also into h to double-check hash.
	h := sha256.New()
	w := io.MultiWriter(f, h)
	n, err := io.Copy(w, r)
	if err != nil {
		return 0, err
	}
	if fmt.Sprintf("%x", h.Sum(nil)) != outputID {
		return 0, errors.New("file content changed underfoot")
	}

	return n, nil
}
@ -1,109 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
)

type gocachedClient struct {
	baseURL     string       // base URL of the cacher server, like "http://localhost:31364".
	cl          *http.Client // http.Client to use.
	accessToken string       // Bearer token to use in the Authorization header.
	verbose     bool
}

// drainAndClose reads and throws away a small bounded amount of data. This is a
// best-effort attempt to allow connection reuse; Go's HTTP/1 Transport won't
// reuse a TCP connection unless you fully consume HTTP responses.
func drainAndClose(body io.ReadCloser) {
	io.CopyN(io.Discard, body, 4<<10)
	body.Close()
}

func tryReadErrorMessage(res *http.Response) []byte {
	msg, _ := io.ReadAll(io.LimitReader(res.Body, 4<<10))
	return msg
}

func (c *gocachedClient) get(ctx context.Context, actionID string) (outputID string, resp *http.Response, err error) {
	req, _ := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/action/"+actionID, nil)
	req.Header.Set("Want-Object", "1") // opt in to single roundtrip protocol
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}

	res, err := c.cl.Do(req)
	if err != nil {
		return "", nil, err
	}
	defer func() {
		if resp == nil {
			drainAndClose(res.Body)
		}
	}()
	if res.StatusCode == http.StatusNotFound {
		return "", nil, nil
	}
	if res.StatusCode != http.StatusOK {
		msg := tryReadErrorMessage(res)
		if c.verbose {
			log.Printf("error GET /action/%s: %v, %s", actionID, res.Status, msg)
		}
		return "", nil, fmt.Errorf("unexpected GET /action/%s status %v", actionID, res.Status)
	}

	outputID = res.Header.Get("Go-Output-Id")
	if outputID == "" {
		return "", nil, fmt.Errorf("missing Go-Output-Id header in response")
	}
	if res.ContentLength == -1 {
		return "", nil, fmt.Errorf("no Content-Length from server")
	}
	return outputID, res, nil
}

func (c *gocachedClient) put(ctx context.Context, actionID, outputID string, size int64, body io.Reader) error {
	req, _ := http.NewRequestWithContext(ctx, "PUT", c.baseURL+"/"+actionID+"/"+outputID, body)
	req.ContentLength = size
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}
	res, err := c.cl.Do(req)
	if err != nil {
		if c.verbose {
			log.Printf("error PUT /%s/%s: %v", actionID, outputID, err)
		}
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusNoContent {
		msg := tryReadErrorMessage(res)
		if c.verbose {
			log.Printf("error PUT /%s/%s: %v, %s", actionID, outputID, res.Status, msg)
		}
		return fmt.Errorf("unexpected PUT /%s/%s status %v", actionID, outputID, res.Status)
	}

	return nil
}

func (c *gocachedClient) fetchStats() (string, error) {
	req, _ := http.NewRequest("GET", c.baseURL+"/session/stats", nil)
	req.Header.Set("Authorization", "Bearer "+c.accessToken)
	resp, err := c.cl.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(b), nil
}
@ -0,0 +1,50 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build linux

package main

import (
	"log"
	"net/http"
	"sync"
)

// healthz is a simple health check server, if enabled it returns 200 OK if
// this tailscale node currently has at least one tailnet IP address else
// returns 503.
type healthz struct {
	sync.Mutex
	hasAddrs bool
}

func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.Lock()
	defer h.Unlock()

	if h.hasAddrs {
		w.Write([]byte("ok"))
	} else {
		http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable)
	}
}

func (h *healthz) update(healthy bool) {
	h.Lock()
	defer h.Unlock()

	if h.hasAddrs != healthy {
		log.Println("Setting healthy", healthy)
	}
	h.hasAddrs = healthy
}

// healthHandlers registers a simple health handler at /healthz.
// A containerized tailscale instance is considered healthy if
// it has at least one tailnet IP address.
func healthHandlers(mux *http.ServeMux) *healthz {
	h := &healthz{}
	mux.Handle("GET /healthz", h)
	return h
}
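A brief wiring sketch for the new health check (not part of the original change), with a made-up listen address; in the real container entrypoint the mux and update calls live elsewhere:

func demoHealthz() {
	mux := http.NewServeMux()
	h := healthHandlers(mux)
	go http.ListenAndServe("127.0.0.1:9002", mux) // error handling omitted in this sketch

	h.update(true)  // GET /healthz now returns 200 "ok"
	h.update(false) // and now 503 until the node has a tailnet IP again
}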
|
||||||
@ -1,331 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build linux

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"
    "net/netip"
    "os"
    "path/filepath"
    "reflect"
    "time"

    "github.com/fsnotify/fsnotify"
    "tailscale.com/kube/ingressservices"
    "tailscale.com/kube/kubeclient"
    "tailscale.com/util/linuxfw"
    "tailscale.com/util/mak"
)

// ingressProxy corresponds to a Kubernetes Operator's network layer ingress
// proxy. It configures firewall rules (iptables or nftables) to proxy tailnet
// traffic to Kubernetes Services. Currently this is only used for network
// layer proxies in HA mode.
type ingressProxy struct {
    cfgPath string // path to ingress configfile.

    // nfr is the netfilter runner used to configure firewall rules.
    // This is either an iptables-based or an nftables-based runner.
    // Never nil.
    nfr linuxfw.NetfilterRunner

    kc          kubeclient.Client // never nil
    stateSecret string            // Secret that holds Tailscale state

    // Pod's IP addresses are used as an identifier of this particular Pod.
    podIPv4 string // empty if Pod does not have an IPv4 address
    podIPv6 string // empty if Pod does not have an IPv6 address
}

// run starts the ingress proxy and ensures that firewall rules are set on start
// and refreshed as the ingress config changes.
func (p *ingressProxy) run(ctx context.Context, opts ingressProxyOpts) error {
    log.Printf("starting ingress proxy...")
    p.configure(opts)
    var tickChan <-chan time.Time
    var eventChan <-chan fsnotify.Event
    if w, err := fsnotify.NewWatcher(); err != nil {
        log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err)
        ticker := time.NewTicker(5 * time.Second)
        defer ticker.Stop()
        tickChan = ticker.C
    } else {
        defer w.Close()
        dir := filepath.Dir(p.cfgPath)
        if err := w.Add(dir); err != nil {
            return fmt.Errorf("failed to add fsnotify watch for %v: %w", dir, err)
        }
        eventChan = w.Events
    }

    if err := p.sync(ctx); err != nil {
        return err
    }
    for {
        select {
        case <-ctx.Done():
            return nil
        case <-tickChan:
            log.Printf("periodic sync, ensuring firewall config is up to date...")
        case <-eventChan:
            log.Printf("config file change detected, ensuring firewall config is up to date...")
        }
        if err := p.sync(ctx); err != nil {
            return fmt.Errorf("error syncing ingress service config: %w", err)
        }
    }
}
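// Illustrative sketch (assumed wiring, not taken from this diff): a caller such
// as containerboot's main goroutine could start the proxy roughly like this,
// with the option values coming from its own environment and configuration:
//
//    ip := new(ingressProxy)
//    go func() {
//        opts := ingressProxyOpts{
//            cfgPath:     "/etc/proxies/ingress-config.json", // assumed mount path
//            nfr:         nfr, // previously constructed netfilter runner
//            kc:          kc,  // previously constructed kube client
//            stateSecret: stateSecretName,
//            podIPv4:     podIPv4,
//            podIPv6:     podIPv6,
//        }
//        if err := ip.run(ctx, opts); err != nil {
//            log.Fatalf("ingress proxy failed: %v", err)
//        }
//    }()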
// sync reconciles the proxy's firewall rules (iptables or nftables) on ingress config changes:
// - ensures that new firewall rules are added
// - ensures that old firewall rules are deleted
// - updates the ingress proxy's status in the state Secret
func (p *ingressProxy) sync(ctx context.Context) error {
    // 1. Get the desired firewall configuration.
    cfgs, err := p.getConfigs()
    if err != nil {
        return fmt.Errorf("ingress proxy: error retrieving configs: %w", err)
    }

    // 2. Get the recorded firewall status.
    status, err := p.getStatus(ctx)
    if err != nil {
        return fmt.Errorf("ingress proxy: error retrieving current status: %w", err)
    }

    // 3. Ensure that the firewall configuration is up to date.
    if err := p.syncIngressConfigs(cfgs, status); err != nil {
        return fmt.Errorf("ingress proxy: error syncing configs: %w", err)
    }
    var existingConfigs *ingressservices.Configs
    if status != nil {
        existingConfigs = &status.Configs
    }

    // 4. Update the recorded firewall status.
    if !(ingressServicesStatusIsEqual(cfgs, existingConfigs) && p.isCurrentStatus(status)) {
        if err := p.recordStatus(ctx, cfgs); err != nil {
            return fmt.Errorf("ingress proxy: error setting status: %w", err)
        }
    }
    return nil
}

// getConfigs returns the desired ingress service configuration from the mounted
// configfile.
func (p *ingressProxy) getConfigs() (*ingressservices.Configs, error) {
    j, err := os.ReadFile(p.cfgPath)
    if os.IsNotExist(err) {
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    if len(j) == 0 || string(j) == "" {
        return nil, nil
    }
    cfg := &ingressservices.Configs{}
    if err := json.Unmarshal(j, &cfg); err != nil {
        return nil, err
    }
    return cfg, nil
}
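// For illustration only (not from this diff): given the ingressservices types
// used above, a mounted ingress configfile could plausibly look like the JSON
// below, mapping a Tailscale Service name to its VIP-to-ClusterIP mappings.
// The exact JSON key names depend on the ingressservices package's struct
// tags, which are not shown in this diff, so treat them as assumptions.
//
//    {
//      "svc:foo": {
//        "IPv4Mapping": {
//          "TailscaleServiceIP": "100.64.0.1",
//          "ClusterIP": "10.0.0.1"
//        },
//        "IPv6Mapping": {
//          "TailscaleServiceIP": "2001:db8::1",
//          "ClusterIP": "2001:db8::2"
//        }
//      }
//    }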
// getStatus gets the recorded status of the configured firewall. The status is
// stored in the proxy's state Secret. Note that the recorded status might not
// be the current status of the firewall if it belongs to a previous Pod; we
// take that into account further down the line when determining if the desired
// rules are actually present.
func (p *ingressProxy) getStatus(ctx context.Context) (*ingressservices.Status, error) {
    secret, err := p.kc.GetSecret(ctx, p.stateSecret)
    if err != nil {
        return nil, fmt.Errorf("error retrieving state Secret: %w", err)
    }
    status := &ingressservices.Status{}
    raw, ok := secret.Data[ingressservices.IngressConfigKey]
    if !ok {
        return nil, nil
    }
    if err := json.Unmarshal([]byte(raw), status); err != nil {
        return nil, fmt.Errorf("error unmarshalling previous config: %w", err)
    }
    return status, nil
}

// syncIngressConfigs takes the desired firewall configuration and the recorded
// status and ensures that any missing rules are added and no-longer-needed
// rules are deleted.
func (p *ingressProxy) syncIngressConfigs(cfgs *ingressservices.Configs, status *ingressservices.Status) error {
    rulesToAdd := p.getRulesToAdd(cfgs, status)
    rulesToDelete := p.getRulesToDelete(cfgs, status)

    if err := ensureIngressRulesDeleted(rulesToDelete, p.nfr); err != nil {
        return fmt.Errorf("error deleting ingress rules: %w", err)
    }
    if err := ensureIngressRulesAdded(rulesToAdd, p.nfr); err != nil {
        return fmt.Errorf("error adding ingress rules: %w", err)
    }
    return nil
}

// recordStatus writes the configured firewall status to the proxy's state
// Secret. This allows the Kubernetes Operator to determine whether this proxy
// Pod has set up firewall rules to route traffic for an ingress service.
func (p *ingressProxy) recordStatus(ctx context.Context, newCfg *ingressservices.Configs) error {
    status := &ingressservices.Status{}
    if newCfg != nil {
        status.Configs = *newCfg
    }
    // Pod IPs are used to determine if the recorded status applies to THIS proxy Pod.
    status.PodIPv4 = p.podIPv4
    status.PodIPv6 = p.podIPv6
    secret, err := p.kc.GetSecret(ctx, p.stateSecret)
    if err != nil {
        return fmt.Errorf("error retrieving state Secret: %w", err)
    }
    bs, err := json.Marshal(status)
    if err != nil {
        return fmt.Errorf("error marshalling status: %w", err)
    }
    secret.Data[ingressservices.IngressConfigKey] = bs
    patch := kubeclient.JSONPatch{
        Op:    "replace",
        Path:  fmt.Sprintf("/data/%s", ingressservices.IngressConfigKey),
        Value: bs,
    }
    if err := p.kc.JSONPatchResource(ctx, p.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil {
        return fmt.Errorf("error patching state Secret: %w", err)
    }
    return nil
}
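// For illustration only (not from this diff): the status recorded under the
// ingress config key of the state Secret mirrors the applied configs plus the
// Pod IPs of the proxy that applied them, so a serialized value could
// plausibly look like the JSON below. As with the configfile example above,
// the exact key names depend on struct tags not shown in this diff.
//
//    {
//      "Configs": {
//        "svc:foo": {
//          "IPv4Mapping": {"TailscaleServiceIP": "100.64.0.1", "ClusterIP": "10.0.0.1"}
//        }
//      },
//      "PodIPv4": "10.0.0.2",
//      "PodIPv6": "2001:db8::2"
//    }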
// getRulesToAdd takes the desired firewall configuration and the recorded
// firewall status and returns a map of missing Tailscale Services and rules.
func (p *ingressProxy) getRulesToAdd(cfgs *ingressservices.Configs, status *ingressservices.Status) map[string]ingressservices.Config {
    if cfgs == nil {
        return nil
    }
    var rulesToAdd map[string]ingressservices.Config
    for tsSvc, wantsCfg := range *cfgs {
        if status == nil || !p.isCurrentStatus(status) {
            mak.Set(&rulesToAdd, tsSvc, wantsCfg)
            continue
        }
        gotCfg := status.Configs.GetConfig(tsSvc)
        if gotCfg == nil || !reflect.DeepEqual(wantsCfg, *gotCfg) {
            mak.Set(&rulesToAdd, tsSvc, wantsCfg)
        }
    }
    return rulesToAdd
}

// getRulesToDelete takes the desired firewall configuration and the recorded
// status and returns a map of Tailscale Services and rules that need to be deleted.
func (p *ingressProxy) getRulesToDelete(cfgs *ingressservices.Configs, status *ingressservices.Status) map[string]ingressservices.Config {
    if status == nil || !p.isCurrentStatus(status) {
        return nil
    }
    var rulesToDelete map[string]ingressservices.Config
    for tsSvc, gotCfg := range status.Configs {
        if cfgs == nil {
            mak.Set(&rulesToDelete, tsSvc, gotCfg)
            continue
        }
        wantsCfg := cfgs.GetConfig(tsSvc)
        if wantsCfg != nil && reflect.DeepEqual(*wantsCfg, gotCfg) {
            continue
        }
        mak.Set(&rulesToDelete, tsSvc, gotCfg)
    }
    return rulesToDelete
}
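// Worked example (mirrors the "add_remove_modify" case in the test file later
// in this diff): suppose the recorded status was written by this same Pod and
// contains svc:foo -> 10.0.0.1, svc:bar -> 10.0.0.2 and svc:baz -> 10.0.0.3,
// while the desired config now asks for svc:foo -> 10.0.0.2 and a new
// svc:new -> 10.0.0.4. Then getRulesToAdd returns svc:foo (its mapping
// changed) and svc:new, while getRulesToDelete returns svc:foo's old mapping,
// svc:bar and svc:baz. If the recorded status instead carries another Pod's
// IPs, it is ignored entirely and every desired rule is (re)added, since this
// Pod's firewall starts out empty.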
// ensureIngressRulesAdded takes a map of Tailscale Services and rules and ensures that the firewall rules are added.
func ensureIngressRulesAdded(cfgs map[string]ingressservices.Config, nfr linuxfw.NetfilterRunner) error {
    for serviceName, cfg := range cfgs {
        if cfg.IPv4Mapping != nil {
            if err := addDNATRuleForSvc(nfr, serviceName, cfg.IPv4Mapping.TailscaleServiceIP, cfg.IPv4Mapping.ClusterIP); err != nil {
                return fmt.Errorf("error adding ingress rule for %s: %w", serviceName, err)
            }
        }
        if cfg.IPv6Mapping != nil {
            if err := addDNATRuleForSvc(nfr, serviceName, cfg.IPv6Mapping.TailscaleServiceIP, cfg.IPv6Mapping.ClusterIP); err != nil {
                return fmt.Errorf("error adding ingress rule for %s: %w", serviceName, err)
            }
        }
    }
    return nil
}

func addDNATRuleForSvc(nfr linuxfw.NetfilterRunner, serviceName string, tsIP, clusterIP netip.Addr) error {
    log.Printf("adding DNAT rule for Tailscale Service %s with IP %s to Kubernetes Service IP %s", serviceName, tsIP, clusterIP)
    return nfr.EnsureDNATRuleForSvc(serviceName, tsIP, clusterIP)
}

// ensureIngressRulesDeleted takes a map of Tailscale Services and rules and ensures that the firewall rules are deleted.
func ensureIngressRulesDeleted(cfgs map[string]ingressservices.Config, nfr linuxfw.NetfilterRunner) error {
    for serviceName, cfg := range cfgs {
        if cfg.IPv4Mapping != nil {
            if err := deleteDNATRuleForSvc(nfr, serviceName, cfg.IPv4Mapping.TailscaleServiceIP, cfg.IPv4Mapping.ClusterIP); err != nil {
                return fmt.Errorf("error deleting ingress rule for %s: %w", serviceName, err)
            }
        }
        if cfg.IPv6Mapping != nil {
            if err := deleteDNATRuleForSvc(nfr, serviceName, cfg.IPv6Mapping.TailscaleServiceIP, cfg.IPv6Mapping.ClusterIP); err != nil {
                return fmt.Errorf("error deleting ingress rule for %s: %w", serviceName, err)
            }
        }
    }
    return nil
}

func deleteDNATRuleForSvc(nfr linuxfw.NetfilterRunner, serviceName string, tsIP, clusterIP netip.Addr) error {
    log.Printf("deleting DNAT rule for Tailscale Service %s with IP %s to Kubernetes Service IP %s", serviceName, tsIP, clusterIP)
    return nfr.DeleteDNATRuleForSvc(serviceName, tsIP, clusterIP)
}

// isCurrentStatus returns true if the status of an ingress proxy as read from
// the proxy's state Secret is the status of the current proxy Pod. We use
// Pod's IP addresses to determine that the status is for this Pod.
func (p *ingressProxy) isCurrentStatus(status *ingressservices.Status) bool {
    if status == nil {
        return true
    }
    return status.PodIPv4 == p.podIPv4 && status.PodIPv6 == p.podIPv6
}
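// Note that isCurrentStatus also feeds into step 4 of sync above: even when
// the desired configs compare equal to the recorded ones, a status written by
// a different Pod (mismatched Pod IPs) causes recordStatus to run again, so
// the state Secret ends up stamped with this Pod's own podIPv4/podIPv6 once
// the rules have been re-applied.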
type ingressProxyOpts struct {
    cfgPath     string
    nfr         linuxfw.NetfilterRunner // never nil
    kc          kubeclient.Client       // never nil
    stateSecret string
    podIPv4     string
    podIPv6     string
}

// configure sets the ingress proxy's configuration. It is called once on start
// so we don't care about concurrent access to fields.
func (p *ingressProxy) configure(opts ingressProxyOpts) {
    p.cfgPath = opts.cfgPath
    p.nfr = opts.nfr
    p.kc = opts.kc
    p.stateSecret = opts.stateSecret
    p.podIPv4 = opts.podIPv4
    p.podIPv6 = opts.podIPv6
}

func ingressServicesStatusIsEqual(st, st1 *ingressservices.Configs) bool {
    if st == nil && st1 == nil {
        return true
    }
    if st == nil || st1 == nil {
        return false
    }
    return reflect.DeepEqual(*st, *st1)
}
@ -1,223 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build linux

package main

import (
    "net/netip"
    "testing"

    "tailscale.com/kube/ingressservices"
    "tailscale.com/util/linuxfw"
)

func TestSyncIngressConfigs(t *testing.T) {
    tests := []struct {
        name           string
        currentConfigs *ingressservices.Configs
        currentStatus  *ingressservices.Status
        wantServices   map[string]struct {
            TailscaleServiceIP netip.Addr
            ClusterIP          netip.Addr
        }
    }{
        {
            name: "add_new_rules_when_no_existing_config",
            currentConfigs: &ingressservices.Configs{
                "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
            },
            currentStatus: nil,
            wantServices: map[string]struct {
                TailscaleServiceIP netip.Addr
                ClusterIP          netip.Addr
            }{
                "svc:foo": makeWantService("100.64.0.1", "10.0.0.1"),
            },
        },
        {
            name: "add_multiple_services",
            currentConfigs: &ingressservices.Configs{
                "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
                "svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""),
                "svc:baz": makeServiceConfig("100.64.0.3", "10.0.0.3", "", ""),
            },
            currentStatus: nil,
            wantServices: map[string]struct {
                TailscaleServiceIP netip.Addr
                ClusterIP          netip.Addr
            }{
                "svc:foo": makeWantService("100.64.0.1", "10.0.0.1"),
                "svc:bar": makeWantService("100.64.0.2", "10.0.0.2"),
                "svc:baz": makeWantService("100.64.0.3", "10.0.0.3"),
            },
        },
        {
            name: "add_both_ipv4_and_ipv6_rules",
            currentConfigs: &ingressservices.Configs{
                "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "2001:db8::1", "2001:db8::2"),
            },
            currentStatus: nil,
            wantServices: map[string]struct {
                TailscaleServiceIP netip.Addr
                ClusterIP          netip.Addr
            }{
                "svc:foo": makeWantService("2001:db8::1", "2001:db8::2"),
            },
        },
        {
            name: "add_ipv6_only_rules",
            currentConfigs: &ingressservices.Configs{
                "svc:ipv6": makeServiceConfig("", "", "2001:db8::10", "2001:db8::20"),
            },
            currentStatus: nil,
            wantServices: map[string]struct {
                TailscaleServiceIP netip.Addr
                ClusterIP          netip.Addr
            }{
                "svc:ipv6": makeWantService("2001:db8::10", "2001:db8::20"),
            },
        },
        {
            name:           "delete_all_rules_when_config_removed",
            currentConfigs: nil,
            currentStatus: &ingressservices.Status{
                Configs: ingressservices.Configs{
                    "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
                    "svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""),
                },
                PodIPv4: "10.0.0.2",    // Current pod IPv4
                PodIPv6: "2001:db8::2", // Current pod IPv6
            },
            wantServices: map[string]struct {
                TailscaleServiceIP netip.Addr
                ClusterIP          netip.Addr
            }{},
        },
        {
            name: "add_remove_modify",
            currentConfigs: &ingressservices.Configs{
                "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.2", "", ""), // Changed cluster IP
                "svc:new": makeServiceConfig("100.64.0.4", "10.0.0.4", "", ""),
            },
            currentStatus: &ingressservices.Status{
                Configs: ingressservices.Configs{
                    "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
                    "svc:bar": makeServiceConfig("100.64.0.2", "10.0.0.2", "", ""),
                    "svc:baz": makeServiceConfig("100.64.0.3", "10.0.0.3", "", ""),
                },
                PodIPv4: "10.0.0.2",    // Current pod IPv4
                PodIPv6: "2001:db8::2", // Current pod IPv6
            },
            wantServices: map[string]struct {
                TailscaleServiceIP netip.Addr
                ClusterIP          netip.Addr
            }{
                "svc:foo": makeWantService("100.64.0.1", "10.0.0.2"),
                "svc:new": makeWantService("100.64.0.4", "10.0.0.4"),
            },
        },
        {
            name: "update_with_outdated_status",
            currentConfigs: &ingressservices.Configs{
                "svc:web": makeServiceConfig("100.64.0.10", "10.0.0.10", "", ""),
                "svc:web-ipv6": {
                    IPv6Mapping: &ingressservices.Mapping{
                        TailscaleServiceIP: netip.MustParseAddr("2001:db8::10"),
                        ClusterIP:          netip.MustParseAddr("2001:db8::20"),
                    },
                },
                "svc:api": makeServiceConfig("100.64.0.20", "10.0.0.20", "", ""),
            },
            currentStatus: &ingressservices.Status{
                Configs: ingressservices.Configs{
                    "svc:web": makeServiceConfig("100.64.0.10", "10.0.0.10", "", ""),
                    "svc:web-ipv6": {
                        IPv6Mapping: &ingressservices.Mapping{
                            TailscaleServiceIP: netip.MustParseAddr("2001:db8::10"),
                            ClusterIP:          netip.MustParseAddr("2001:db8::20"),
                        },
                    },
                    "svc:old": makeServiceConfig("100.64.0.30", "10.0.0.30", "", ""),
                },
                PodIPv4: "10.0.0.1",    // Outdated pod IP
                PodIPv6: "2001:db8::1", // Outdated pod IP
            },
            wantServices: map[string]struct {
                TailscaleServiceIP netip.Addr
                ClusterIP          netip.Addr
            }{
                "svc:web":      makeWantService("100.64.0.10", "10.0.0.10"),
                "svc:web-ipv6": makeWantService("2001:db8::10", "2001:db8::20"),
                "svc:api":      makeWantService("100.64.0.20", "10.0.0.20"),
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            var nfr linuxfw.NetfilterRunner = linuxfw.NewFakeNetfilterRunner()

            ep := &ingressProxy{
                nfr:     nfr,
                podIPv4: "10.0.0.2",    // Current pod IPv4
                podIPv6: "2001:db8::2", // Current pod IPv6
            }

            err := ep.syncIngressConfigs(tt.currentConfigs, tt.currentStatus)
            if err != nil {
                t.Fatalf("syncIngressConfigs failed: %v", err)
            }

            fake := nfr.(*linuxfw.FakeNetfilterRunner)
            gotServices := fake.GetServiceState()
            if len(gotServices) != len(tt.wantServices) {
                t.Errorf("got %d services, want %d", len(gotServices), len(tt.wantServices))
            }
            for svc, want := range tt.wantServices {
                got, ok := gotServices[svc]
                if !ok {
                    t.Errorf("service %s not found", svc)
                    continue
                }
                if got.TailscaleServiceIP != want.TailscaleServiceIP {
                    t.Errorf("service %s: got TailscaleServiceIP %v, want %v", svc, got.TailscaleServiceIP, want.TailscaleServiceIP)
                }
                if got.ClusterIP != want.ClusterIP {
                    t.Errorf("service %s: got ClusterIP %v, want %v", svc, got.ClusterIP, want.ClusterIP)
                }
            }
        })
    }
}
func makeServiceConfig(tsIP, clusterIP string, tsIP6, clusterIP6 string) ingressservices.Config {
    cfg := ingressservices.Config{}
    if tsIP != "" && clusterIP != "" {
        cfg.IPv4Mapping = &ingressservices.Mapping{
            TailscaleServiceIP: netip.MustParseAddr(tsIP),
            ClusterIP:          netip.MustParseAddr(clusterIP),
        }
    }
    if tsIP6 != "" && clusterIP6 != "" {
        cfg.IPv6Mapping = &ingressservices.Mapping{
            TailscaleServiceIP: netip.MustParseAddr(tsIP6),
            ClusterIP:          netip.MustParseAddr(clusterIP6),
        }
    }
    return cfg
}

func makeWantService(tsIP, clusterIP string) struct {
    TailscaleServiceIP netip.Addr
    ClusterIP          netip.Addr
} {
    return struct {
        TailscaleServiceIP netip.Addr
        ClusterIP          netip.Addr
    }{
        TailscaleServiceIP: netip.MustParseAddr(tsIP),
        ClusterIP:          netip.MustParseAddr(clusterIP),
    }
}
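// Illustrative sketch (not one of the original cases): with the helpers above,
// an additional table entry covering a single modified IPv4 mapping could be
// written as follows; it mirrors the "add_remove_modify" case but isolates the
// modify path.
//
//    {
//        name: "modify_single_ipv4_mapping",
//        currentConfigs: &ingressservices.Configs{
//            "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.9", "", ""),
//        },
//        currentStatus: &ingressservices.Status{
//            Configs: ingressservices.Configs{
//                "svc:foo": makeServiceConfig("100.64.0.1", "10.0.0.1", "", ""),
//            },
//            PodIPv4: "10.0.0.2",
//            PodIPv6: "2001:db8::2",
//        },
//        wantServices: map[string]struct {
//            TailscaleServiceIP netip.Addr
//            ClusterIP          netip.Addr
//        }{
//            "svc:foo": makeWantService("100.64.0.1", "10.0.0.9"),
//        },
//    },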