mirror of https://github.com/tailscale/tailscale/
Compare commits
198 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
7bc25f77f4 | 4 hours ago |
|
|
6a44990b09 | 5 hours ago |
|
|
e33f6aa3ba | 6 hours ago |
|
|
f8cd07fb8a | 1 day ago |
|
|
b8c58ca7c1 | 1 day ago |
|
|
536188c1b5 | 1 day ago |
|
|
957a443b23 | 1 day ago |
|
|
bd5c50909f | 1 day ago |
|
|
22a815b6d2 | 1 day ago |
|
|
8976b34cb8 | 1 day ago |
|
|
77dcdc223e | 1 day ago |
|
|
ece6e27f39 | 2 days ago |
|
|
97f1fd6d48 | 2 days ago |
|
|
37b4dd047f | 2 days ago |
|
|
bd12d8f12f | 2 days ago |
|
|
34dff57137 | 2 days ago |
|
|
f36eb81e61 | 5 days ago |
|
|
7c5c02b77a | 5 days ago |
|
|
411cee0dc9 | 6 days ago |
|
|
b40272e767 | 6 days ago |
|
|
22bdf34a00 | 6 days ago |
|
|
c0c0d45114 | 6 days ago |
|
|
3e2476ec13 | 6 days ago |
|
|
9500689bc1 | 6 days ago |
|
|
9cc07bf9c0 | 7 days ago |
|
|
74ed589042 | 1 week ago |
|
|
3f9f0ed93c | 1 week ago |
|
|
5ee0c6bf1d | 1 week ago |
|
|
9eff8a4503 | 1 week ago |
|
|
8af7778ce0 | 1 week ago |
|
|
b7658a4ad2 | 1 week ago |
|
|
824027305a | 1 week ago |
|
|
53476ce872 | 1 week ago |
|
|
c54d243690 | 1 week ago |
|
|
b38dd1ae06 | 1 week ago |
|
|
f4a4bab105 | 1 week ago |
|
|
ac0b15356d | 1 week ago |
|
|
848978e664 | 1 week ago |
|
|
7073f246d3 | 1 week ago |
|
|
d4821cdc2f | 1 week ago |
|
|
9c3a2aa797 | 1 week ago |
|
|
7426eca163 | 1 week ago |
|
|
755309c04e | 1 week ago |
|
|
6637003cc8 | 1 week ago |
|
|
698eecda04 | 1 week ago |
|
|
a20cdb5c93 | 1 week ago |
|
|
16587746ed | 2 weeks ago |
|
|
1ccece0f78 | 2 weeks ago |
|
|
9245c7131b | 2 weeks ago |
|
|
e7f5ca1d5e | 2 weeks ago |
|
|
3780f25d51 | 2 weeks ago |
|
|
016ccae2da | 2 weeks ago |
|
|
ce95bc77fb | 2 weeks ago |
|
|
c679aaba32 | 2 weeks ago |
|
|
de8ed203e0 | 2 weeks ago |
|
|
ac74d28190 | 2 weeks ago |
|
|
42a5262016 | 2 weeks ago |
|
|
682172ca2d | 2 weeks ago |
|
|
7d19813618 | 2 weeks ago |
|
|
86a849860e | 2 weeks ago |
|
|
a0d059d74c | 2 weeks ago |
|
|
12c598de28 | 2 weeks ago |
|
|
976bf24f5e | 2 weeks ago |
|
|
6ac4356bce | 2 weeks ago |
|
|
336df56f85 | 2 weeks ago |
|
|
aeda3e8183 | 2 weeks ago |
|
|
62d64c05e1 | 2 weeks ago |
|
|
e1dd9222d4 | 2 weeks ago |
|
|
38ccdbe35c | 2 weeks ago |
|
|
408336a089 | 2 weeks ago |
|
|
5b0c57f497 | 2 weeks ago |
|
|
3b865d7c33 | 2 weeks ago |
|
|
c09c95ef67 | 2 weeks ago |
|
|
da508c504d | 2 weeks ago |
|
|
d0daa5a398 | 2 weeks ago |
|
|
04a9d25a54 | 2 weeks ago |
|
|
bd29b189fe | 2 weeks ago |
|
|
2a6cbb70d9 | 2 weeks ago |
|
|
4e2f2d1088 | 2 weeks ago |
|
|
af7c26aa05 | 2 weeks ago |
|
|
85373ef822 | 2 weeks ago |
|
|
c2e474e729 | 2 weeks ago |
|
|
9048ea25db | 2 weeks ago |
|
|
a2e9dfacde | 2 weeks ago |
|
|
4860c460f5 | 2 weeks ago |
|
|
41662f5128 | 2 weeks ago |
|
|
26f9b50247 | 2 weeks ago |
|
|
f1cddc6ecf | 2 weeks ago |
|
|
165a24744e | 2 weeks ago |
|
|
1723cb83ed | 2 weeks ago |
|
|
d01081683c | 2 weeks ago |
|
|
200383dce5 | 2 weeks ago |
|
|
1e95bfa184 | 2 weeks ago |
|
|
a5b2f18567 | 2 weeks ago |
|
|
139c395d7d | 2 weeks ago |
|
|
99b06eac49 | 2 weeks ago |
|
|
3a41c0c585 | 2 weeks ago |
|
|
653d0738f9 | 2 weeks ago |
|
|
98aadbaf54 | 3 weeks ago |
|
|
4e01e8a66e | 3 weeks ago |
|
|
8aa46a3956 | 3 weeks ago |
|
|
8444659ed8 | 3 weeks ago |
|
|
e1f0ad7a05 | 3 weeks ago |
|
|
a96ef432cf | 3 weeks ago |
|
|
c5919b4ed1 | 3 weeks ago |
|
|
888a5d4812 | 3 weeks ago |
|
|
9134440008 | 3 weeks ago |
|
|
bd36817e84 | 3 weeks ago |
|
|
ab4b990d51 | 3 weeks ago |
|
|
ce10f7c14c | 3 weeks ago |
|
|
208a32af5b | 3 weeks ago |
|
|
052602752f | 3 weeks ago |
|
|
0285e1d5fb | 3 weeks ago |
|
|
124301fbb6 | 3 weeks ago |
|
|
b5cd29932e | 3 weeks ago |
|
|
9e4d1fd87f | 3 weeks ago |
|
|
146ea42822 | 3 weeks ago |
|
|
08e74effc0 | 3 weeks ago |
|
|
ca9b68aafd | 3 weeks ago |
|
|
6ac80b7334 | 3 weeks ago |
|
|
f4f9dd7f8c | 3 weeks ago |
|
|
31fe75ad9e | 3 weeks ago |
|
|
37aa7e6935 | 3 weeks ago |
|
|
f387b1010e | 3 weeks ago |
|
|
27a0168cdc | 3 weeks ago |
|
|
e8d2f96449 | 3 weeks ago |
|
|
16e90dcb27 | 3 weeks ago |
|
|
d37884c734 | 3 weeks ago |
|
|
85cb64c4ff | 3 weeks ago |
|
|
3280dac797 | 3 weeks ago |
|
|
1eba5b0cbd | 3 weeks ago |
|
|
42ce5c88be | 3 weeks ago |
|
|
2ad2d4d409 | 3 weeks ago |
|
|
18806de400 | 3 weeks ago |
|
|
4650061326 | 3 weeks ago |
|
|
6e24f50946 | 3 weeks ago |
|
|
8ed6bb3198 | 3 weeks ago |
|
|
e0e8731130 | 3 weeks ago |
|
|
e059382174 | 3 weeks ago |
|
|
fe5501a4e9 | 3 weeks ago |
|
|
4c67df42f6 | 3 weeks ago |
|
|
c7dbd3987e | 3 weeks ago |
|
|
ae3dff15e4 | 3 weeks ago |
|
|
2e265213fd | 3 weeks ago |
|
|
de733c5951 | 3 weeks ago |
|
|
875a9c526d | 4 weeks ago |
|
|
bab5e68d0a | 4 weeks ago |
|
|
d4c5b278b3 | 4 weeks ago |
|
|
1ed117dbc0 | 4 weeks ago |
|
|
5b40f0bc54 | 4 weeks ago |
|
|
446752687c | 4 weeks ago |
|
|
77123a569b | 4 weeks ago |
|
|
db7dcd516f | 1 month ago |
|
|
4c856078e4 | 1 month ago |
|
|
061e6266cf | 1 month ago |
|
|
f522b9dbb7 | 1 month ago |
|
|
b6c6960e40 | 1 month ago |
|
|
adee8b9180 | 1 month ago |
|
|
95426b79a9 | 1 month ago |
|
|
d68513b0db | 1 month ago |
|
|
05d2dcaf49 | 1 month ago |
|
|
8996254647 | 1 month ago |
|
|
d5a40c01ab | 1 month ago |
|
|
74f1d8bd87 | 1 month ago |
|
|
da90e3d8f2 | 1 month ago |
|
|
06b092388e | 1 month ago |
|
|
3c19addc21 | 1 month ago |
|
|
9ac8105fda | 1 month ago |
|
|
478342a642 | 1 month ago |
|
|
fcb614a53e | 1 month ago |
|
|
09a2a1048d | 1 month ago |
|
|
edb11e0e60 | 1 month ago |
|
|
0a5ba8280f | 1 month ago |
|
|
db5815fb97 | 1 month ago |
|
|
02681732d1 | 1 month ago |
|
|
d2e4a20f26 | 1 month ago |
|
|
d6fa899eba | 1 month ago |
|
|
576aacd459 | 1 month ago |
|
|
f4e2720821 | 1 month ago |
|
|
34e992f59d | 1 month ago |
|
|
a760cbe33f | 1 month ago |
|
|
4346615d77 | 1 month ago |
|
|
fd0e541e5d | 1 month ago |
|
|
7418583e47 | 1 month ago |
|
|
d47c697748 | 1 month ago |
|
|
8576a802ca | 1 month ago |
|
|
672b1f0e76 | 1 month ago |
|
|
36ad24b20f | 1 month ago |
|
|
afaa23c3b4 | 1 month ago |
|
|
c2d62d25c6 | 1 month ago |
|
|
c59c859f7d | 1 month ago |
|
|
23359dc727 | 1 month ago |
|
|
2b448f0696 | 1 month ago |
|
|
3944809a11 | 1 month ago |
|
|
675b1c6d54 | 1 month ago |
|
|
ab435ce3a6 | 1 month ago |
|
|
3dde233cd3 | 1 month ago |
|
|
bf47d8e72b | 1 month ago |
@ -0,0 +1,78 @@
|
||||
#!/usr/bin/env bash
#
# This script sets up cigocacher, but should never fail the build if unsuccessful.
# It expects to run on a GitHub-hosted runner, and connects to cigocached over a
# private Azure network that is configured at the runner group level in GitHub.
#
# Usage: ./action.sh
# Inputs:
#   URL: The cigocached server URL.
# Outputs:
#   success: Whether cigocacher was set up successfully.

set -euo pipefail

if [ -z "${GITHUB_ACTIONS:-}" ]; then
  echo "This script is intended to run within GitHub Actions"
  exit 1
fi

if [ -z "${URL:-}" ]; then
  echo "No cigocached URL is set, skipping cigocacher setup"
  exit 0
fi

# curl_and_parse fetches a URL and extracts a field from its JSON response.
# Arguments: a jq filter, a human-readable step name (for error messages),
# then any additional curl arguments. On curl or jq failure it logs to stderr
# and returns 1 so callers can fall back gracefully.
curl_and_parse() {
  local jq_filter="$1"
  local step="$2"
  shift 2

  local response
  local curl_exit
  # Capture the exit code explicitly so `set -e` doesn't abort the script.
  response="$(curl -sSL "$@" 2>&1)" || curl_exit="$?"
  if [ "${curl_exit:-0}" -ne "0" ]; then
    echo "${step}: ${response}" >&2
    return 1
  fi

  local parsed
  local jq_exit
  parsed=$(echo "${response}" | jq -e -r "${jq_filter}" 2>&1) || jq_exit=$?
  if [ "${jq_exit:-0}" -ne "0" ]; then
    echo "${step}: Failed to parse JSON response:" >&2
    echo "${response}" >&2
    return 1
  fi

  echo "${parsed}"
  return 0
}

# Fetch a GitHub OIDC identity token. On failure, exit 0 so the build falls
# back to the plain disk cache rather than failing.
JWT="$(curl_and_parse ".value" "Fetching GitHub identity JWT" \
  -H "Authorization: Bearer ${ACTIONS_ID_TOKEN_REQUEST_TOKEN}" \
  "${ACTIONS_ID_TOKEN_REQUEST_URL}&audience=gocached")" || exit 0

# cigocached serves a TLS cert with an FQDN, but DNS is based on VM name.
HOST_AND_PORT="${URL#http*://}"
FIRST_LABEL="${HOST_AND_PORT/.*/}"
# Save CONNECT_TO for later steps to use.
echo "CONNECT_TO=${HOST_AND_PORT}:${FIRST_LABEL}:" >> "${GITHUB_ENV}"
BODY="$(jq -n --arg jwt "$JWT" '{"jwt": $jwt}')"
CIGOCACHER_TOKEN="$(curl_and_parse ".access_token" "Exchanging token with cigocached" \
  --connect-to "${HOST_AND_PORT}:${FIRST_LABEL}:" \
  -H "Content-Type: application/json" \
  "$URL/auth/exchange-token" \
  -d "$BODY")" || exit 0

# Wait until we successfully auth before building cigocacher to ensure we know
# it's worth building.
# TODO(tomhjp): bake cigocacher into runner image and use it for auth.
echo "Fetched cigocacher token successfully"
echo "::add-mask::${CIGOCACHER_TOKEN}"
echo "CIGOCACHER_TOKEN=${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}"

BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(go env GOEXE)"

# Under `set -e`, an unguarded failing `go build` would fail the whole step,
# contradicting the contract above that this script never fails the build.
# Treat a build failure as "cache unavailable": exit successfully without
# exporting GOCACHEPROG or success=true.
if ! go build -o "${BIN_PATH}" ./cmd/cigocacher; then
  echo "Failed to build cigocacher, skipping setup" >&2
  exit 0
fi
echo "GOCACHEPROG=${BIN_PATH} --cache-dir ${CACHE_DIR} --cigocached-url ${URL} --token ${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}"
echo "success=true" >> "${GITHUB_OUTPUT}"
|
||||
@ -0,0 +1,30 @@
|
||||
# Composite action that configures the Go build to use cigocacher, a
# Tailscale-specific GOCACHEPROG client backed by a cigocached server.
name: go-cache
description: Set up build to use cigocacher

inputs:
  cigocached-url:
    description: URL of the cigocached server
    required: true
  checkout-path:
    description: Path to cloned repository
    required: true
  cache-dir:
    description: Directory to use for caching
    required: true

outputs:
  success:
    description: Whether cigocacher was set up successfully
    # Propagated from the setup step below, which writes it to $GITHUB_OUTPUT.
    value: ${{ steps.setup.outputs.success }}

runs:
  using: composite
  steps:
    - name: Setup cigocacher
      id: setup
      shell: bash
      env:
        # action.sh reads its configuration from these environment variables
        # rather than from positional arguments.
        URL: ${{ inputs.cigocached-url }}
        CACHE_DIR: ${{ inputs.cache-dir }}
      working-directory: ${{ inputs.checkout-path }}
      run: .github/actions/go-cache/action.sh
|
||||
@ -0,0 +1,38 @@
|
||||
# Workflow that builds Tailscale's custom vet tool (tailscale.com/cmd/vet)
# and runs it across the whole module whenever Go files change.
name: tailscale.com/cmd/vet

env:
  HOME: ${{ github.workspace }}
  # GOMODCACHE is the same definition on all OSes. Within the workspace, we use
  # toplevel directories "src" (for the checked out source code), and "gomodcache"
  # and other caches as siblings to follow.
  GOMODCACHE: ${{ github.workspace }}/gomodcache

on:
  push:
    branches:
      - main
      - "release-branch/*"
    paths:
      - "**.go"
  pull_request:
    paths:
      - "**.go"

jobs:
  vet:
    runs-on: [ self-hosted, linux ]
    timeout-minutes: 5

    steps:
      - name: Check out code
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          # Checked out under "src" so sibling cache dirs (see GOMODCACHE
          # above) stay outside the source tree.
          path: src

      - name: Build 'go vet' tool
        working-directory: src
        # ./tool/go pins the repo's Go toolchain version.
        run: ./tool/go build -o /tmp/vettool tailscale.com/cmd/vet

      - name: Run 'go vet'
        working-directory: src
        run: ./tool/go vet -vettool=/tmp/vettool tailscale.com/...
|
||||
@ -1,147 +1,103 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
# Tailscale Community Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We are committed to creating an open, welcoming, diverse, inclusive,
|
||||
healthy and respectful community.
|
||||
We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community.
|
||||
Unacceptable, harmful and inappropriate behavior will not be tolerated.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
* Demonstrating empathy and kindness toward other people.
|
||||
* Being respectful of differing opinions, viewpoints, and experiences.
|
||||
* Giving and gracefully accepting constructive feedback.
|
||||
* Accepting responsibility and apologizing to those affected by our
|
||||
mistakes, and learning from the experience.
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community.
|
||||
Examples of behavior that contributes to a positive environment for our community include:
|
||||
|
||||
- Demonstrating empathy and kindness toward other people.
|
||||
- Being respectful of differing opinions, viewpoints, and experiences.
|
||||
- Giving and gracefully accepting constructive feedback.
|
||||
- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience.
|
||||
- Focusing on what is best not just for us as individuals, but for the overall community.
|
||||
|
||||
Examples of unacceptable behavior include without limitation:
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind.
|
||||
* The use of violent, intimidating or bullying language or imagery.
|
||||
* Trolling, insulting or derogatory comments, and personal or
|
||||
political attacks.
|
||||
* Public or private harassment.
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission.
|
||||
* Spamming community channels and members, such as sending repeat messages,
|
||||
low-effort content, or automated messages.
|
||||
* Phishing or any similar activity;
|
||||
* Distributing or promoting malware;
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting.
|
||||
|
||||
Please also see the Tailscale Acceptable Use Policy, available at
|
||||
[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
|
||||
|
||||
# Reporting Incidents
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported to Tailscale directly via info@tailscale.com, or to
|
||||
the community leaders or moderators via DM or similar.
|
||||
|
||||
- The use of language, imagery or emojis (collectively "content") that is racist, sexist, homophobic, transphobic, or otherwise harassing or discriminatory based on any protected characteristic.
|
||||
- The use of sexualized content and sexual attention or advances of any kind.
|
||||
- The use of violent, intimidating or bullying content.
|
||||
- Trolling, concern trolling, insulting or derogatory comments, and personal or political attacks.
|
||||
- Public or private harassment.
|
||||
- Publishing others' personal information, such as a photo, physical address, email address, online profile information, or other personal information, without their explicit permission or with the intent to bully or harass the other person.
|
||||
- Posting deep fake or other AI generated content about or involving another person without their explicit permission.
|
||||
- Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages.
|
||||
- Phishing or any similar activity.
|
||||
- Distributing or promoting malware.
|
||||
- The use of any coded or suggestive content to hide or provoke otherwise unacceptable behavior.
|
||||
- Other conduct which could reasonably be considered harmful, illegal, or inappropriate in a professional setting.
|
||||
|
||||
Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
|
||||
|
||||
## Reporting Incidents
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to Tailscale directly via <info@tailscale.com>, or to the community leaders or moderators via DM or similar.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
We will respect the privacy and safety of the reporter of any issues.
|
||||
|
||||
Please note that this community is not moderated by staff 24/7, and we
|
||||
do not have, and do not undertake, any obligation to prescreen, monitor,
|
||||
edit, or remove any content or data, or to actively seek facts or
|
||||
circumstances indicating illegal activity. While we strive to keep the
|
||||
community safe and welcoming, moderation may not be immediate at all hours.
|
||||
Please note that this community is not moderated by staff 24/7, and we do not have, and do not undertake, any obligation to prescreen, monitor, edit, or remove any content or data, or to actively seek facts or circumstances indicating illegal activity.
|
||||
While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours.
|
||||
If you encounter any issues, report them using the appropriate channels.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Community leaders and moderators are responsible for clarifying and
|
||||
enforcing our standards of acceptable behavior and will take appropriate
|
||||
and fair corrective action in response to any behavior that they deem
|
||||
inappropriate, threatening, offensive, or harmful.
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders and moderators have the right and responsibility to remove,
|
||||
edit, or reject comments, commits, code, wiki edits, issues, and other
|
||||
contributions that are not aligned to this Community Code of Conduct.
|
||||
Tailscale retains full discretion to take action (or not) in response
|
||||
to a violation of these guidelines with or without notice or liability
|
||||
to you. We will interpret our policies and resolve disputes in favor of
|
||||
protecting users, customers, the public, our community and our company,
|
||||
as a whole.
|
||||
Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
|
||||
|
||||
## Enforcement Guidelines
|
||||
Community leaders and moderators have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Community Code of Conduct.
|
||||
Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you.
|
||||
We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole.
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in
|
||||
determining the consequences for any action they deem in violation of
|
||||
this Code of Conduct:
|
||||
Community leaders will follow these community enforcement guidelines in determining the consequences for any action they deem in violation of this Code of Conduct,
|
||||
and retain full discretion to apply the enforcement guidelines as necessary depending on the circumstances:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
Community Impact: Use of inappropriate language or other behavior
|
||||
deemed unprofessional or unwelcome in the community.
|
||||
Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
|
||||
|
||||
Consequence: A private, written warning from community leaders,
|
||||
providing clarity around the nature of the violation and an
|
||||
explanation of why the behavior was inappropriate. A public apology
|
||||
may be requested.
|
||||
Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate.
|
||||
A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
Community Impact: A violation through a single incident or series
|
||||
of actions.
|
||||
Community Impact: A violation through a single incident or series of actions.
|
||||
|
||||
Consequence: A warning with consequences for continued
|
||||
behavior. No interaction with the people involved, including
|
||||
unsolicited interaction with those enforcing this Community Code of Conduct,
|
||||
for a specified period of time. This includes avoiding interactions in
|
||||
community spaces as well as external channels like social
|
||||
media. Violating these terms may lead to a temporary or permanent ban.
|
||||
Consequence: A warning with consequences for continued behavior.
|
||||
No interaction with the people involved, including unsolicited interaction with those enforcing this Community Code of Conduct, for a specified period of time.
|
||||
This includes avoiding interactions in community spaces as well as external channels like social media.
|
||||
Violating these terms may lead to a temporary or permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
Community Impact: A serious violation of community standards,
|
||||
including sustained inappropriate behavior.
|
||||
Community Impact: A serious violation of community standards, including sustained inappropriate behavior.
|
||||
|
||||
Consequence: A temporary ban from any sort of interaction or
|
||||
public communication with the community for a specified period of
|
||||
time. No public or private interaction with the people involved,
|
||||
including unsolicited interaction with those enforcing the Code of Conduct,
|
||||
is allowed during this period. Violating these terms may lead to a permanent ban.
|
||||
Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time.
|
||||
No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
Community Impact: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of
|
||||
an individual, or aggression toward or disparagement of
|
||||
classes of individuals.
|
||||
Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
Consequence: A permanent ban from any sort of public interaction
|
||||
within the community.
|
||||
Consequence: A permanent ban from any sort of public interaction within the community.
|
||||
|
||||
## Acceptable Use Policy
|
||||
|
||||
Violation of this Community Code of Conduct may also violate the
|
||||
Tailscale Acceptable Use Policy, which may result in suspension or
|
||||
termination of your Tailscale account. For more information, please
|
||||
see the Tailscale Acceptable Use Policy, available at
|
||||
[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
|
||||
Violation of this Community Code of Conduct may also violate the Tailscale Acceptable Use Policy, which may result in suspension or termination of your Tailscale account.
|
||||
For more information, please see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup).
|
||||
|
||||
## Privacy
|
||||
|
||||
Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy)
|
||||
for more information about how Tailscale collects, uses, discloses and protects
|
||||
information.
|
||||
Please see the Tailscale [Privacy Policy](https://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor
|
||||
Covenant][homepage], version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at <https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of
|
||||
conduct enforcement ladder](https://github.com/mozilla/diversity).
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the
|
||||
FAQ at https://www.contributor-covenant.org/faq. Translations are
|
||||
available at https://www.contributor-covenant.org/translations.
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at <https://www.contributor-covenant.org/faq>.
|
||||
Translations are available at <https://www.contributor-covenant.org/translations>.
|
||||
|
||||
@ -1 +1 @@
|
||||
1.90.3
|
||||
1.93.0
|
||||
|
||||
@ -0,0 +1,61 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package appc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/netip"
|
||||
|
||||
"go4.org/netipx"
|
||||
)
|
||||
|
||||
// errPoolExhausted is returned when there are no more addresses to iterate over.
var errPoolExhausted = errors.New("ip pool exhausted")

// ippool allows for iteration over all the addresses within a netipx.IPSet.
// netipx.IPSet has a Ranges call that returns the "minimum and sorted set of IP ranges that covers [the set]".
// netipx.IPRange is "an inclusive range of IP addresses from the same address family.". So we can iterate over
// all the addresses in the set by keeping a track of the last address we returned, calling Next on the last address
// to get the new one, and if we run off the edge of the current range, starting on the next one.
type ippool struct {
	// ranges defines the addresses in the pool.
	ranges []netipx.IPRange
	// last is internal tracking of which the last address provided was.
	// The zero (invalid) netip.Addr means iteration has not started yet.
	last netip.Addr
	// rangeIdx is internal tracking of which netipx.IPRange from the IPSet we are currently on.
	// Once it reaches len(ranges), the pool is exhausted.
	rangeIdx int
}
|
||||
|
||||
func newIPPool(ipset *netipx.IPSet) *ippool {
|
||||
if ipset == nil {
|
||||
return &ippool{}
|
||||
}
|
||||
return &ippool{ranges: ipset.Ranges()}
|
||||
}
|
||||
|
||||
// next returns the next address from the set, or errPoolExhausted if we have
|
||||
// iterated over the whole set.
|
||||
func (ipp *ippool) next() (netip.Addr, error) {
|
||||
if ipp.rangeIdx >= len(ipp.ranges) {
|
||||
// ipset is empty or we have iterated off the end
|
||||
return netip.Addr{}, errPoolExhausted
|
||||
}
|
||||
if !ipp.last.IsValid() {
|
||||
// not initialized yet
|
||||
ipp.last = ipp.ranges[0].From()
|
||||
return ipp.last, nil
|
||||
}
|
||||
currRange := ipp.ranges[ipp.rangeIdx]
|
||||
if ipp.last == currRange.To() {
|
||||
// then we need to move to the next range
|
||||
ipp.rangeIdx++
|
||||
if ipp.rangeIdx >= len(ipp.ranges) {
|
||||
return netip.Addr{}, errPoolExhausted
|
||||
}
|
||||
ipp.last = ipp.ranges[ipp.rangeIdx].From()
|
||||
return ipp.last, nil
|
||||
}
|
||||
ipp.last = ipp.last.Next()
|
||||
return ipp.last, nil
|
||||
}
|
||||
@ -0,0 +1,60 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package appc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
|
||||
func TestNext(t *testing.T) {
|
||||
a := ippool{}
|
||||
_, err := a.next()
|
||||
if !errors.Is(err, errPoolExhausted) {
|
||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
||||
}
|
||||
|
||||
var isb netipx.IPSetBuilder
|
||||
ipset := must.Get(isb.IPSet())
|
||||
b := newIPPool(ipset)
|
||||
_, err = b.next()
|
||||
if !errors.Is(err, errPoolExhausted) {
|
||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
||||
}
|
||||
|
||||
isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("192.168.0.0"), netip.MustParseAddr("192.168.0.2")))
|
||||
isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("200.0.0.0"), netip.MustParseAddr("200.0.0.0")))
|
||||
isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("201.0.0.0"), netip.MustParseAddr("201.0.0.1")))
|
||||
ipset = must.Get(isb.IPSet())
|
||||
c := newIPPool(ipset)
|
||||
expected := []string{
|
||||
"192.168.0.0",
|
||||
"192.168.0.1",
|
||||
"192.168.0.2",
|
||||
"200.0.0.0",
|
||||
"201.0.0.0",
|
||||
"201.0.0.1",
|
||||
}
|
||||
for i, want := range expected {
|
||||
addr, err := c.next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if addr != netip.MustParseAddr(want) {
|
||||
t.Fatalf("next call %d want: %s, got: %v", i, want, addr)
|
||||
}
|
||||
}
|
||||
_, err = c.next()
|
||||
if !errors.Is(err, errPoolExhausted) {
|
||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
||||
}
|
||||
_, err = c.next()
|
||||
if !errors.Is(err, errPoolExhausted) {
|
||||
t.Fatalf("expected errPoolExhausted, got %v", err)
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,311 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// cigocacher is an opinionated-to-Tailscale client for gocached. It connects
|
||||
// at a URL like "https://ci-gocached-azure-1.corp.ts.net:31364", but that is
|
||||
// stored in a GitHub actions variable so that its hostname can be updated for
|
||||
// all branches at the same time in sync with the actual infrastructure.
|
||||
//
|
||||
// It authenticates using GitHub OIDC tokens, and all HTTP errors are ignored
|
||||
// so that its failure mode is just that builds get slower and fall back to
|
||||
// disk-only cache.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
jsonv1 "encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/bradfitz/go-tool-cache/cacheproc"
|
||||
"github.com/bradfitz/go-tool-cache/cachers"
|
||||
)
|
||||
|
||||
// main runs cigocacher either in one-shot auth mode (--auth: exchange a
// GitHub OIDC token for a cigocached access token and print it) or as a
// long-lived GOCACHEPROG process serving Get/Put requests, backed by a disk
// cache and, optionally, a remote cigocached server.
func main() {
	var (
		auth          = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output")
		token         = flag.String("token", "", "the cigocached access token to use, as created using --auth")
		cigocachedURL = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). empty means to not use one.")
		dir           = flag.String("cache-dir", "", "cache directory; empty means automatic")
		verbose       = flag.Bool("verbose", false, "enable verbose logging")
	)
	flag.Parse()

	if *auth {
		// Auth mode: failures are logged and swallowed (exit 0) so that CI
		// falls back to disk-only caching instead of failing the build.
		if *cigocachedURL == "" {
			log.Print("--cigocached-url is empty, skipping auth")
			return
		}
		tk, err := fetchAccessToken(httpClient(), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL"), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN"), *cigocachedURL)
		if err != nil {
			log.Printf("error fetching access token, skipping auth: %v", err)
			return
		}
		// The token is the sole stdout output, for capture by the caller.
		fmt.Println(tk)
		return
	}

	if *dir == "" {
		d, err := os.UserCacheDir()
		if err != nil {
			log.Fatal(err)
		}
		*dir = filepath.Join(d, "go-cacher")
		log.Printf("Defaulting to cache dir %v ...", *dir)
	}
	if err := os.MkdirAll(*dir, 0750); err != nil {
		log.Fatal(err)
	}

	// Disk cache is always present; the remote tier is added below only when
	// a cigocached URL was supplied.
	c := &cigocacher{
		disk: &cachers.DiskCache{
			Dir:     *dir,
			Verbose: *verbose,
		},
		verbose: *verbose,
	}
	if *cigocachedURL != "" {
		if *verbose {
			log.Printf("Using cigocached at %s", *cigocachedURL)
		}
		c.gocached = &gocachedClient{
			baseURL:     *cigocachedURL,
			cl:          httpClient(),
			accessToken: *token,
			verbose:     *verbose,
		}
	}
	// p is declared before being assigned so the Close closure can reference
	// p's own counters when it runs at shutdown.
	var p *cacheproc.Process
	p = &cacheproc.Process{
		Close: func() error {
			if c.verbose {
				log.Printf("gocacheprog: closing; %d gets (%d hits, %d misses, %d errors); %d puts (%d errors)",
					p.Gets.Load(), p.GetHits.Load(), p.GetMisses.Load(), p.GetErrors.Load(), p.Puts.Load(), p.PutErrors.Load())
			}
			return c.close()
		},
		Get: c.get,
		Put: c.put,
	}

	// Run serves the GOCACHEPROG protocol over stdin/stdout until the Go
	// tool closes the stream.
	if err := p.Run(); err != nil {
		log.Fatal(err)
	}
}
|
||||
|
||||
// httpClient returns an http.Client whose dialer strips the ".corp.ts.net"
// suffix from hostnames before dialing, so names served with corp.ts.net TLS
// certs resolve to the VM's private IP via its bare hostname.
func httpClient() *http.Client {
	dial := func(ctx context.Context, network, addr string) (net.Conn, error) {
		if host, port, splitErr := net.SplitHostPort(addr); splitErr == nil {
			// This does not run in a tailnet. We serve corp.ts.net
			// TLS certs, and override DNS resolution to lookup the
			// private IP for the VM by its hostname.
			if vm, ok := strings.CutSuffix(host, ".corp.ts.net"); ok {
				addr = net.JoinHostPort(vm, port)
			}
		}
		var dialer net.Dialer
		return dialer.DialContext(ctx, network, addr)
	}
	return &http.Client{
		Transport: &http.Transport{DialContext: dial},
	}
}
|
||||
|
||||
// cigocacher implements the GOCACHEPROG get/put callbacks. Every object is
// stored in the local disk cache; when gocached is non-nil, gets fall back to
// and puts are mirrored to the remote cigocached server on a best-effort
// basis (remote failures never fail the build).
type cigocacher struct {
	disk     *cachers.DiskCache
	gocached *gocachedClient // optional remote cache; nil means disk-only
	verbose  bool

	getNanos atomic.Int64 // total nanoseconds spent in gets
	putNanos atomic.Int64 // total nanoseconds spent in puts

	getHTTP       atomic.Int64 // HTTP get requests made
	getHTTPBytes  atomic.Int64 // HTTP get bytes transferred
	getHTTPHits   atomic.Int64 // HTTP get hits
	getHTTPMisses atomic.Int64 // HTTP get misses
	getHTTPErrors atomic.Int64 // HTTP get errors ignored on best-effort basis
	getHTTPNanos  atomic.Int64 // total nanoseconds spent in HTTP gets
	putHTTP       atomic.Int64 // HTTP put requests made
	putHTTPBytes  atomic.Int64 // HTTP put bytes transferred
	putHTTPErrors atomic.Int64 // HTTP put errors ignored on best-effort basis
	putHTTPNanos  atomic.Int64 // total nanoseconds spent in HTTP puts
}
|
||||
|
||||
// get implements the cacheproc Get callback. It consults the local disk cache
// first; on a miss (and when a cigocached server is configured) it fetches
// the object over HTTP, fills the disk cache with it, and returns the local
// disk path. Remote errors and remote misses are both reported as a plain
// cache miss ("", "", nil) rather than an error, so builds degrade gracefully
// when the remote cache is unavailable.
func (c *cigocacher) get(ctx context.Context, actionID string) (outputID, diskPath string, err error) {
	t0 := time.Now()
	defer func() {
		c.getNanos.Add(time.Since(t0).Nanoseconds())
	}()
	if c.gocached == nil {
		return c.disk.Get(ctx, actionID)
	}

	// Disk hit: no need to touch the network.
	outputID, diskPath, err = c.disk.Get(ctx, actionID)
	if err == nil && outputID != "" {
		return outputID, diskPath, nil
	}

	c.getHTTP.Add(1)
	t0HTTP := time.Now()
	defer func() {
		c.getHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds())
	}()
	outputID, res, err := c.gocached.get(ctx, actionID)
	if err != nil {
		// Best-effort remote: count the error but report a miss.
		c.getHTTPErrors.Add(1)
		return "", "", nil
	}
	if outputID == "" || res == nil {
		c.getHTTPMisses.Add(1)
		return "", "", nil
	}

	defer res.Body.Close()

	// Stream the remote object into the disk cache so subsequent gets for
	// this action hit disk directly.
	diskPath, err = put(c.disk, actionID, outputID, res.ContentLength, res.Body)
	if err != nil {
		return "", "", fmt.Errorf("error filling disk cache from HTTP: %w", err)
	}

	c.getHTTPHits.Add(1)
	c.getHTTPBytes.Add(res.ContentLength)
	return outputID, diskPath, nil
}
|
||||
|
||||
// put implements the cacheproc Put callback. It always writes the object to
// the local disk cache and, when a cigocached server is configured, mirrors
// the byte stream to it via a best-effort tee: the disk write drives the read
// of r, the HTTP upload consumes a mirrored copy, and upload failures never
// fail the put. It returns the path of the object in the disk cache.
func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size int64, r io.Reader) (diskPath string, err error) {
	t0 := time.Now()
	defer func() {
		c.putNanos.Add(time.Since(t0).Nanoseconds())
	}()
	if c.gocached == nil {
		return put(c.disk, actionID, outputID, size, r)
	}

	c.putHTTP.Add(1)
	var diskReader, httpReader io.Reader
	tee := &bestEffortTeeReader{r: r}
	if size == 0 {
		// Special case the empty file so NewRequest sets "Content-Length: 0",
		// as opposed to thinking we didn't set it and not being able to sniff its size
		// from the type.
		diskReader, httpReader = bytes.NewReader(nil), bytes.NewReader(nil)
	} else {
		pr, pw := io.Pipe()
		defer pw.Close()
		// The diskReader is in the driving seat. We will try to forward data
		// to httpReader as well, but only best-effort.
		diskReader = tee
		tee.w = pw
		httpReader = pr
	}
	// Buffered (capacity 1) so the upload goroutine's send below can never
	// block forever: if we return early on a disk error, or ctx is canceled
	// and the ctx.Done branch of the select wins, nobody receives from this
	// channel and an unbuffered send would leak the goroutine.
	httpErrCh := make(chan error, 1)
	go func() {
		t0HTTP := time.Now()
		defer func() {
			c.putHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds())
		}()
		httpErrCh <- c.gocached.put(ctx, actionID, outputID, size, httpReader)
	}()

	diskPath, err = put(c.disk, actionID, outputID, size, diskReader)
	if err != nil {
		return "", fmt.Errorf("error writing to disk cache: %w", errors.Join(err, tee.err))
	}

	// Wait for the upload result, but never longer than the caller's context.
	select {
	case err := <-httpErrCh:
		if err != nil {
			c.putHTTPErrors.Add(1)
		} else {
			c.putHTTPBytes.Add(size)
		}
	case <-ctx.Done():
	}

	return diskPath, nil
}
|
||||
|
||||
// close logs end-of-run cache statistics, both locally accumulated HTTP
// counters and the server's own session stats. It only emits output in
// verbose mode when a remote cache was in use, and it never returns a
// non-nil error (stats fetch failures are only logged).
func (c *cigocacher) close() error {
	if !c.verbose || c.gocached == nil {
		return nil
	}

	log.Printf("cigocacher HTTP stats: %d gets (%.1fMiB, %.2fs, %d hits, %d misses, %d errors ignored); %d puts (%.1fMiB, %.2fs, %d errors ignored)",
		c.getHTTP.Load(), float64(c.getHTTPBytes.Load())/float64(1<<20), float64(c.getHTTPNanos.Load())/float64(time.Second), c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(),
		c.putHTTP.Load(), float64(c.putHTTPBytes.Load())/float64(1<<20), float64(c.putHTTPNanos.Load())/float64(time.Second), c.putHTTPErrors.Load())

	// Best-effort: the session stats are diagnostics only.
	stats, err := c.gocached.fetchStats()
	if err != nil {
		log.Printf("error fetching gocached stats: %v", err)
	} else {
		log.Printf("gocached session stats: %s", stats)
	}

	return nil
}
|
||||
|
||||
// fetchAccessToken exchanges a GitHub Actions OIDC identity token for a
// cigocached access token.
//
// It performs two HTTP round trips:
//  1. GET idTokenURL (with audience=gocached appended), authorized with
//     idTokenRequestToken, yielding a short-lived JWT identity token.
//  2. POST that JWT to gocachedURL/auth/exchange-token, yielding the access
//     token to use as a bearer token on subsequent cache requests.
func fetchAccessToken(cl *http.Client, idTokenURL, idTokenRequestToken, gocachedURL string) (string, error) {
	req, err := http.NewRequest("GET", idTokenURL+"&audience=gocached", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", "Bearer "+idTokenRequestToken)
	resp, err := cl.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status %v fetching identity token", resp.Status)
	}
	type idTokenResp struct {
		Value string `json:"value"`
	}
	var idToken idTokenResp
	if err := jsonv1.NewDecoder(resp.Body).Decode(&idToken); err != nil {
		return "", err
	}

	// Marshal the request body rather than splicing the JWT into a JSON
	// template by string concatenation, so any special characters in the
	// token are correctly escaped.
	body, err := jsonv1.Marshal(map[string]string{"jwt": idToken.Value})
	if err != nil {
		return "", err
	}
	req, err = http.NewRequest("POST", gocachedURL+"/auth/exchange-token", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err = cl.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status %v exchanging token", resp.Status)
	}
	type accessTokenResp struct {
		AccessToken string `json:"access_token"`
	}
	var accessToken accessTokenResp
	if err := jsonv1.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
		return "", err
	}

	return accessToken.AccessToken, nil
}
|
||||
|
||||
type bestEffortTeeReader struct {
|
||||
r io.Reader
|
||||
w io.WriteCloser
|
||||
err error
|
||||
}
|
||||
|
||||
func (t *bestEffortTeeReader) Read(p []byte) (int, error) {
|
||||
n, err := t.r.Read(p)
|
||||
if n > 0 && t.w != nil {
|
||||
if _, err := t.w.Write(p[:n]); err != nil {
|
||||
t.err = errors.Join(err, t.w.Close())
|
||||
t.w = nil
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@ -0,0 +1,88 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/bradfitz/go-tool-cache/cachers"
|
||||
)
|
||||
|
||||
// indexEntry is the metadata that DiskCache stores on disk for an ActionID.
// It is serialized as compact JSON into the per-action index file, mapping
// the action to the output object (content hash) that holds its bytes.
type indexEntry struct {
	Version   int    `json:"v"` // index entry format version; currently always 1
	OutputID  string `json:"o"` // hex hash naming the output file (sha256 of the content)
	Size      int64  `json:"n"` // size of the output in bytes
	TimeNanos int64  `json:"t"` // creation time, UnixNano
}
|
||||
|
||||
// validHex reports whether x looks like a plausible lowercase hex
// identifier: between 4 and 100 characters, using only [0-9a-f].
func validHex(x string) bool {
	if len(x) < 4 || len(x) > 100 {
		return false
	}
	for _, c := range x {
		isDigit := c >= '0' && c <= '9'
		isHexLetter := c >= 'a' && c <= 'f'
		if !isDigit && !isHexLetter {
			return false
		}
	}
	return true
}
|
||||
|
||||
// put is like dc.Put but refactored to support safe concurrent writes on Windows.
// TODO(tomhjp): upstream these changes to go-tool-cache once they look stable.
//
// It validates the IDs, streams body into the output file for outputID (via
// the per-platform writeOutputFile), verifies the byte count, then records
// the action->output mapping by writing an indexEntry (via the per-platform
// writeActionFile). It returns the disk path of the output file.
func put(dc *cachers.DiskCache, actionID, outputID string, size int64, body io.Reader) (diskPath string, _ error) {
	if len(actionID) < 4 || len(outputID) < 4 {
		return "", fmt.Errorf("actionID and outputID must be at least 4 characters long")
	}
	if !validHex(actionID) {
		log.Printf("diskcache: got invalid actionID %q", actionID)
		return "", errors.New("actionID must be hex")
	}
	if !validHex(outputID) {
		log.Printf("diskcache: got invalid outputID %q", outputID)
		return "", errors.New("outputID must be hex")
	}

	actionFile := dc.ActionFilename(actionID)
	outputFile := dc.OutputFilename(outputID)

	// Ensure the parent directories exist before writing either file.
	if err := os.MkdirAll(filepath.Dir(actionFile), 0755); err != nil {
		return "", fmt.Errorf("failed to create action directory: %w", err)
	}
	if err := os.MkdirAll(filepath.Dir(outputFile), 0755); err != nil {
		return "", fmt.Errorf("failed to create output directory: %w", err)
	}

	// Write the output (content) file first, so the index never references
	// bytes that are not yet on disk.
	wrote, err := writeOutputFile(outputFile, body, size, outputID)
	if err != nil {
		return "", err
	}
	if wrote != size {
		return "", fmt.Errorf("wrote %d bytes, expected %d", wrote, size)
	}

	ij, err := json.Marshal(indexEntry{
		Version:   1,
		OutputID:  outputID,
		Size:      size,
		TimeNanos: time.Now().UnixNano(),
	})
	if err != nil {
		return "", err
	}
	// Reuse actionFile rather than recomputing dc.ActionFilename(actionID).
	if err := writeActionFile(actionFile, ij); err != nil {
		return "", fmt.Errorf("atomic write failed: %w", err)
	}
	return outputFile, nil
}
|
||||
@ -0,0 +1,44 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// writeActionFile atomically writes the serialized index entry b to dest.
// On non-Windows platforms an atomic temp-file-plus-rename is sufficient for
// concurrent writers.
func writeActionFile(dest string, b []byte) error {
	if _, err := writeAtomic(dest, bytes.NewReader(b)); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// writeOutputFile atomically writes r to dest, returning the number of bytes
// written. The declared size and outputID are unused on non-Windows
// platforms: atomic rename already makes concurrent writers safe.
func writeOutputFile(dest string, r io.Reader, _ int64, _ string) (int64, error) {
	n, err := writeAtomic(dest, r)
	return n, err
}
|
||||
|
||||
// writeAtomic streams r into dest atomically: the data is first written to a
// temporary file in dest's directory and then renamed over dest, so readers
// never observe a partially written file. It returns the number of bytes
// copied. On any failure the temporary file is removed.
func writeAtomic(dest string, r io.Reader) (int64, error) {
	dir, base := filepath.Dir(dest), filepath.Base(dest)
	tmp, err := os.CreateTemp(dir, base+".*")
	if err != nil {
		return 0, err
	}
	discard := func() { os.Remove(tmp.Name()) }

	n, err := io.Copy(tmp, r)
	if err != nil {
		tmp.Close()
		discard()
		return 0, err
	}
	if err := tmp.Close(); err != nil {
		discard()
		return 0, err
	}
	if err := os.Rename(tmp.Name(), dest); err != nil {
		discard()
		return 0, err
	}
	return n, nil
}
|
||||
@ -0,0 +1,102 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// The functions in this file are based on go's own cache in
|
||||
// cmd/go/internal/cache/cache.go, particularly putIndexEntry and copyFile.
|
||||
|
||||
// writeActionFile writes the indexEntry metadata for an ActionID to disk. It
// may be called for the same actionID concurrently from multiple processes,
// and the outputID for a specific actionID may change from time to time due
// to non-deterministic builds. It makes a best-effort to delete the file if
// anything goes wrong.
func writeActionFile(dest string, b []byte) (retErr error) {
	f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0o666)
	if err != nil {
		return err
	}
	defer func() {
		cerr := f.Close()
		if retErr == nil && cerr == nil {
			return
		}
		// Something went wrong; best-effort delete the possibly-corrupt file.
		retErr = errors.Join(retErr, cerr, os.Remove(dest))
	}()

	if _, err := f.Write(b); err != nil {
		return err
	}

	// Truncate the file only *after* writing it.
	// (This should be a no-op, but truncate just in case of previous corruption.)
	//
	// This differs from os.WriteFile, which truncates to 0 *before* writing
	// via os.O_TRUNC. Truncating only after writing ensures that a second write
	// of the same content to the same file is idempotent, and does not - even
	// temporarily! - undo the effect of the first write.
	return f.Truncate(int64(len(b)))
}
|
||||
|
||||
// writeOutputFile writes content to be cached to disk. The outputID is the
// sha256 hash of the content, and each file should only be written ~once,
// assuming no sha256 hash collisions. It may be written multiple times if
// concurrent processes are both populating the same output. The file is opened
// with FILE_SHARE_READ|FILE_SHARE_WRITE, which means both processes can write
// the same contents concurrently without conflict.
//
// It makes a best effort to clean up if anything goes wrong, but the file may
// be left in an inconsistent state in the event of disk-related errors such as
// another process taking file locks, or power loss etc.
func writeOutputFile(dest string, r io.Reader, size int64, outputID string) (_ int64, retErr error) {
	// Fast path: if a same-sized file with the expected hash already exists,
	// skip the write and just drain r so associated resources are released.
	info, statErr := os.Stat(dest)
	sameSize := statErr == nil && info.Size() == size
	if sameSize {
		if f, err := os.Open(dest); err == nil {
			digest := sha256.New()
			io.Copy(digest, f) // best effort; a failed read just fails the comparison
			f.Close()
			if fmt.Sprintf("%x", digest.Sum(nil)) == outputID {
				return io.Copy(io.Discard, r)
			}
		}
	}

	// Didn't successfully find the pre-existing file, write it.
	flags := os.O_WRONLY | os.O_CREATE
	if statErr == nil && info.Size() > size {
		flags |= os.O_TRUNC // Should never happen, but self-heal.
	}
	f, err := os.OpenFile(dest, flags, 0644)
	if err != nil {
		return 0, fmt.Errorf("failed to open output file %q: %w", dest, err)
	}
	defer func() {
		cerr := f.Close()
		if retErr == nil && cerr == nil {
			return
		}
		// Best-effort cleanup of a file we no longer trust.
		retErr = errors.Join(retErr, cerr, os.Remove(dest))
	}()

	// Copy into the file and a hasher simultaneously to double-check that
	// the content really hashes to outputID.
	hasher := sha256.New()
	n, err := io.Copy(io.MultiWriter(f, hasher), r)
	if err != nil {
		return 0, err
	}
	if fmt.Sprintf("%x", hasher.Sum(nil)) != outputID {
		return 0, errors.New("file content changed underfoot")
	}

	return n, nil
}
|
||||
@ -0,0 +1,115 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// gocachedClient is a minimal HTTP client for the cigocached cache server
// protocol: GET /action/<actionID> to fetch an object, PUT
// /<actionID>/<outputID> to store one, and GET /session/stats for
// diagnostics.
type gocachedClient struct {
	baseURL     string       // base URL of the cacher server, like "http://localhost:31364".
	cl          *http.Client // http.Client to use.
	accessToken string       // Bearer token to use in the Authorization header.
	verbose     bool         // enable per-request error logging.
}
|
||||
|
||||
// drainAndClose reads and throws away a small bounded amount of data. This is a
// best-effort attempt to allow connection reuse; Go's HTTP/1 Transport won't
// reuse a TCP connection unless you fully consume HTTP responses.
func drainAndClose(body io.ReadCloser) {
	_, _ = io.CopyN(io.Discard, body, 4<<10)
	_ = body.Close()
}
|
||||
|
||||
// tryReadErrorMessage reads up to 4 KiB of the response body for use as a
// human-readable error detail. Read errors are ignored; the result may be
// empty or truncated.
func tryReadErrorMessage(res *http.Response) []byte {
	limited := io.LimitReader(res.Body, 4<<10)
	msg, _ := io.ReadAll(limited)
	return msg
}
|
||||
|
||||
// get fetches the object for actionID from the cigocached server using the
// single-roundtrip protocol: the outputID arrives in the Go-Output-Id
// response header and the object bytes in the response body.
//
// A cache miss is reported as ("", nil, nil), not an error. On success the
// caller owns resp and must close resp.Body; resp.ContentLength is
// guaranteed to be set (not -1).
func (c *gocachedClient) get(ctx context.Context, actionID string) (outputID string, resp *http.Response, err error) {
	// TODO(tomhjp): make sure we timeout if cigocached disappears, but for some
	// reason, this seemed to tank network performance.
	// // Set a generous upper limit on the time we'll wait for a response. We'll
	// // shorten this deadline later once we know the content length.
	// ctx, cancel := context.WithTimeout(ctx, time.Minute)
	// defer cancel()
	req, _ := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/action/"+actionID, nil)
	req.Header.Set("Want-Object", "1") // opt in to single roundtrip protocol
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}

	res, err := c.cl.Do(req)
	if err != nil {
		return "", nil, err
	}
	// Unless the response is handed to the caller (resp is only set on the
	// final success return), consume and close the body so the underlying
	// connection can be reused.
	defer func() {
		if resp == nil {
			drainAndClose(res.Body)
		}
	}()
	if res.StatusCode == http.StatusNotFound {
		// Cache miss, not an error.
		return "", nil, nil
	}
	if res.StatusCode != http.StatusOK {
		msg := tryReadErrorMessage(res)
		if c.verbose {
			log.Printf("error GET /action/%s: %v, %s", actionID, res.Status, msg)
		}
		return "", nil, fmt.Errorf("unexpected GET /action/%s status %v", actionID, res.Status)
	}

	outputID = res.Header.Get("Go-Output-Id")
	if outputID == "" {
		return "", nil, fmt.Errorf("missing Go-Output-Id header in response")
	}
	if res.ContentLength == -1 {
		// The caller needs the exact size to fill the disk cache.
		return "", nil, fmt.Errorf("no Content-Length from server")
	}
	return outputID, res, nil
}
|
||||
|
||||
// put uploads the object bytes for an actionID/outputID pair to the
// cigocached server. A 204 No Content response indicates success; anything
// else is returned as an error (and logged when verbose).
func (c *gocachedClient) put(ctx context.Context, actionID, outputID string, size int64, body io.Reader) error {
	req, _ := http.NewRequestWithContext(ctx, "PUT", c.baseURL+"/"+actionID+"/"+outputID, body)
	req.ContentLength = size
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}

	resp, err := c.cl.Do(req)
	if err != nil {
		if c.verbose {
			log.Printf("error PUT /%s/%s: %v", actionID, outputID, err)
		}
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNoContent {
		return nil
	}
	msg := tryReadErrorMessage(resp)
	if c.verbose {
		log.Printf("error PUT /%s/%s: %v, %s", actionID, outputID, resp.Status, msg)
	}
	return fmt.Errorf("unexpected PUT /%s/%s status %v", actionID, outputID, resp.Status)
}
|
||||
|
||||
// fetchStats retrieves the server's per-session cache statistics as a
// human-readable string, for logging at shutdown.
func (c *gocachedClient) fetchStats() (string, error) {
	req, err := http.NewRequest("GET", c.baseURL+"/session/stats", nil)
	if err != nil {
		return "", err
	}
	// Match get/put: only send the header when we actually have a token.
	if c.accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.accessToken)
	}
	resp, err := c.cl.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Without this check, an HTML error page would be returned as "stats".
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected GET /session/stats status %v: %s", resp.Status, b)
	}
	return string(b), nil
}
|
||||
@ -0,0 +1,175 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
|
||||
// mustFormatFile formats a Go source file and adjust "json" imports.
// It panics if there are any parsing errors.
//
//   - "encoding/json" is imported under the name "jsonv1" or "jsonv1std"
//   - "encoding/json/v2" is rewritten to import "github.com/go-json-experiment/json" instead
//   - "encoding/json/jsontext" is rewritten to import "github.com/go-json-experiment/json/jsontext" instead
//   - "github.com/go-json-experiment/json" is imported under the name "jsonv2"
//   - "github.com/go-json-experiment/json/v1" is imported under the name "jsonv1"
//
// If no changes to the file is made, it returns input.
func mustFormatFile(in []byte) (out []byte) {
	fset := token.NewFileSet()
	f := must.Get(parser.ParseFile(fset, "", in, parser.ParseComments))

	// Check for the existence of "json" imports.
	// The map is keyed by import path; a path may legitimately appear more
	// than once (e.g. aliased duplicates), hence the slice values.
	jsonImports := make(map[string][]*ast.ImportSpec)
	for _, imp := range f.Imports {
		switch pkgPath := must.Get(strconv.Unquote(imp.Path.Value)); pkgPath {
		case
			"encoding/json",
			"encoding/json/v2",
			"encoding/json/jsontext",
			"github.com/go-json-experiment/json",
			"github.com/go-json-experiment/json/v1",
			"github.com/go-json-experiment/json/jsontext":
			jsonImports[pkgPath] = append(jsonImports[pkgPath], imp)
		}
	}
	if len(jsonImports) == 0 {
		return in
	}

	// Best-effort local type-check of the file
	// to resolve local declarations to detect shadowed variables.
	// Type errors are deliberately swallowed: the file references packages
	// we have not loaded, so errors are expected and partial info suffices.
	typeInfo := &types.Info{Uses: make(map[*ast.Ident]types.Object)}
	(&types.Config{
		Error: func(err error) {},
	}).Check("", fset, []*ast.File{f}, typeInfo)

	// Rewrite imports to instead use "github.com/go-json-experiment/json".
	// This ensures that code continues to build even if
	// goexperiment.jsonv2 is *not* specified.
	// As of https://github.com/go-json-experiment/json/pull/186,
	// imports to "github.com/go-json-experiment/json" are identical
	// to the standard library if built with goexperiment.jsonv2.
	for fromPath, toPath := range map[string]string{
		"encoding/json/v2":       "github.com/go-json-experiment/json",
		"encoding/json/jsontext": "github.com/go-json-experiment/json/jsontext",
	} {
		for _, imp := range jsonImports[fromPath] {
			imp.Path.Value = strconv.Quote(toPath)
			jsonImports[toPath] = append(jsonImports[toPath], imp)
		}
		delete(jsonImports, fromPath)
	}

	// While in a transitory state, where both v1 and v2 json imports
	// may exist in our codebase, always explicitly import with
	// either jsonv1 or jsonv2 in the package name to avoid ambiguities
	// when looking at a particular Marshal or Unmarshal call site.
	renames := make(map[string]string)        // mapping of old names to new names
	deletes := make(map[*ast.ImportSpec]bool) // set of imports to delete
	for pkgPath, imps := range jsonImports {
		var newName string
		switch pkgPath {
		case "encoding/json":
			newName = "jsonv1"
			// If "github.com/go-json-experiment/json/v1" is also imported,
			// then use jsonv1std for "encoding/json" to avoid a conflict.
			if len(jsonImports["github.com/go-json-experiment/json/v1"]) > 0 {
				newName += "std"
			}
		case "github.com/go-json-experiment/json":
			newName = "jsonv2"
		case "github.com/go-json-experiment/json/v1":
			newName = "jsonv1"
		}

		// Rename the import if different than expected.
		if oldName := importName(imps[0]); oldName != newName && newName != "" {
			renames[oldName] = newName
			pos := imps[0].Pos() // preserve original positioning
			imps[0].Name = ast.NewIdent(newName)
			imps[0].Name.NamePos = pos
		}

		// For all redundant imports, use the first imported name.
		for _, imp := range imps[1:] {
			renames[importName(imp)] = importName(imps[0])
			deletes[imp] = true
		}
	}
	// Drop redundant import specs both from f.Imports and from the actual
	// import declaration(s); both views must stay in sync for printing.
	if len(deletes) > 0 {
		f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool {
			return deletes[imp]
		})
		for _, decl := range f.Decls {
			if genDecl, ok := decl.(*ast.GenDecl); ok && genDecl.Tok == token.IMPORT {
				genDecl.Specs = slices.DeleteFunc(genDecl.Specs, func(spec ast.Spec) bool {
					return deletes[spec.(*ast.ImportSpec)]
				})
			}
		}
	}
	// Rewrite qualified references (e.g. json.Marshal -> jsonv1.Marshal)
	// throughout the file body.
	if len(renames) > 0 {
		ast.Walk(astVisitor(func(n ast.Node) bool {
			if sel, ok := n.(*ast.SelectorExpr); ok {
				if id, ok := sel.X.(*ast.Ident); ok {
					// Just because the selector looks like "json.Marshal"
					// does not mean that it is referencing the "json" package.
					// There could be a local "json" declaration that shadows
					// the package import. Check partial type information
					// to see if there was a local declaration.
					if obj, ok := typeInfo.Uses[id]; ok {
						if _, ok := obj.(*types.PkgName); !ok {
							return true
						}
					}

					if newName, ok := renames[id.String()]; ok {
						id.Name = newName
					}
				}
			}
			return true
		}), f)
	}

	// Print the mutated AST, then run it through format.Source to normalize
	// whitespace exactly as gofmt would.
	bb := new(bytes.Buffer)
	must.Do(format.Node(bb, fset, f))
	return must.Get(format.Source(bb.Bytes()))
}
|
||||
|
||||
// importName is the local package name used for an import.
// If no explicit local name is used, then it uses string parsing
// to derive the package name from the path, relying on the convention
// that the package name is the base name of the package path.
func importName(imp *ast.ImportSpec) string {
	if name := imp.Name; name != nil {
		return name.String()
	}
	pkgPath, _ := strconv.Unquote(imp.Path.Value)
	// Strip trailing major-version path elements like "/v2" so the base
	// element reflects the conventional package name.
	trimmed := strings.TrimRight(pkgPath, "/v0123456789")
	return path.Base(trimmed)
}
|
||||
|
||||
// astVisitor is a function that implements [ast.Visitor].
|
||||
type astVisitor func(ast.Node) bool
|
||||
|
||||
func (f astVisitor) Visit(node ast.Node) ast.Visitor {
|
||||
if !f(node) {
|
||||
return nil
|
||||
}
|
||||
return f
|
||||
}
|
||||
@ -0,0 +1,162 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"go/format"
|
||||
"testing"
|
||||
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/util/safediff"
|
||||
)
|
||||
|
||||
// TestFormatFile exercises mustFormatFile's import/reference rewriting via a
// table of (input, want) Go source pairs. Both sides are normalized through
// format.Source before comparison, so raw-string indentation here need not
// be gofmt-exact.
func TestFormatFile(t *testing.T) {
	tests := []struct{ in, want string }{{
		// "encoding/json" -> jsonv1; experiment import renamed to jsonv2;
		// a locally shadowed "json" identifier must be left alone.
		in: `package foobar

import (
	"encoding/json"
	jsonv2exp "github.com/go-json-experiment/json"
)

func main() {
	json.Marshal()
	jsonv2exp.Marshal()
	{
		var json T     // deliberately shadow "json" package name
		json.Marshal() // should not be re-written
	}
}
`,
		want: `package foobar

import (
	jsonv1 "encoding/json"
	jsonv2 "github.com/go-json-experiment/json"
)

func main() {
	jsonv1.Marshal()
	jsonv2.Marshal()
	{
		var json T     // deliberately shadow "json" package name
		json.Marshal() // should not be re-written
	}
}
`,
	}, {
		// Duplicate imports of the same package collapse to one jsonv2 import.
		in: `package foobar

import (
	"github.com/go-json-experiment/json"
	jsonv2exp "github.com/go-json-experiment/json"
)

func main() {
	json.Marshal()
	jsonv2exp.Marshal()
}
`,
		want: `package foobar
import (
	jsonv2 "github.com/go-json-experiment/json"
)
func main() {
	jsonv2.Marshal()
	jsonv2.Marshal()
}
`,
	}, {
		// The experiment's v1 package is renamed to jsonv1.
		in: `package foobar
import "github.com/go-json-experiment/json/v1"
func main() {
	json.Marshal()
}
`,
		want: `package foobar
import jsonv1 "github.com/go-json-experiment/json/v1"
func main() {
	jsonv1.Marshal()
}
`,
	}, {
		// When both v1 packages are present, stdlib gets jsonv1std.
		in: `package foobar
import (
	"encoding/json"
	jsonv1in2 "github.com/go-json-experiment/json/v1"
)
func main() {
	json.Marshal()
	jsonv1in2.Marshal()
}
`,
		want: `package foobar
import (
	jsonv1std "encoding/json"
	jsonv1 "github.com/go-json-experiment/json/v1"
)
func main() {
	jsonv1std.Marshal()
	jsonv1.Marshal()
}
`,
	}, {
		// NOTE(review): this case is byte-identical to the previous one —
		// confirm whether a variation (e.g. reversed import order) was intended.
		in: `package foobar
import (
	"encoding/json"
	jsonv1in2 "github.com/go-json-experiment/json/v1"
)
func main() {
	json.Marshal()
	jsonv1in2.Marshal()
}
`,
		want: `package foobar
import (
	jsonv1std "encoding/json"
	jsonv1 "github.com/go-json-experiment/json/v1"
)
func main() {
	jsonv1std.Marshal()
	jsonv1.Marshal()
}
`,
	}, {
		// stdlib v2/jsontext paths are rewritten to the experiment module.
		in: `package foobar
import (
	"encoding/json"
	j2 "encoding/json/v2"
	"encoding/json/jsontext"
)
func main() {
	json.Marshal()
	j2.Marshal()
	jsontext.NewEncoder
}
`,
		want: `package foobar
import (
	jsonv1 "encoding/json"
	jsonv2 "github.com/go-json-experiment/json"
	"github.com/go-json-experiment/json/jsontext"
)
func main() {
	jsonv1.Marshal()
	jsonv2.Marshal()
	jsontext.NewEncoder
}
`,
	}}
	for _, tt := range tests {
		// Normalize the input first so mustFormatFile sees gofmt'ed source,
		// then compare against the gofmt'ed expectation.
		got := string(must.Get(format.Source([]byte(tt.in))))
		got = string(mustFormatFile([]byte(got)))
		want := string(must.Get(format.Source([]byte(tt.want))))
		if got != want {
			diff, _ := safediff.Lines(got, want, -1)
			t.Errorf("mismatch (-got +want)\n%s", diff)
			t.Error(got)
			t.Error(want)
		}
	}
}
|
||||
@ -0,0 +1,124 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// The jsonimports tool formats all Go source files in the repository
|
||||
// to enforce that "json" imports are consistent.
|
||||
//
|
||||
// With Go 1.25, the "encoding/json/v2" and "encoding/json/jsontext"
|
||||
// packages are now available under goexperiment.jsonv2.
|
||||
// This leads to possible confusion over the following:
|
||||
//
|
||||
// - "encoding/json"
|
||||
// - "encoding/json/v2"
|
||||
// - "encoding/json/jsontext"
|
||||
// - "github.com/go-json-experiment/json/v1"
|
||||
// - "github.com/go-json-experiment/json"
|
||||
// - "github.com/go-json-experiment/json/jsontext"
|
||||
//
|
||||
// In order to enforce consistent usage, we apply the following rules:
|
||||
//
|
||||
// - Until the Go standard library formally accepts "encoding/json/v2"
|
||||
// and "encoding/json/jsontext" into the standard library
|
||||
// (i.e., they are no longer considered experimental),
|
||||
// we forbid any code from directly importing those packages.
|
||||
// Go code should instead import "github.com/go-json-experiment/json"
|
||||
// and "github.com/go-json-experiment/json/jsontext".
|
||||
// The latter packages contain aliases to the standard library
|
||||
// if built on Go 1.25 with the goexperiment.jsonv2 tag specified.
|
||||
//
|
||||
// - Imports of "encoding/json" or "github.com/go-json-experiment/json/v1"
|
||||
// must be explicitly imported under the package name "jsonv1".
|
||||
// If both packages need to be imported, then the former should
|
||||
// be imported under the package name "jsonv1std".
|
||||
//
|
||||
// - Imports of "github.com/go-json-experiment/json"
|
||||
// must be explicitly imported under the package name "jsonv2".
|
||||
//
|
||||
// The latter two rules exist to provide clarity when reading code.
|
||||
// Without them, it is unclear whether "json.Marshal" refers to v1 or v2.
|
||||
// With them, however, it is clear that "jsonv1.Marshal" is calling v1 and
|
||||
// that "jsonv2.Marshal" is calling v2.
|
||||
//
|
||||
// TODO(@joetsai): At this present moment, there is no guidance given on
|
||||
// whether to use v1 or v2 for newly written Go source code.
|
||||
// I will write a document in the near future providing more guidance.
|
||||
// Feel free to continue using v1 "encoding/json" as you are accustomed to.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/util/safediff"
|
||||
)
|
||||
|
||||
func main() {
|
||||
update := flag.Bool("update", false, "update all Go source files")
|
||||
flag.Parse()
|
||||
|
||||
// Change working directory to Git repository root.
|
||||
repoRoot := strings.TrimSuffix(string(must.Get(exec.Command(
|
||||
"git", "rev-parse", "--show-toplevel",
|
||||
).Output())), "\n")
|
||||
must.Do(os.Chdir(repoRoot))
|
||||
|
||||
// Iterate over all indexed files in the Git repository.
|
||||
var printMu sync.Mutex
|
||||
var group sync.WaitGroup
|
||||
sema := syncs.NewSemaphore(runtime.NumCPU())
|
||||
var numDiffs int
|
||||
files := string(must.Get(exec.Command("git", "ls-files").Output()))
|
||||
for file := range strings.Lines(files) {
|
||||
sema.Acquire()
|
||||
group.Go(func() {
|
||||
defer sema.Release()
|
||||
|
||||
// Ignore non-Go source files.
|
||||
file = strings.TrimSuffix(file, "\n")
|
||||
if !strings.HasSuffix(file, ".go") {
|
||||
return
|
||||
}
|
||||
|
||||
// Format all "json" imports in the Go source file.
|
||||
srcIn := must.Get(os.ReadFile(file))
|
||||
srcOut := mustFormatFile(srcIn)
|
||||
|
||||
// Print differences with each formatted file.
|
||||
if !bytes.Equal(srcIn, srcOut) {
|
||||
numDiffs++
|
||||
|
||||
printMu.Lock()
|
||||
fmt.Println(file)
|
||||
lines, _ := safediff.Lines(string(srcIn), string(srcOut), -1)
|
||||
for line := range strings.Lines(lines) {
|
||||
fmt.Print("\t", line)
|
||||
}
|
||||
fmt.Println()
|
||||
printMu.Unlock()
|
||||
|
||||
// If -update is specified, write out the changes.
|
||||
if *update {
|
||||
mode := must.Get(os.Stat(file)).Mode()
|
||||
must.Do(os.WriteFile(file, srcOut, mode))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
group.Wait()
|
||||
|
||||
// Report whether any differences were detected.
|
||||
if numDiffs > 0 && !*update {
|
||||
fmt.Printf(`%d files with "json" imports that need formatting`+"\n", numDiffs)
|
||||
fmt.Println("Please run:")
|
||||
fmt.Println("\t./tool/go run tailscale.com/cmd/jsonimports -update")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,135 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
func TestNewStaticClient(t *testing.T) {
|
||||
const (
|
||||
clientIDFile = "client-id"
|
||||
clientSecretFile = "client-secret"
|
||||
)
|
||||
|
||||
tmp := t.TempDir()
|
||||
clientIDPath := filepath.Join(tmp, clientIDFile)
|
||||
if err := os.WriteFile(clientIDPath, []byte("test-client-id"), 0600); err != nil {
|
||||
t.Fatalf("error writing test file %q: %v", clientIDPath, err)
|
||||
}
|
||||
clientSecretPath := filepath.Join(tmp, clientSecretFile)
|
||||
if err := os.WriteFile(clientSecretPath, []byte("test-client-secret"), 0600); err != nil {
|
||||
t.Fatalf("error writing test file %q: %v", clientSecretPath, err)
|
||||
}
|
||||
|
||||
srv := testAPI(t, 3600)
|
||||
cl, err := newTSClient(zap.NewNop().Sugar(), "", clientIDPath, clientSecretPath, srv.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating Tailscale client: %v", err)
|
||||
}
|
||||
|
||||
resp, err := cl.HTTPClient.Get(srv.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("error making test API call: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
got, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("error reading response body: %v", err)
|
||||
}
|
||||
want := "Bearer " + testToken("/api/v2/oauth/token", "test-client-id", "test-client-secret", "")
|
||||
if string(got) != want {
|
||||
t.Errorf("got %q; want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewWorkloadIdentityClient(t *testing.T) {
|
||||
// 5 seconds is within expiryDelta leeway, so the access token will
|
||||
// immediately be considered expired and get refreshed on each access.
|
||||
srv := testAPI(t, 5)
|
||||
cl, err := newTSClient(zap.NewNop().Sugar(), "test-client-id", "", "", srv.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating Tailscale client: %v", err)
|
||||
}
|
||||
|
||||
// Modify the path where the JWT will be read from.
|
||||
oauth2Transport, ok := cl.HTTPClient.Transport.(*oauth2.Transport)
|
||||
if !ok {
|
||||
t.Fatalf("expected oauth2.Transport, got %T", cl.HTTPClient.Transport)
|
||||
}
|
||||
jwtTokenSource, ok := oauth2Transport.Source.(*jwtTokenSource)
|
||||
if !ok {
|
||||
t.Fatalf("expected jwtTokenSource, got %T", oauth2Transport.Source)
|
||||
}
|
||||
tmp := t.TempDir()
|
||||
jwtPath := filepath.Join(tmp, "token")
|
||||
jwtTokenSource.jwtPath = jwtPath
|
||||
|
||||
for _, jwt := range []string{"test-jwt", "updated-test-jwt"} {
|
||||
if err := os.WriteFile(jwtPath, []byte(jwt), 0600); err != nil {
|
||||
t.Fatalf("error writing test file %q: %v", jwtPath, err)
|
||||
}
|
||||
resp, err := cl.HTTPClient.Get(srv.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("error making test API call: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
got, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("error reading response body: %v", err)
|
||||
}
|
||||
if want := "Bearer " + testToken("/api/v2/oauth/token-exchange", "test-client-id", "", jwt); string(got) != want {
|
||||
t.Errorf("got %q; want %q", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testAPI(t *testing.T, expirationSeconds int) *httptest.Server {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
t.Logf("test server got request: %s %s", r.Method, r.URL.Path)
|
||||
switch r.URL.Path {
|
||||
case "/api/v2/oauth/token", "/api/v2/oauth/token-exchange":
|
||||
id, secret, ok := r.BasicAuth()
|
||||
if !ok {
|
||||
t.Fatal("missing or invalid basic auth")
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if err := json.NewEncoder(w).Encode(map[string]any{
|
||||
"access_token": testToken(r.URL.Path, id, secret, r.FormValue("jwt")),
|
||||
"token_type": "Bearer",
|
||||
"expires_in": expirationSeconds,
|
||||
}); err != nil {
|
||||
t.Fatalf("error writing response: %v", err)
|
||||
}
|
||||
case "/":
|
||||
// Echo back the authz header for test assertions.
|
||||
_, err := w.Write([]byte(r.Header.Get("Authorization")))
|
||||
if err != nil {
|
||||
t.Fatalf("error writing response: %v", err)
|
||||
}
|
||||
default:
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
return srv
|
||||
}
|
||||
|
||||
// testToken derives a deterministic fake access token from the request
// path, the basic-auth credentials, and the JWT, letting tests verify
// exactly which inputs produced a given token.
func testToken(path, id, secret, jwt string) string {
	// fmt.Sprint inserts no separators between adjacent string operands.
	return fmt.Sprint(path, "|", id, "|", secret, "|", jwt)
}
|
||||
@ -0,0 +1,84 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package jsonoutput provides stable and versioned JSON serialisation for CLI output.
|
||||
// This allows us to provide stable output to scripts/clients, but also make
|
||||
// breaking changes to the output when it's useful.
|
||||
//
|
||||
// Historically we only used `--json` as a boolean flag, so changing the output
|
||||
// could break scripts that rely on the existing format.
|
||||
//
|
||||
// This package allows callers to pass a version number to `--json` and get
|
||||
// a consistent output. We'll bump the version when we make a breaking change
|
||||
// that's likely to break scripts that rely on the existing output, e.g. if
|
||||
// we remove a field or change the type/format.
|
||||
//
|
||||
// Passing just the boolean flag `--json` will always return v1, to preserve
|
||||
// compatibility with scripts written before we versioned our output.
|
||||
package jsonoutput
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// JSONSchemaVersion implements [flag.Value], and tracks whether the CLI has
// been called with `--json`, and if so, with what value.
type JSONSchemaVersion struct {
	// IsSet tracks if the flag was provided at all.
	IsSet bool

	// Value tracks the desired schema version, which defaults to 1 if
	// the user passes `--json` without an argument.
	Value int
}

// String returns the flag's current value as shown in the CLI help text:
// the schema version if the flag was set, or a placeholder otherwise.
func (v *JSONSchemaVersion) String() string {
	if v.IsSet {
		return strconv.Itoa(v.Value)
	}
	return "(not set)"
}

// Set is called when the user passes the flag as a command-line argument.
// It accepts either a bare boolean form ("--json", which selects schema
// version 1) or an explicit integer version ("--json=2"). Passing the
// flag more than once is an error.
func (v *JSONSchemaVersion) Set(s string) error {
	if v.IsSet {
		return errors.New("received multiple instances of --json; only pass it once")
	}

	v.IsSet = true

	// If the user doesn't supply a schema version, default to 1.
	// This ensures that any existing scripts will continue to get their
	// current output.
	if s == "true" {
		v.Value = 1
		return nil
	}

	version, err := strconv.Atoi(s)
	if err != nil {
		return fmt.Errorf("invalid integer value passed to --json: %q", s)
	}
	v.Value = version
	return nil
}

// IsBoolFlag tells the flag package that JSONSchemaVersion can be set
// without an argument.
func (v *JSONSchemaVersion) IsBoolFlag() bool {
	return true
}
|
||||
|
||||
// ResponseEnvelope is a set of fields common to all versioned JSON output.
// Concrete responses embed it alongside their own payload fields.
type ResponseEnvelope struct {
	// SchemaVersion is the version of the JSON output, e.g. "1", "2", "3"
	SchemaVersion string

	// ResponseWarning tells a user if a newer version of the JSON output
	// is available. It serializes as "_WARNING" and is omitted when empty.
	ResponseWarning string `json:"_WARNING,omitzero"`
}
|
||||
@ -0,0 +1,203 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package jsonoutput
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tka"
|
||||
)
|
||||
|
||||
// PrintNetworkLockJSONV1 prints the stored TKA state as a JSON object to the CLI,
|
||||
// in a stable "v1" format.
|
||||
//
|
||||
// This format includes:
|
||||
//
|
||||
// - the AUM hash as a base32-encoded string
|
||||
// - the raw AUM as base64-encoded bytes
|
||||
// - the expanded AUM, which prints named fields for consumption by other tools
|
||||
func PrintNetworkLockJSONV1(out io.Writer, updates []ipnstate.NetworkLockUpdate) error {
|
||||
messages := make([]logMessageV1, len(updates))
|
||||
|
||||
for i, update := range updates {
|
||||
var aum tka.AUM
|
||||
if err := aum.Unserialize(update.Raw); err != nil {
|
||||
return fmt.Errorf("decoding: %w", err)
|
||||
}
|
||||
|
||||
h := aum.Hash()
|
||||
|
||||
if !bytes.Equal(h[:], update.Hash[:]) {
|
||||
return fmt.Errorf("incorrect AUM hash: got %v, want %v", h, update)
|
||||
}
|
||||
|
||||
messages[i] = toLogMessageV1(aum, update)
|
||||
}
|
||||
|
||||
result := struct {
|
||||
ResponseEnvelope
|
||||
Messages []logMessageV1
|
||||
}{
|
||||
ResponseEnvelope: ResponseEnvelope{
|
||||
SchemaVersion: "1",
|
||||
},
|
||||
Messages: messages,
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(out)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(result)
|
||||
}
|
||||
|
||||
// toLogMessageV1 converts a [tka.AUM] and [ipnstate.NetworkLockUpdate] to the
|
||||
// JSON output returned by the CLI.
|
||||
func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 {
|
||||
expandedAUM := expandedAUMV1{}
|
||||
expandedAUM.MessageKind = aum.MessageKind.String()
|
||||
if len(aum.PrevAUMHash) > 0 {
|
||||
expandedAUM.PrevAUMHash = aum.PrevAUMHash.String()
|
||||
}
|
||||
if key := aum.Key; key != nil {
|
||||
expandedAUM.Key = toExpandedKeyV1(key)
|
||||
}
|
||||
if keyID := aum.KeyID; keyID != nil {
|
||||
expandedAUM.KeyID = fmt.Sprintf("tlpub:%x", keyID)
|
||||
}
|
||||
if state := aum.State; state != nil {
|
||||
expandedState := expandedStateV1{}
|
||||
if h := state.LastAUMHash; h != nil {
|
||||
expandedState.LastAUMHash = h.String()
|
||||
}
|
||||
for _, secret := range state.DisablementSecrets {
|
||||
expandedState.DisablementSecrets = append(expandedState.DisablementSecrets, fmt.Sprintf("%x", secret))
|
||||
}
|
||||
for _, key := range state.Keys {
|
||||
expandedState.Keys = append(expandedState.Keys, toExpandedKeyV1(&key))
|
||||
}
|
||||
expandedState.StateID1 = state.StateID1
|
||||
expandedState.StateID2 = state.StateID2
|
||||
expandedAUM.State = expandedState
|
||||
}
|
||||
if votes := aum.Votes; votes != nil {
|
||||
expandedAUM.Votes = *votes
|
||||
}
|
||||
expandedAUM.Meta = aum.Meta
|
||||
for _, signature := range aum.Signatures {
|
||||
expandedAUM.Signatures = append(expandedAUM.Signatures, expandedSignatureV1{
|
||||
KeyID: fmt.Sprintf("tlpub:%x", signature.KeyID),
|
||||
Signature: base64.URLEncoding.EncodeToString(signature.Signature),
|
||||
})
|
||||
}
|
||||
|
||||
return logMessageV1{
|
||||
Hash: aum.Hash().String(),
|
||||
AUM: expandedAUM,
|
||||
Raw: base64.URLEncoding.EncodeToString(update.Raw),
|
||||
}
|
||||
}
|
||||
|
||||
// toExpandedKeyV1 converts a [tka.Key] to the JSON output returned
|
||||
// by the CLI.
|
||||
func toExpandedKeyV1(key *tka.Key) expandedKeyV1 {
|
||||
return expandedKeyV1{
|
||||
Kind: key.Kind.String(),
|
||||
Votes: key.Votes,
|
||||
Public: fmt.Sprintf("tlpub:%x", key.Public),
|
||||
Meta: key.Meta,
|
||||
}
|
||||
}
|
||||
|
||||
// logMessageV1 is the JSON representation of an AUM as both raw bytes and
// in its expanded form, and the CLI output is a list of these entries.
type logMessageV1 struct {
	// Hash is the BLAKE2s digest of the CBOR-encoded AUM. This is printed
	// as a base32-encoded string, e.g. KCE…XZQ
	Hash string

	// AUM is the expanded form of the AUM, which presents the fields in a
	// more accessible format than doing a CBOR decoding.
	AUM expandedAUMV1

	// Raw is the raw bytes of the CBOR-encoded AUM, encoded as base64.
	// This is useful for verifying the AUM hash.
	Raw string
}
|
||||
|
||||
// expandedAUMV1 is the expanded version of a [tka.AUM], designed so external tools
// can read the AUM without knowing our CBOR definitions.
//
// Fields that do not apply to a given AUM kind are left at their zero
// values and omitted from the JSON output via omitzero.
type expandedAUMV1 struct {
	// MessageKind names the AUM type, e.g. "add-key", "remove-key",
	// "checkpoint".
	MessageKind string
	// PrevAUMHash is the base32-encoded hash of the parent AUM,
	// empty when the AUM has no parent.
	PrevAUMHash string `json:"PrevAUMHash,omitzero"`

	// Key encodes a public key to be added to the key authority.
	// This field is used for AddKey AUMs.
	Key expandedKeyV1 `json:"Key,omitzero"`

	// KeyID references a public key which is part of the key authority.
	// This field is used for RemoveKey and UpdateKey AUMs.
	KeyID string `json:"KeyID,omitzero"`

	// State describes the full state of the key authority.
	// This field is used for Checkpoint AUMs.
	State expandedStateV1 `json:"State,omitzero"`

	// Votes and Meta describe properties of a key in the key authority.
	// These fields are used for UpdateKey AUMs.
	Votes uint              `json:"Votes,omitzero"`
	Meta  map[string]string `json:"Meta,omitzero"`

	// Signatures lists the signatures over this AUM.
	Signatures []expandedSignatureV1 `json:"Signatures,omitzero"`
}
|
||||
|
||||
// expandedKeyV1 is the expanded version of a [tka.Key], which describes
// the public components of a key known to network-lock.
type expandedKeyV1 struct {
	// Kind names the key algorithm, e.g. "25519".
	Kind string

	// Votes describes the weight applied to signatures using this key.
	Votes uint

	// Public encodes the public key of the key as a "tlpub:"-prefixed
	// hex string.
	Public string

	// Meta describes arbitrary metadata about the key. This could be
	// used to store the name of the key, for instance.
	Meta map[string]string `json:"Meta,omitzero"`
}
|
||||
|
||||
// expandedStateV1 is the expanded version of a [tka.State], which describes
// Tailnet Key Authority state at an instant in time.
type expandedStateV1 struct {
	// LastAUMHash is the blake2s digest of the last-applied AUM.
	LastAUMHash string `json:"LastAUMHash,omitzero"`

	// DisablementSecrets are KDF-derived values which can be used
	// to turn off the TKA in the event of a consensus-breaking bug.
	// Each secret is rendered as a hex string.
	DisablementSecrets []string

	// Keys are the public keys of either:
	//
	// 1. The signing nodes currently trusted by the TKA.
	// 2. Ephemeral keys that were used to generate pre-signed auth keys.
	Keys []expandedKeyV1

	// StateID1 and StateID2 are nonces, generated on enablement and
	// fixed for the lifetime of the Tailnet Key Authority.
	StateID1 uint64
	StateID2 uint64
}
|
||||
|
||||
// expandedSignatureV1 is the expanded form of a [tka.Signature], which
// describes a signature over an AUM. This signature can be verified
// using the key referenced by KeyID.
type expandedSignatureV1 struct {
	// KeyID references the signing key as a "tlpub:"-prefixed hex string.
	KeyID string
	// Signature is the base64 (URL-safe alphabet) encoding of the raw
	// signature bytes.
	Signature string
}
|
||||
@ -0,0 +1,204 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"tailscale.com/cmd/tailscale/cli/jsonoutput"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/tkatype"
|
||||
)
|
||||
|
||||
func TestNetworkLockLogOutput(t *testing.T) {
|
||||
votes := uint(1)
|
||||
aum1 := tka.AUM{
|
||||
MessageKind: tka.AUMAddKey,
|
||||
Key: &tka.Key{
|
||||
Kind: tka.Key25519,
|
||||
Votes: 1,
|
||||
Public: []byte{2, 2},
|
||||
},
|
||||
}
|
||||
h1 := aum1.Hash()
|
||||
aum2 := tka.AUM{
|
||||
MessageKind: tka.AUMRemoveKey,
|
||||
KeyID: []byte{3, 3},
|
||||
PrevAUMHash: h1[:],
|
||||
Signatures: []tkatype.Signature{
|
||||
{
|
||||
KeyID: []byte{3, 4},
|
||||
Signature: []byte{4, 5},
|
||||
},
|
||||
},
|
||||
Meta: map[string]string{"en": "three", "de": "drei", "es": "tres"},
|
||||
}
|
||||
h2 := aum2.Hash()
|
||||
aum3 := tka.AUM{
|
||||
MessageKind: tka.AUMCheckpoint,
|
||||
PrevAUMHash: h2[:],
|
||||
State: &tka.State{
|
||||
Keys: []tka.Key{
|
||||
{
|
||||
Kind: tka.Key25519,
|
||||
Votes: 1,
|
||||
Public: []byte{1, 1},
|
||||
Meta: map[string]string{"en": "one", "de": "eins", "es": "uno"},
|
||||
},
|
||||
},
|
||||
DisablementSecrets: [][]byte{
|
||||
{1, 2, 3},
|
||||
{4, 5, 6},
|
||||
{7, 8, 9},
|
||||
},
|
||||
},
|
||||
Votes: &votes,
|
||||
}
|
||||
|
||||
updates := []ipnstate.NetworkLockUpdate{
|
||||
{
|
||||
Hash: aum3.Hash(),
|
||||
Change: aum3.MessageKind.String(),
|
||||
Raw: aum3.Serialize(),
|
||||
},
|
||||
{
|
||||
Hash: aum2.Hash(),
|
||||
Change: aum2.MessageKind.String(),
|
||||
Raw: aum2.Serialize(),
|
||||
},
|
||||
{
|
||||
Hash: aum1.Hash(),
|
||||
Change: aum1.MessageKind.String(),
|
||||
Raw: aum1.Serialize(),
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("human-readable", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var outBuf bytes.Buffer
|
||||
json := jsonoutput.JSONSchemaVersion{}
|
||||
useColor := false
|
||||
|
||||
printNetworkLockLog(updates, &outBuf, json, useColor)
|
||||
|
||||
t.Logf("%s", outBuf.String())
|
||||
|
||||
want := `update 4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA (checkpoint)
|
||||
Disablement values:
|
||||
- 010203
|
||||
- 040506
|
||||
- 070809
|
||||
Keys:
|
||||
Type: 25519
|
||||
KeyID: tlpub:0101
|
||||
Metadata: map[de:eins en:one es:uno]
|
||||
|
||||
update BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ (remove-key)
|
||||
KeyID: tlpub:0303
|
||||
|
||||
update UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA (add-key)
|
||||
Type: 25519
|
||||
KeyID: tlpub:0202
|
||||
|
||||
`
|
||||
|
||||
if diff := cmp.Diff(outBuf.String(), want); diff != "" {
|
||||
t.Fatalf("wrong output (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
jsonV1 := `{
|
||||
"SchemaVersion": "1",
|
||||
"Messages": [
|
||||
{
|
||||
"Hash": "4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA",
|
||||
"AUM": {
|
||||
"MessageKind": "checkpoint",
|
||||
"PrevAUMHash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ",
|
||||
"State": {
|
||||
"DisablementSecrets": [
|
||||
"010203",
|
||||
"040506",
|
||||
"070809"
|
||||
],
|
||||
"Keys": [
|
||||
{
|
||||
"Kind": "25519",
|
||||
"Votes": 1,
|
||||
"Public": "tlpub:0101",
|
||||
"Meta": {
|
||||
"de": "eins",
|
||||
"en": "one",
|
||||
"es": "uno"
|
||||
}
|
||||
}
|
||||
],
|
||||
"StateID1": 0,
|
||||
"StateID2": 0
|
||||
},
|
||||
"Votes": 1
|
||||
},
|
||||
"Raw": "pAEFAlggCqtbndUNv4_i-JrrVbGywbw5dNWNZYysEm02CCgf3q8FowH2AoNDAQIDQwQFBkMHCAkDgaQBAQIBA0IBAQyjYmRlZGVpbnNiZW5jb25lYmVzY3VubwYB"
|
||||
},
|
||||
{
|
||||
"Hash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ",
|
||||
"AUM": {
|
||||
"MessageKind": "remove-key",
|
||||
"PrevAUMHash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA",
|
||||
"KeyID": "tlpub:0303",
|
||||
"Meta": {
|
||||
"de": "drei",
|
||||
"en": "three",
|
||||
"es": "tres"
|
||||
},
|
||||
"Signatures": [
|
||||
{
|
||||
"KeyID": "tlpub:0304",
|
||||
"Signature": "BAU="
|
||||
}
|
||||
]
|
||||
},
|
||||
"Raw": "pQECAlggopKFFOhcPaARv2QQU90-kWozQFAG3Hqja7Vez-_EZIAEQgMDB6NiZGVkZHJlaWJlbmV0aHJlZWJlc2R0cmVzF4GiAUIDBAJCBAU="
|
||||
},
|
||||
{
|
||||
"Hash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA",
|
||||
"AUM": {
|
||||
"MessageKind": "add-key",
|
||||
"Key": {
|
||||
"Kind": "25519",
|
||||
"Votes": 1,
|
||||
"Public": "tlpub:0202"
|
||||
}
|
||||
},
|
||||
"Raw": "owEBAvYDowEBAgEDQgIC"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
t.Run("json-1", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Logf("BOOM")
|
||||
|
||||
var outBuf bytes.Buffer
|
||||
json := jsonoutput.JSONSchemaVersion{
|
||||
IsSet: true,
|
||||
Value: 1,
|
||||
}
|
||||
useColor := false
|
||||
|
||||
printNetworkLockLog(updates, &outBuf, json, useColor)
|
||||
|
||||
want := jsonV1
|
||||
t.Logf("%s", outBuf.String())
|
||||
|
||||
if diff := cmp.Diff(outBuf.String(), want); diff != "" {
|
||||
t.Fatalf("wrong output (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue