all: annotate log verbosity levels on most egregiously spammy log prints

Fixes #924
Fixes #282

Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
branch: crawshaw/dnsguid
author: Brad Fitzpatrick <bradfitz@tailscale.com>
parent 57dd247376
commit 053a1d1340
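
The change relies on the convention that a leading "[v1] " (or "[v2] ") tag marks a log line's verbosity level, so the line can be dropped unless the daemon's verbosity setting asks for it. As a rough illustration of the idea — not the actual logtail implementation; `filterLogs` and the `verbose` parameter are invented names for this sketch:

```go
package main

import (
	"log"
	"strings"
)

type Logf func(format string, args ...interface{})

// filterLogs is a sketch of the convention this commit adopts: a leading
// "[v1] " or "[v2] " tag marks a line's verbosity, and a wrapper drops
// anything chattier than the configured level. Invented names throughout;
// the real filtering lives in tailscale's log pipeline.
func filterLogs(verbose int, logf Logf) Logf {
	return func(format string, args ...interface{}) {
		level := 0
		switch {
		case strings.HasPrefix(format, "[v2] "):
			level = 2
		case strings.HasPrefix(format, "[v1] "):
			level = 1
		}
		if level > verbose {
			return // annotated as too verbose for the current setting
		}
		logf(format, args...)
	}
}

func main() {
	logf := filterLogs(0, log.Printf) // default: [v1] and [v2] lines are dropped
	logf("sendStatus: %s: %v", "mapRoutine", "synchronized")
	logf("[v1] cancelMapSafely: synced=%v", true) // suppressed at verbosity 0
}
```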

@@ -231,7 +231,7 @@ func (c *Client) cancelMapSafely() {
 	c.mu.Lock()
 	defer c.mu.Unlock()
-	c.logf("cancelMapSafely: synced=%v", c.synced)
+	c.logf("[v1] cancelMapSafely: synced=%v", c.synced)
 	if c.inPollNetMap {
 		// received at least one netmap since the last
@@ -253,12 +253,12 @@ func (c *Client) cancelMapSafely() {
 		// request.
 		select {
 		case c.newMapCh <- struct{}{}:
-			c.logf("cancelMapSafely: wrote to channel")
+			c.logf("[v1] cancelMapSafely: wrote to channel")
 		default:
 			// if channel write failed, then there was already
 			// an outstanding newMapCh request. One is enough,
 			// since it'll always use the latest endpoints.
-			c.logf("cancelMapSafely: channel was full")
+			c.logf("[v1] cancelMapSafely: channel was full")
 		}
 	}
 }
@@ -280,13 +280,13 @@ func (c *Client) authRoutine() {
 		select {
 		case <-c.quit:
-			c.logf("authRoutine: quit")
+			c.logf("[v1] authRoutine: quit")
 			return
 		default:
 		}
 		report := func(err error, msg string) {
-			c.logf("%s: %v", msg, err)
+			c.logf("[v1] %s: %v", msg, err)
 			err = fmt.Errorf("%s: %v", msg, err)
 			// don't send status updates for context errors,
 			// since context cancelation is always on purpose.
@@ -298,7 +298,7 @@ func (c *Client) authRoutine() {
 		if goal == nil {
 			// Wait for user to Login or Logout.
 			<-ctx.Done()
-			c.logf("authRoutine: context done.")
+			c.logf("[v1] authRoutine: context done.")
 			continue
 		}
@@ -434,7 +434,7 @@ func (c *Client) mapRoutine() {
 		}
 		report := func(err error, msg string) {
-			c.logf("%s: %v", msg, err)
+			c.logf("[v1] %s: %v", msg, err)
 			err = fmt.Errorf("%s: %v", msg, err)
 			// don't send status updates for context errors,
 			// since context cancelation is always on purpose.
@@ -469,7 +469,7 @@ func (c *Client) mapRoutine() {
 			select {
 			case <-c.newMapCh:
-				c.logf("mapRoutine: new map request during PollNetMap. canceling.")
+				c.logf("[v1] mapRoutine: new map request during PollNetMap. canceling.")
 				c.cancelMapLocked()
 				// Don't emit this netmap; we're
@@ -491,7 +491,7 @@ func (c *Client) mapRoutine() {
 			c.mu.Unlock()
-			c.logf("mapRoutine: netmap received: %s", state)
+			c.logf("[v1] mapRoutine: netmap received: %s", state)
 			if stillAuthed {
 				c.sendStatus("mapRoutine-got-netmap", nil, "", nm)
 			}
@@ -572,7 +572,7 @@ func (c *Client) sendStatus(who string, err error, url string, nm *NetworkMap) {
 	c.inSendStatus++
 	c.mu.Unlock()
-	c.logf("sendStatus: %s: %v", who, state)
+	c.logf("[v1] sendStatus: %s: %v", who, state)
 	var p *Persist
 	var fin *empty.Message

@@ -537,10 +537,12 @@ func (c *Direct) PollNetMap(ctx context.Context, maxPolls int, cb func(*NetworkM
 	}
 	allowStream := maxPolls != 1
-	c.logf("PollNetMap: stream=%v :%v ep=%v", allowStream, localPort, ep)
+	c.logf("[v1] PollNetMap: stream=%v :%v ep=%v", allowStream, localPort, ep)
 	vlogf := logger.Discard
 	if Debug.NetMap {
+		// TODO(bradfitz): update this to use "[v2]" prefix perhaps? but we don't
+		// want to upload it always.
 		vlogf = c.logf
 	}
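
The TODO above points at a real distinction: a "[v2]" prefix would still format the line and hand it to the log pipeline for later filtering, whereas the Debug.NetMap gate swaps in logger.Discard so the verbose netmap output is never produced or uploaded at all. A minimal sketch of that gating pattern — the TS_DEBUG_NETMAP knob and `netmapLogf` helper are hypothetical names for this sketch:

```go
package main

import (
	"log"
	"os"
)

type Logf func(format string, args ...interface{})

// discard formats nothing and writes nothing, mirroring logger.Discard.
var discard Logf = func(string, ...interface{}) {}

// netmapLogf returns the real logger only when the debug knob is set;
// otherwise verbose netmap lines are dropped before they exist, rather
// than uploaded and filtered later as a "[v2]" prefix would be.
// TS_DEBUG_NETMAP is a hypothetical knob name for this sketch.
func netmapLogf(logf Logf) Logf {
	if os.Getenv("TS_DEBUG_NETMAP") == "true" {
		return logf
	}
	return discard
}

func main() {
	vlogf := netmapLogf(log.Printf)
	vlogf("netmap: sent timer reset") // silent unless TS_DEBUG_NETMAP=true
}
```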
@@ -681,7 +683,7 @@ func (c *Direct) PollNetMap(ctx context.Context, maxPolls int, cb func(*NetworkM
 			case timeoutReset <- struct{}{}:
 				vlogf("netmap: sent timer reset")
 			case <-ctx.Done():
-				c.logf("netmap: not resetting timer; context done: %v", ctx.Err())
+				c.logf("[v1] netmap: not resetting timer; context done: %v", ctx.Err())
 				return ctx.Err()
 			}
 		if resp.KeepAlive {
@@ -776,7 +778,7 @@ func (c *Direct) PollNetMap(ctx context.Context, maxPolls int, cb func(*NetworkM
 		now := c.timeNow()
 		if now.Sub(c.lastPrintMap) >= 5*time.Minute {
 			c.lastPrintMap = now
-			c.logf("new network map[%d]:\n%s", i, nm.Concise())
+			c.logf("[v1] new network map[%d]:\n%s", i, nm.Concise())
 		}
 		c.mu.Lock()

@@ -280,12 +280,12 @@ func (nm *NetworkMap) WGCfg(logf logger.Logf, flags WGConfigFlags) (*wgcfg.Confi
 	for _, allowedIP := range peer.AllowedIPs {
 		if allowedIP.Mask == 0 {
 			if (flags & AllowDefaultRoute) == 0 {
-				logf("wgcfg: %v skipping default route", peer.Key.ShortString())
+				logf("[v1] wgcfg: %v skipping default route", peer.Key.ShortString())
 				continue
 			}
 		} else if cidrIsSubnet(peer, allowedIP) {
 			if (flags & AllowSubnetRoutes) == 0 {
-				logf("wgcfg: %v skipping subnet route", peer.Key.ShortString())
+				logf("[v1] wgcfg: %v skipping subnet route", peer.Key.ShortString())
 				continue
 			}
 		}

@@ -266,12 +266,14 @@ func (s *server) serveConn(ctx context.Context, c net.Conn, logf logger.Logf) {
 	}
 	defer s.removeAndCloseConn(c)
-	logf("incoming control connection")
+	logf("[v1] incoming control connection")
 	for ctx.Err() == nil {
 		msg, err := ipn.ReadMsg(br)
 		if err != nil {
-			if ctx.Err() == nil {
+			if errors.Is(err, io.EOF) {
+				logf("[v1] ReadMsg: %v", err)
+			} else if ctx.Err() == nil {
 				logf("ReadMsg: %v", err)
 			}
 			return

@@ -251,7 +251,11 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 	// The following do not depend on any data for which we need to lock b.
 	if st.Err != "" {
 		// TODO(crawshaw): display in the UI.
+		if st.Err == "EOF" {
+			b.logf("[v1] Received error: EOF")
+		} else {
 			b.logf("Received error: %v", st.Err)
+		}
 		return
 	}
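
Both hunks above demote expected failures rather than whole call sites: a clean EOF from a disconnecting client is routine, so it logs at [v1], while other errors keep the default level. A sketch of that pattern — `logReadErr` is an invented helper name:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// logReadErr demotes an expected read error (clean EOF) to [v1]
// verbosity while leaving unexpected errors at the default level.
func logReadErr(logf func(format string, args ...interface{}), err error) {
	if errors.Is(err, io.EOF) {
		logf("[v1] ReadMsg: %v", err) // routine: client hung up cleanly
		return
	}
	logf("ReadMsg: %v", err) // unexpected: keep visible by default
}

func main() {
	logf := func(f string, a ...interface{}) { fmt.Printf(f+"\n", a...) }
	logReadErr(logf, io.EOF)
	logReadErr(logf, errors.New("connection reset"))
}
```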
if st.LoginFinished != nil {
@@ -315,7 +319,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 	if netMap != nil {
 		diff := st.NetMap.ConciseDiffFrom(netMap)
 		if strings.TrimSpace(diff) == "" {
-			b.logf("netmap diff: (none)")
+			b.logf("[v1] netmap diff: (none)")
 		} else {
 			b.logf("netmap diff:\n%v", diff)
 		}
@@ -1003,8 +1007,8 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret EngineStatus
 	// [GRINDER STATS LINES] - please don't remove (used for log parsing)
 	if peerStats.Len() > 0 {
-		b.keyLogf("peer keys: %s", strings.TrimSpace(peerKeys.String()))
-		b.statsLogf("v%v peers: %v", version.Long, strings.TrimSpace(peerStats.String()))
+		b.keyLogf("[v1] peer keys: %s", strings.TrimSpace(peerKeys.String()))
+		b.statsLogf("[v1] v%v peers: %v", version.Long, strings.TrimSpace(peerStats.String()))
 	}
 	return ret
 }
@@ -1227,7 +1231,7 @@ func (b *LocalBackend) authReconfig() {
 	if err == wgengine.ErrNoChanges {
 		return
 	}
-	b.logf("authReconfig: ra=%v dns=%v 0x%02x: %v", uc.RouteAll, uc.CorpDNS, flags, err)
+	b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", uc.RouteAll, uc.CorpDNS, flags, err)
 }

 // domainsForProxying produces a list of search domains for proxied DNS.

@@ -23,8 +23,8 @@ import (
 func TestLocalLogLines(t *testing.T) {
 	logListen := tstest.NewLogLineTracker(t.Logf, []string{
 		"SetPrefs: %v",
-		"peer keys: %s",
-		"v%v peers: %v",
+		"[v1] peer keys: %s",
+		"[v1] v%v peers: %v",
 	})
 	logid := func(hex byte) logtail.PublicID {
@@ -71,7 +71,7 @@ func TestLocalLogLines(t *testing.T) {
 	prefs.Persist = persist
 	lb.SetPrefs(prefs)
-	t.Run("after_prefs", testWantRemain("peer keys: %s", "v%v peers: %v"))
+	t.Run("after_prefs", testWantRemain("[v1] peer keys: %s", "[v1] v%v peers: %v"))

 	// log peers, peer keys
 	status := &wgengine.Status{
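
The test updates follow mechanically: tstest's NewLogLineTracker matches on the format string passed to logf, so once a line gains a "[v1] " prefix the expected strings must change in lockstep or the tracker reports the line as never logged. A toy version of that matching — the `lineTracker` type here is a stand-in, not the tstest implementation:

```go
package main

import "fmt"

// lineTracker is a stand-in for tstest's log line tracker: it records
// whether each expected format string was ever passed to logf.
type lineTracker struct {
	seen map[string]bool
}

func (lt *lineTracker) logf(format string, args ...interface{}) {
	if _, tracked := lt.seen[format]; tracked {
		lt.seen[format] = true // matched on the format string, not the output
	}
	fmt.Printf(format+"\n", args...)
}

func main() {
	lt := &lineTracker{seen: map[string]bool{"[v1] peer keys: %s": false}}
	lt.logf("peer keys: %s", "k1 k2")      // no longer matches: prefix missing
	lt.logf("[v1] peer keys: %s", "k1 k2") // matches the updated expectation
	fmt.Println("seen:", lt.seen)
}
```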

@@ -692,7 +692,7 @@ func (rs *reportState) probePortMapServices() {
 	port1900 := netaddr.IPPort{IP: gw, Port: 1900}.UDPAddr()
 	port5351 := netaddr.IPPort{IP: gw, Port: 5351}.UDPAddr()
-	rs.c.logf("probePortMapServices: me %v -> gw %v", myIP, gw)
+	rs.c.logf("[v1] probePortMapServices: me %v -> gw %v", myIP, gw)

 	// Create a UDP4 socket used just for querying for UPnP, NAT-PMP, and PCP.
 	uc, err := netns.Listener().ListenPacket(context.Background(), "udp4", ":0")
@@ -842,7 +842,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap) (*Report, e
 	ifState, err := interfaces.GetState()
 	if err != nil {
-		c.logf("interfaces: %v", err)
+		c.logf("[v1] interfaces: %v", err)
 		return nil, err
 	}
@@ -951,7 +951,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap) (*Report, e
 		go func(reg *tailcfg.DERPRegion) {
 			defer wg.Done()
 			if d, ip, err := c.measureHTTPSLatency(ctx, reg); err != nil {
-				c.logf("netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err)
+				c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err)
 			} else {
 				rs.mu.Lock()
 				rs.report.RegionLatency[reg.RegionID] = d
@@ -1045,7 +1045,7 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio
 }

 func (c *Client) logConciseReport(r *Report, dm *tailcfg.DERPMap) {
-	c.logf("%v", logger.ArgWriter(func(w *bufio.Writer) {
+	c.logf("[v1] report: %v", logger.ArgWriter(func(w *bufio.Writer) {
 		fmt.Fprintf(w, "udp=%v", r.UDP)
 		if !r.IPv4 {
 			fmt.Fprintf(w, " v4=%v", r.IPv4)

@@ -554,7 +554,7 @@ func TestLogConciseReport(t *testing.T) {
 			var buf bytes.Buffer
 			c := &Client{Logf: func(f string, a ...interface{}) { fmt.Fprintf(&buf, f, a...) }}
 			c.logConciseReport(tt.r, dm)
-			if got := buf.String(); got != tt.want {
+			if got := strings.TrimPrefix(buf.String(), "[v1] report: "); got != tt.want {
 				t.Errorf("unexpected result.\n got: %#q\nwant: %#q\n", got, tt.want)
 			}
 		})

@@ -501,7 +501,7 @@ func (c *Conn) updateEndpoints(why string) {
 		}
 	}()
-	c.logf("magicsock: starting endpoint update (%s)", why)
+	c.logf("[v1] magicsock: starting endpoint update (%s)", why)
 	endpoints, reasons, err := c.determineEndpoints(c.connCtx)
 	if err != nil {
@@ -540,7 +540,7 @@ func (c *Conn) setEndpoints(endpoints []string, reasons map[string]string) (chan
 		// skipped during the e2e tests because they depend
 		// too much on the exact sequence of updates. Fix the
 		// tests. But a protocol rewrite might happen first.
-		c.logf("magicsock: ignoring pre-DERP map, STUN-less endpoint update: %v", endpoints)
+		c.logf("[v1] magicsock: ignoring pre-DERP map, STUN-less endpoint update: %v", endpoints)
 		return false
 	}
@@ -677,7 +677,7 @@ func (c *Conn) callNetInfoCallback(ni *tailcfg.NetInfo) {
 	}
 	c.netInfoLast = ni
 	if c.netInfoFunc != nil {
-		c.logf("magicsock: netInfo update: %+v", ni)
+		c.logf("[v1] magicsock: netInfo update: %+v", ni)
 		go c.netInfoFunc(ni)
 	}
 }
@@ -776,7 +776,7 @@ func (c *Conn) Ping(ip netaddr.IP, cb func(*ipnstate.PingResult)) {
 			cb(res)
 			return
 		}
-		c.logf("magicsock: started peer %v for ping to %v", dk.ShortString(), peer.Key.ShortString())
+		c.logf("[v1] magicsock: started peer %v for ping to %v", dk.ShortString(), peer.Key.ShortString())
 	}
 	de.cliPing(res, cb)
 }
@@ -1224,9 +1224,9 @@ func (c *Conn) setPeerLastDerpLocked(peer key.Public, regionID, homeID int) {
 		newDesc = "alt"
 	}
 	if old == 0 {
-		c.logf("magicsock: derp route for %s set to derp-%d (%s)", peerShort(peer), regionID, newDesc)
+		c.logf("[v1] magicsock: derp route for %s set to derp-%d (%s)", peerShort(peer), regionID, newDesc)
 	} else {
-		c.logf("magicsock: derp route for %s changed from derp-%d => derp-%d (%s)", peerShort(peer), old, regionID, newDesc)
+		c.logf("[v1] magicsock: derp route for %s changed from derp-%d => derp-%d (%s)", peerShort(peer), old, regionID, newDesc)
 	}
 }
@@ -1285,7 +1285,7 @@ func (c *Conn) runDerpReader(ctx context.Context, derpFakeAddr netaddr.IPPort, d
 			return
 		}
 		if c.networkDown() {
-			c.logf("magicsock: derp.Recv(derp-%d): network down, closing", regionID)
+			c.logf("[v1] magicsock: derp.Recv(derp-%d): network down, closing", regionID)
 			return
 		}
 		select {
@@ -1662,7 +1662,7 @@ func (c *Conn) sendDiscoMessage(dst netaddr.IPPort, dstKey tailcfg.NodeKey, dstD
 	sent, err = c.sendAddr(dst, key.Public(dstKey), pkt)
 	if sent {
 		if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco) {
-			c.logf("magicsock: disco: %v->%v (%v, %v) sent %v", c.discoShort, dstDisco.ShortString(), dstKey.ShortString(), derpStr(dst.String()), disco.MessageSummary(m))
+			c.logf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v", c.discoShort, dstDisco.ShortString(), dstKey.ShortString(), derpStr(dst.String()), disco.MessageSummary(m))
 		}
 	} else if err == nil {
 		// Can't send. (e.g. no IPv6 locally)
@@ -1838,7 +1838,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, de *discoEndpoint, src netaddr.I
 	de.lastPingFrom = src
 	de.lastPingTime = time.Now()
 	if !likelyHeartBeat || debugDisco {
-		c.logf("magicsock: disco: %v<-%v (%v, %v) got ping tx=%x", c.discoShort, de.discoShort, peerNode.Key.ShortString(), src, dm.TxID[:6])
+		c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x", c.discoShort, de.discoShort, peerNode.Key.ShortString(), src, dm.TxID[:6])
 	}

 	// Remember this route if not present.
@@ -1866,9 +1866,9 @@ func (c *Conn) setAddrToDiscoLocked(src netaddr.IPPort, newk tailcfg.DiscoKey, a
 		return
 	}
 	if ok {
-		c.logf("magicsock: disco: changing mapping of %v from %x=>%x", src, oldk.ShortString(), newk.ShortString())
+		c.logf("[v1] magicsock: disco: changing mapping of %v from %x=>%x", src, oldk.ShortString(), newk.ShortString())
 	} else {
-		c.logf("magicsock: disco: adding mapping of %v to %v", src, newk.ShortString())
+		c.logf("[v1] magicsock: disco: adding mapping of %v to %v", src, newk.ShortString())
 	}
 	c.discoOfAddr[src] = newk
 	if !ok {
@@ -2075,7 +2075,7 @@ func (c *Conn) SetNetworkMap(nm *controlclient.NetworkMap) {
 		}
 	}
-	c.logf("magicsock: got updated network map; %d peers (%d with discokey)", len(nm.Peers), numDisco)
+	c.logf("[v1] magicsock: got updated network map; %d peers (%d with discokey)", len(nm.Peers), numDisco)
 	c.netMap = nm

 	// Build and/or update node<->disco maps, only reallocating if
@@ -2319,9 +2319,9 @@ func (c *Conn) periodicReSTUN() {
 			doReSTUN := c.shouldDoPeriodicReSTUN()
 			if !lastIdleState.EqualBool(doReSTUN) {
 				if doReSTUN {
-					c.logf("magicsock: periodicReSTUN enabled")
+					c.logf("[v1] magicsock: periodicReSTUN enabled")
 				} else {
-					c.logf("magicsock: periodicReSTUN disabled due to inactivity")
+					c.logf("[v1] magicsock: periodicReSTUN disabled due to inactivity")
 				}
 				lastIdleState.Set(doReSTUN)
 			}
@@ -2376,7 +2376,7 @@ func (c *Conn) ReSTUN(why string) {
 	if c.endpointsUpdateActive {
 		if c.wantEndpointsUpdate != why {
-			c.logf("magicsock: ReSTUN: endpoint update active, need another later (%q)", why)
+			c.logf("[v1] magicsock: ReSTUN: endpoint update active, need another later (%q)", why)
 			c.wantEndpointsUpdate = why
 		}
 	} else {
@@ -2994,7 +2994,7 @@ func (de *discoEndpoint) heartbeat() {
 	if time.Since(de.lastSend) > sessionActiveTimeout {
 		// Session's idle. Stop heartbeating.
-		de.c.logf("magicsock: disco: ending heartbeats for idle session to %v (%v)", de.publicKey.ShortString(), de.discoShort)
+		de.c.logf("[v1] magicsock: disco: ending heartbeats for idle session to %v (%v)", de.publicKey.ShortString(), de.discoShort)
 		return
 	}
@@ -3101,7 +3101,7 @@ func (de *discoEndpoint) pingTimeout(txid stun.TxID) {
 		return
 	}
 	if debugDisco || de.bestAddr.IsZero() || time.Now().After(de.trustBestAddrUntil) {
-		de.c.logf("magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort)
+		de.c.logf("[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort)
 	}
 	de.removeSentPingLocked(txid, sp)
 }
@@ -3194,7 +3194,7 @@ func (de *discoEndpoint) sendPingsLocked(now time.Time, sendCallMeMaybe bool) {
 		sentAny = true

 		if firstPing && sendCallMeMaybe {
-			de.c.logf("magicsock: disco: send, starting discovery for %v (%v)", de.publicKey.ShortString(), de.discoShort)
+			de.c.logf("[v1] magicsock: disco: send, starting discovery for %v (%v)", de.publicKey.ShortString(), de.discoShort)
 		}

 		de.startPingLocked(ep, now, pingDiscovery)
@@ -3340,7 +3340,7 @@ func (de *discoEndpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort)
 	}
 	if sp.purpose != pingHeartbeat {
-		de.c.logf("magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pong.src=%v%v", de.c.discoShort, de.discoShort, de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), m.Src, logger.ArgWriter(func(bw *bufio.Writer) {
+		de.c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pong.src=%v%v", de.c.discoShort, de.discoShort, de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), m.Src, logger.ArgWriter(func(bw *bufio.Writer) {
 			if sp.to != src {
 				fmt.Fprintf(bw, " ping.to=%v", sp.to)
 			}
@@ -3424,7 +3424,7 @@ func (de *discoEndpoint) stopAndReset() {
 	de.mu.Lock()
 	defer de.mu.Unlock()
-	de.c.logf("magicsock: doing cleanup for discovery key %x", de.discoKey[:])
+	de.c.logf("[v1] magicsock: doing cleanup for discovery key %x", de.discoKey[:])

 	// Zero these fields so if the user re-starts the network, the discovery
 	// state isn't a mix of before & after two sessions.

@@ -21,16 +21,16 @@ type fakeRouter struct {
 }

 func (r fakeRouter) Up() error {
-	r.logf("Warning: fakeRouter.Up: not implemented.")
+	r.logf("[v1] warning: fakeRouter.Up: not implemented.")
 	return nil
 }

 func (r fakeRouter) Set(cfg *Config) error {
-	r.logf("Warning: fakeRouter.Set: not implemented.")
+	r.logf("[v1] warning: fakeRouter.Set: not implemented.")
 	return nil
 }

 func (r fakeRouter) Close() error {
-	r.logf("Warning: fakeRouter.Close: not implemented.")
+	r.logf("[v1] warning: fakeRouter.Close: not implemented.")
 	return nil
 }

@@ -559,7 +559,7 @@ func (p *pinger) run(ctx context.Context, peerKey wgcfg.Key, ips []wgcfg.IP, src
 // This is only used with legacy peers (before 0.100.0) that don't
 // have advertised discovery keys.
 func (e *userspaceEngine) pinger(peerKey wgcfg.Key, ips []wgcfg.IP) {
-	e.logf("generating initial ping traffic to %s (%v)", peerKey.ShortString(), ips)
+	e.logf("[v1] generating initial ping traffic to %s (%v)", peerKey.ShortString(), ips)
 	var srcIP packet.IP4

 	e.wgLock.Lock()
@@ -1002,7 +1002,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config)
 		}
 	}

-	e.logf("wgengine: Reconfig done")
+	e.logf("[v1] wgengine: Reconfig done")
 	return nil
 }
@@ -1253,7 +1253,7 @@ func (e *userspaceEngine) LinkChange(isExpensive bool) {
 	} else if needRebind {
 		e.logf("LinkChange: major, rebinding. New state: %v", cur)
 	} else {
-		e.logf("LinkChange: minor")
+		e.logf("[v1] LinkChange: minor")
 	}

 	e.magicConn.SetNetworkUp(up)
