From 48112361894cc741416168b00f101e46822aa4c8 Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Fri, 15 Jan 2021 19:13:59 -0800
Subject: [PATCH] wgengine/magicsock: speed up BenchmarkReceiveFrom, store
 context.Done chan

context.cancelCtx.Done involves a mutex and isn't as cheap as I
previously assumed. Convert the donec method into a struct field and
store the channel value once.

Our one magicsock.Conn gets one pointer larger, but it cuts ~1% of the
CPU time of the ReceiveFrom benchmark and removes a bubble from the
--svg output :)
---
 wgengine/magicsock/magicsock.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go
index f181e0f3f..4e1edcf33 100644
--- a/wgengine/magicsock/magicsock.go
+++ b/wgengine/magicsock/magicsock.go
@@ -128,6 +128,7 @@ type Conn struct {
 
 	connCtx       context.Context // closed on Conn.Close
 	connCtxCancel func()          // closes connCtx
+	donec         <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call
 
 	// pconn4 and pconn6 are the underlying UDP sockets used to
 	// send/receive packets for wireguard and other magicsock
@@ -454,6 +455,7 @@ func NewConn(opts Options) (*Conn, error) {
 	}
 
 	c.connCtx, c.connCtxCancel = context.WithCancel(context.Background())
+	c.donec = c.connCtx.Done()
 	c.netChecker = &netcheck.Client{
 		Logf:         logger.WithPrefix(c.logf, "netcheck: "),
 		GetSTUNConn4: func() netcheck.STUNConn { return c.pconn4 },
@@ -486,8 +488,6 @@ func (c *Conn) Start() {
 	go c.periodicDerpCleanup()
 }
 
-func (c *Conn) donec() <-chan struct{} { return c.connCtx.Done() }
-
 // ignoreSTUNPackets sets a STUN packet processing func that does nothing.
 func (c *Conn) ignoreSTUNPackets() {
 	c.stunReceiveFunc.Store(func([]byte, netaddr.IPPort) {})
@@ -1099,7 +1099,7 @@ func (c *Conn) sendAddr(addr netaddr.IPPort, pubKey key.Public, b []byte) (sent
 	copy(pkt, b)
 
 	select {
-	case <-c.donec():
+	case <-c.donec:
 		return false, errConnClosed
 	case ch <- derpWriteRequest{addr, pubKey, pkt}:
 		return true, nil
@@ -1468,7 +1468,7 @@ func (c *Conn) awaitUDP4(b []byte) {
 	if err != nil {
 		select {
 		case c.udpRecvCh <- udpReadResult{err: err}:
-		case <-c.donec():
+		case <-c.donec:
 		}
 		return
 	}
@@ -1487,7 +1487,7 @@ func (c *Conn) awaitUDP4(b []byte) {
 
 	select {
 	case c.udpRecvCh <- udpReadResult{n: n, addr: addr, ipp: ipp}:
-	case <-c.donec():
+	case <-c.donec:
 	}
 	return
 }
@@ -1548,7 +1548,7 @@ Top:
 			c.bufferedIPv4Packet = append(c.bufferedIPv4Packet[:0], b[:um.n]...)
 		}
 		c.pconn4.SetReadDeadline(time.Time{})
-	case <-c.donec():
+	case <-c.donec:
 		return 0, nil, errors.New("Conn closed")
 	}
 	var regionID int
@@ -1627,7 +1627,7 @@ Top:
 		c.noteRecvActivityFromEndpoint(ep)
 		return n, ep, nil
 
-	case <-c.donec():
+	case <-c.donec:
 		// Socket has been shut down. All the producers of packets
 		// respond to the context cancellation and go away, so we have
 		// to also unblock and return an error, to inform wireguard-go
@@ -2362,7 +2362,7 @@ func (c *Conn) periodicReSTUN() {
 	var lastIdleState opt.Bool
 	for {
 		select {
-		case <-c.donec():
+		case <-c.donec:
 			return
 		case <-timer.C:
 			doReSTUN := c.shouldDoPeriodicReSTUN()
@@ -2387,7 +2387,7 @@ func (c *Conn) periodicDerpCleanup() {
 	defer ticker.Stop()
 	for {
 		select {
-		case <-c.donec():
+		case <-c.donec:
 			return
 		case <-ticker.C:
 			c.cleanStaleDerp()
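
For readers outside this codebase, the pattern is easy to reproduce in
isolation. The sketch below is illustrative only, with hypothetical names
(fooConn, newFooConn, send) rather than magicsock code: it calls ctx.Done()
once, stores the returned channel in a struct field, and has hot-path
selects read that field, trading one extra pointer per value for skipping
the lock inside context.cancelCtx.Done on every select.

// Minimal sketch of caching ctx.Done() in a struct field.
// Names are hypothetical; this is not code from the patch.
package main

import (
	"context"
	"errors"
	"fmt"
)

var errClosed = errors.New("conn closed")

type fooConn struct {
	ctx    context.Context
	cancel context.CancelFunc
	donec  <-chan struct{} // ctx.Done(), stored once at construction
}

func newFooConn() *fooConn {
	ctx, cancel := context.WithCancel(context.Background())
	return &fooConn{
		ctx:    ctx,
		cancel: cancel,
		donec:  ctx.Done(), // pay Done()'s locking cost here, not per select
	}
}

// send delivers v to ch unless the conn has been closed.
func (c *fooConn) send(ch chan<- int, v int) error {
	select {
	case <-c.donec: // plain field read; no Done() call on the hot path
		return errClosed
	case ch <- v:
		return nil
	}
}

func (c *fooConn) close() { c.cancel() }

func main() {
	c := newFooConn()
	ch := make(chan int, 1)
	fmt.Println(c.send(ch, 1)) // <nil>
	c.close()
	fmt.Println(c.send(ch, 2)) // conn closed: ch is full and donec is closed
}

The caching is safe because Done is documented to return the same channel
on every call for a given Context; all that is given up is the lazy
allocation of that channel, which matters little for the single
magicsock.Conn the commit message describes.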