logtail: add a re-usable buffer for uploads

This avoids a per-upload allocation of up to 4k (which in practice
often means a per-log-line allocation).

Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
pull/2677/head
Josh Bleecher Snyder, committed by Josh Bleecher Snyder
parent 278e7de9c9
commit 0c038b477f
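In isolation, the pattern this commit adopts looks roughly like the sketch below. This is a hedged illustration, not the logtail code itself: encodeBatch and the sample entries are made up for the example. The point is that bytes.NewBuffer(scratch[:0]) starts an empty buffer on top of scratch's backing array, so encoding a batch reuses the one 4k allocation instead of allocating a fresh buffer per upload.

package main

import (
	"bytes"
	"fmt"
)

// encodeBatch is a hypothetical stand-in for drainPending's encoding step:
// it appends a JSON array of entries into scratch's backing array and only
// allocates if the batch outgrows cap(scratch).
func encodeBatch(scratch []byte, entries []string) []byte {
	buf := bytes.NewBuffer(scratch[:0]) // reuse scratch's capacity, start at length 0
	buf.WriteByte('[')
	for i, e := range entries {
		if i > 0 {
			buf.WriteByte(',')
		}
		buf.WriteString(e)
	}
	buf.WriteByte(']')
	return buf.Bytes()
}

func main() {
	scratch := make([]byte, 4096) // one allocation, reused for every batch
	for _, batch := range [][]string{{`{"n":1}`}, {`{"n":2}`, `{"n":3}`}} {
		fmt.Printf("%s\n", encodeBatch(scratch, batch))
	}
}

The caller owns scratch, mirroring the diff below where uploading allocates it once and passes it into every drainPending call; presumably reuse is safe there because each body is consumed by the upload before the next drain begins.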

@@ -200,9 +200,10 @@ func (l *Logger) drainBlock() (shuttingDown bool) {
 }
 
 // drainPending drains and encodes a batch of logs from the buffer for upload.
+// It uses scratch as its initial buffer.
 // If no logs are available, drainPending blocks until logs are available.
-func (l *Logger) drainPending() (res []byte) {
-	buf := new(bytes.Buffer)
+func (l *Logger) drainPending(scratch []byte) (res []byte) {
+	buf := bytes.NewBuffer(scratch[:0])
 	buf.WriteByte('[')
 	entries := 0
@@ -261,8 +262,9 @@ func (l *Logger) drainPending() (res []byte) {
 func (l *Logger) uploading(ctx context.Context) {
 	defer close(l.shutdownDone)
+	scratch := make([]byte, 4096) // reusable buffer to write into
 	for {
-		body := l.drainPending()
+		body := l.drainPending(scratch)
 		origlen := -1 // sentinel value: uncompressed
 		// Don't attempt to compress tiny bodies; not worth the CPU cycles.
 		if l.zstdEncoder != nil && len(body) > 256 {
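One detail worth noting, again as an assumed illustration rather than code from the commit: buf.Bytes() keeps aliasing scratch as long as the encoded batch fits in the 4k capacity, which is exactly how the per-upload allocation is avoided; a batch larger than that forces bytes.Buffer to grow into a new array, so only oversized batches allocate.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	scratch := make([]byte, 4096)

	small := bytes.NewBuffer(scratch[:0])
	small.WriteString(`[{"logtail":"small batch"}]`)
	body := small.Bytes()
	fmt.Println(&body[0] == &scratch[0]) // true: body still shares scratch's backing array

	big := bytes.NewBuffer(scratch[:0])
	big.Write(make([]byte, 8192)) // exceeds cap(scratch), so the buffer grows
	grown := big.Bytes()
	fmt.Println(&grown[0] == &scratch[0]) // false: growth moved the data to a new array
}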
