go/src/runtime/tracebuf.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Trace buffer management.

package runtime

import (
	"internal/runtime/sys"
	"unsafe"
)

// Maximum number of bytes required to encode a uint64 in base-128
// (7 payload bits per byte, so ceil(64/7) = 10 bytes).
const traceBytesPerNumber = 10

// traceWriter is the interface for writing all trace data.
//
// This type is passed around as a value, and all of its methods return
// a new traceWriter. This allows for chaining together calls in a fluent-style
// API. This is partly stylistic, and very slightly for performance, since
// the compiler can destructure this value and pass it between calls as
// just regular arguments. However, this style is not load-bearing, and
// we can change it if it's deemed too error-prone.
type traceWriter struct {
	traceLocker
	*traceBuf
}
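
// A minimal usage sketch of the fluent style described above (illustrative
// only; ev and arg are hypothetical values, not identifiers from this file):
//
//	w := tl.writer()
//	w, _ = w.ensure(1 + traceBytesPerNumber)
//	w.byte(byte(ev))
//	w.varint(arg)
//	w.end()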

// writer returns a traceWriter that writes into the current M's stream.
func (tl traceLocker) writer() traceWriter {
	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2]}
}

// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
//
// It should only be used in contexts where either:
//   - Another traceLocker is held.
//   - trace.gen is prevented from advancing.
//
// buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
	return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}

// end writes the buffer back into the m.
func (w traceWriter) end() {
	if w.mp == nil {
		// Tolerate a nil mp. It makes code that creates traceWriters directly
		// less error-prone.
		return
	}
	w.mp.trace.buf[w.gen%2] = w.traceBuf
}

// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
	refill := w.traceBuf == nil || !w.available(maxSize)
	if refill {
		w = w.refill(traceNoExperiment)
	}
	return w, refill
}

// flush puts w.traceBuf on the queue of full buffers.
func (w traceWriter) flush() traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		unlock(&trace.lock)
	})
	w.traceBuf = nil
	return w
}

// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
//
// exp indicates whether the refilled batch should be an EvExperimentalBatch.
func (w traceWriter) refill(exp traceExperiment) traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		if trace.empty != nil {
			w.traceBuf = trace.empty
			trace.empty = w.traceBuf.link
			unlock(&trace.lock)
		} else {
			unlock(&trace.lock)
			w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
			if w.traceBuf == nil {
				throw("trace: out of memory")
			}
		}
	})

	// Initialize the buffer.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		// Make sure the timestamp moves strictly forward relative to the
		// last event written into this buffer.
		ts = w.traceBuf.lastTime + 1
	}
	w.traceBuf.lastTime = ts
	w.traceBuf.link = nil
	w.traceBuf.pos = 0

	// Tolerate a nil mp.
	mID := ^uint64(0)
	if w.mp != nil {
		mID = uint64(w.mp.procid)
	}

	// Write the buffer's header.
	if exp == traceNoExperiment {
		w.byte(byte(traceEvEventBatch))
	} else {
		w.byte(byte(traceEvExperimentalBatch))
		w.byte(byte(exp))
	}
	w.varint(uint64(w.gen))
	w.varint(uint64(mID))
	w.varint(uint64(ts))
	w.traceBuf.lenPos = w.varintReserve()
	return w
}
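
// For reference, the batch header refill writes is, reading directly from the
// code above (not a separate spec): one byte for the batch event type, plus one
// byte naming the experiment for experimental batches, followed by varints for
// the generation, the M ID, and the base timestamp, and finally a reserved
// fixed-width varint that traceBufFlush later fills in with the batch length.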

// traceBufQueue is a FIFO of traceBufs.
type traceBufQueue struct {
	head, tail *traceBuf
}

// push queues buf into queue of buffers.
func (q *traceBufQueue) push(buf *traceBuf) {
	buf.link = nil
	if q.head == nil {
		q.head = buf
	} else {
		q.tail.link = buf
	}
	q.tail = buf
}

// pop dequeues from the queue of buffers.
func (q *traceBufQueue) pop() *traceBuf {
	buf := q.head
	if buf == nil {
		return nil
	}
	q.head = buf.link
	if q.head == nil {
		q.tail = nil
	}
	buf.link = nil
	return buf
}

func (q *traceBufQueue) empty() bool {
	return q.head == nil
}
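
// A minimal usage sketch of the queue (illustrative only; buf1 and buf2 are
// hypothetical *traceBuf values):
//
//	var q traceBufQueue
//	q.push(buf1)
//	q.push(buf2)
//	for !q.empty() {
//		buf := q.pop() // dequeues buf1, then buf2 (FIFO order)
//		_ = buf
//	}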

// traceBufHeader is the header of a per-M tracing buffer.
type traceBufHeader struct {
	link     *traceBuf // in trace.empty/full
	lastTime traceTime // when we wrote the last event
	pos      int       // next write offset in arr
	lenPos   int       // position of batch length value
}

// traceBuf is a per-M tracing buffer.
//
// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	// arr is the underlying buffer for trace data. It is sized so that
	// the traceBuf as a whole occupies exactly 64 KiB.
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	arr := buf.arr[pos : pos+traceBytesPerNumber]
	for i := range arr {
		if v < 0x80 {
			pos += i + 1
			arr[i] = byte(v)
			break
		}
		arr[i] = 0x80 | byte(v)
		v >>= 7
	}
	buf.pos = pos
}
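
// For example (a worked encoding, not code from this file): varint(1) emits the
// single byte 0x01, while varint(300) emits 0xAC 0x02. 300 is 0b1_0010_1100;
// the low 7 bits (0b0101100 = 0x2C) go first with the continuation bit set
// (0xAC), then the remaining bits (0b10 = 0x02) follow with no continuation.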

// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
func (buf *traceBuf) varintReserve() int {
	p := buf.pos
	buf.pos += traceBytesPerNumber
	return p
}

// stringData appends s's data directly to buf.
func (buf *traceBuf) stringData(s string) {
	buf.pos += copy(buf.arr[buf.pos:], s)
}

// available reports whether at least size bytes of space remain in buf.
func (buf *traceBuf) available(size int) bool {
	return len(buf.arr)-buf.pos >= size
}

// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
func (buf *traceBuf) varintAt(pos int, v uint64) {
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			buf.arr[pos] = 0x80 | byte(v)
		} else {
			buf.arr[pos] = byte(v)
		}
		v >>= 7
		pos++
	}
	if v != 0 {
		throw("v could not fit in traceBytesPerNumber")
	}
}
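
// A sketch of how varintReserve and varintAt pair up (illustrative only;
// lenPos and start are hypothetical locals):
//
//	lenPos := buf.varintReserve() // reserve a fixed-width length field
//	start := buf.pos
//	buf.stringData("payload")     // write data of as-yet-unknown length
//	buf.varintAt(lenPos, uint64(buf.pos-start))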

// traceBufFlush flushes a trace buffer.
//
// Must run on the system stack because trace.lock must be held.
//
//go:systemstack
func traceBufFlush(buf *traceBuf, gen uintptr) {
	assertLockHeld(&trace.lock)

	// Write out the non-header length of the batch in the header.
	//
	// Note: the length of the header is not included to make it easier
	// to calculate this value when deserializing and reserializing the
	// trace. Varints can have additional padding of zero bits that is
	// quite difficult to preserve, and if we include the header we
	// force serializers to do more work. Nothing else actually needs
	// padding.
	buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
	trace.full[gen%2].push(buf)

	// Notify the scheduler that there's work available and that the trace
	// reader should be scheduled.
	if !trace.workAvailable.Load() {
		trace.workAvailable.Store(true)
	}
}
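
// Worked example for the length computation above (illustrative numbers, not
// from this file): if the reserved length field begins at lenPos = 17 and the
// batch ends at pos = 1024, the recorded length is 1024 - (17 + 10) = 997
// bytes, i.e. everything after the fixed-width length varint itself.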