mirror of https://github.com/golang/go.git
cmd/trace: add almost full support for v2 traces in the trace viewer
This change refactors the cmd/trace package and adds most of the support for v2 traces. The following features of note are missing in this CL and will be implemented in follow-up CLs: - The focustask filter for the trace viewer - The taskid filter for the trace viewer - The goid filter for the trace viewer - Pprof profiles - The MMU graph - The goroutine analysis pages - The task analysis pages - The region analysis pages This CL makes one notable change to the trace CLI: it makes the -d flag accept an integer to set the debug mode. For old traces -d != 0 works just like -d. For new traces -d=1 means the high-level events and -d=2 means the low-level events. Thanks to Felix Geisendörfer (felix.geisendoerfer@datadoghq.com) for doing a lot of work on this CL; I picked this up from him and got a massive headstart as a result. For #60773. For #63960. Change-Id: I3626e22473227c5980134a85f1bb6a845f567b1b Reviewed-on: https://go-review.googlesource.com/c/go/+/542218 Reviewed-by: Michael Pratt <mpratt@google.com> Auto-Submit: Michael Knyszek <mknyszek@google.com> TryBot-Bypass: Michael Knyszek <mknyszek@google.com>
This commit is contained in:
parent
3548f9a01f
commit
748952f494
|
|
@ -5,10 +5,10 @@
|
|||
package trace
|
||||
|
||||
import (
|
||||
"cmd/internal/traceviewer"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"internal/trace/traceviewer/format"
|
||||
"os"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
|
@ -47,7 +47,7 @@ func StartSpan(ctx context.Context, name string) (context.Context, *Span) {
|
|||
return ctx, nil
|
||||
}
|
||||
childSpan := &Span{t: tc.t, name: name, tid: tc.tid, start: time.Now()}
|
||||
tc.t.writeEvent(&traceviewer.Event{
|
||||
tc.t.writeEvent(&format.Event{
|
||||
Name: childSpan.name,
|
||||
Time: float64(childSpan.start.UnixNano()) / float64(time.Microsecond),
|
||||
TID: childSpan.tid,
|
||||
|
|
@ -77,7 +77,7 @@ func Flow(ctx context.Context, from *Span, to *Span) {
|
|||
}
|
||||
|
||||
id := tc.t.getNextFlowID()
|
||||
tc.t.writeEvent(&traceviewer.Event{
|
||||
tc.t.writeEvent(&format.Event{
|
||||
Name: from.name + " -> " + to.name,
|
||||
Category: "flow",
|
||||
ID: id,
|
||||
|
|
@ -85,7 +85,7 @@ func Flow(ctx context.Context, from *Span, to *Span) {
|
|||
Phase: phaseFlowStart,
|
||||
TID: from.tid,
|
||||
})
|
||||
tc.t.writeEvent(&traceviewer.Event{
|
||||
tc.t.writeEvent(&format.Event{
|
||||
Name: from.name + " -> " + to.name,
|
||||
Category: "flow", // TODO(matloob): Add Category to Flow?
|
||||
ID: id,
|
||||
|
|
@ -110,7 +110,7 @@ func (s *Span) Done() {
|
|||
return
|
||||
}
|
||||
s.end = time.Now()
|
||||
s.t.writeEvent(&traceviewer.Event{
|
||||
s.t.writeEvent(&format.Event{
|
||||
Name: s.name,
|
||||
Time: float64(s.end.UnixNano()) / float64(time.Microsecond),
|
||||
TID: s.tid,
|
||||
|
|
@ -125,7 +125,7 @@ type tracer struct {
|
|||
nextFlowID atomic.Uint64
|
||||
}
|
||||
|
||||
func (t *tracer) writeEvent(ev *traceviewer.Event) error {
|
||||
func (t *tracer) writeEvent(ev *format.Event) error {
|
||||
f := <-t.file
|
||||
defer func() { t.file <- f }()
|
||||
var err error
|
||||
|
|
|
|||
|
|
@ -7,10 +7,11 @@ package main
|
|||
import (
|
||||
"bufio"
|
||||
"cmd/internal/browser"
|
||||
cmdv2 "cmd/trace/v2"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"internal/trace"
|
||||
"internal/trace/traceviewer"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
|
|
@ -46,7 +47,7 @@ Supported profile types are:
|
|||
Flags:
|
||||
-http=addr: HTTP service address (e.g., ':6060')
|
||||
-pprof=type: print a pprof-like profile instead
|
||||
-d: print debug info such as parsed events
|
||||
-d=int: print debug info such as parsed events (1 for high-level, 2 for low-level)
|
||||
|
||||
Note that while the various profiles available when launching
|
||||
'go tool trace' work on every browser, the trace viewer itself
|
||||
|
|
@ -57,7 +58,7 @@ and is only actively tested on that browser.
|
|||
var (
|
||||
httpFlag = flag.String("http", "localhost:0", "HTTP service address (e.g., ':6060')")
|
||||
pprofFlag = flag.String("pprof", "", "print a pprof-like profile instead")
|
||||
debugFlag = flag.Bool("d", false, "print debug information such as parsed events list")
|
||||
debugFlag = flag.Int("d", 0, "print debug information (1 for basic debug info, 2 for lower-level info)")
|
||||
|
||||
// The binary file name, left here for serveSVGProfile.
|
||||
programBinary string
|
||||
|
|
@ -83,6 +84,13 @@ func main() {
|
|||
flag.Usage()
|
||||
}
|
||||
|
||||
if isTraceV2(traceFile) {
|
||||
if err := cmdv2.Main(traceFile, *httpFlag, *pprofFlag, *debugFlag); err != nil {
|
||||
dief("%s\n", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var pprofFunc func(io.Writer, *http.Request) error
|
||||
switch *pprofFlag {
|
||||
case "net":
|
||||
|
|
@ -115,7 +123,7 @@ func main() {
|
|||
dief("%v\n", err)
|
||||
}
|
||||
|
||||
if *debugFlag {
|
||||
if *debugFlag != 0 {
|
||||
trace.Print(res.Events)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
|
@ -132,12 +140,27 @@ func main() {
|
|||
browser.Open(addr)
|
||||
|
||||
// Start http server.
|
||||
http.HandleFunc("/", httpMain)
|
||||
http.Handle("/", traceviewer.MainHandler(ranges))
|
||||
err = http.Serve(ln, nil)
|
||||
dief("failed to start http server: %v\n", err)
|
||||
}
|
||||
|
||||
var ranges []Range
|
||||
// isTraceV2 returns true if filename holds a v2 trace.
|
||||
func isTraceV2(filename string) bool {
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
ver, _, err := trace.ReadVersion(file)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return ver >= 1022
|
||||
}
|
||||
|
||||
var ranges []traceviewer.Range
|
||||
|
||||
var loader struct {
|
||||
once sync.Once
|
||||
|
|
@ -175,209 +198,6 @@ func parseTrace() (trace.ParseResult, error) {
|
|||
return loader.res, loader.err
|
||||
}
|
||||
|
||||
// httpMain serves the starting page.
|
||||
func httpMain(w http.ResponseWriter, r *http.Request) {
|
||||
if err := templMain.Execute(w, ranges); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var templMain = template.Must(template.New("").Parse(`
|
||||
<html>
|
||||
<style>
|
||||
/* See https://github.com/golang/pkgsite/blob/master/static/shared/typography/typography.css */
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji';
|
||||
font-size: 1rem;
|
||||
line-height: normal;
|
||||
max-width: 9in;
|
||||
margin: 1em;
|
||||
}
|
||||
h1 { font-size: 1.5rem; }
|
||||
h2 { font-size: 1.375rem; }
|
||||
h1,h2 {
|
||||
font-weight: 600;
|
||||
line-height: 1.25em;
|
||||
word-break: break-word;
|
||||
}
|
||||
p { color: grey85; font-size:85%; }
|
||||
</style>
|
||||
<body>
|
||||
<h1>cmd/trace: the Go trace event viewer</h1>
|
||||
<p>
|
||||
This web server provides various visualizations of an event log gathered during
|
||||
the execution of a Go program that uses the <a href='https://pkg.go.dev/runtime/trace'>runtime/trace</a> package.
|
||||
</p>
|
||||
|
||||
<h2>Event timelines for running goroutines</h2>
|
||||
{{if $}}
|
||||
<p>
|
||||
Large traces are split into multiple sections of equal data size
|
||||
(not duration) to avoid overwhelming the visualizer.
|
||||
</p>
|
||||
<ul>
|
||||
{{range $e := $}}
|
||||
<li><a href="{{$e.URL}}">View trace ({{$e.Name}})</a></li>
|
||||
{{end}}
|
||||
</ul>
|
||||
{{else}}
|
||||
<ul>
|
||||
<li><a href="/trace">View trace</a></li>
|
||||
</ul>
|
||||
{{end}}
|
||||
<p>
|
||||
This view displays a timeline for each of the GOMAXPROCS logical
|
||||
processors, showing which goroutine (if any) was running on that
|
||||
logical processor at each moment.
|
||||
|
||||
Each goroutine has an identifying number (e.g. G123), main function,
|
||||
and color.
|
||||
|
||||
A colored bar represents an uninterrupted span of execution.
|
||||
|
||||
Execution of a goroutine may migrate from one logical processor to another,
|
||||
causing a single colored bar to be horizontally continuous but
|
||||
vertically displaced.
|
||||
</p>
|
||||
<p>
|
||||
Clicking on a span reveals information about it, such as its
|
||||
duration, its causal predecessors and successors, and the stack trace
|
||||
at the final moment when it yielded the logical processor, for example
|
||||
because it made a system call or tried to acquire a mutex.
|
||||
|
||||
Directly underneath each bar, a smaller bar or more commonly a fine
|
||||
vertical line indicates an event occurring during its execution.
|
||||
Some of these are related to garbage collection; most indicate that
|
||||
a goroutine yielded its logical processor but then immediately resumed execution
|
||||
on the same logical processor. Clicking on the event displays the stack trace
|
||||
at the moment it occurred.
|
||||
</p>
|
||||
<p>
|
||||
The causal relationships between spans of goroutine execution
|
||||
can be displayed by clicking the Flow Events button at the top.
|
||||
</p>
|
||||
<p>
|
||||
At the top ("STATS"), there are three additional timelines that
|
||||
display statistical information.
|
||||
|
||||
"Goroutines" is a time series of the count of existing goroutines;
|
||||
clicking on it displays their breakdown by state at that moment:
|
||||
running, runnable, or waiting.
|
||||
|
||||
"Heap" is a time series of the amount of heap memory allocated (in orange)
|
||||
and (in green) the allocation limit at which the next GC cycle will begin.
|
||||
|
||||
"Threads" shows the number of kernel threads in existence: there is
|
||||
always one kernel thread per logical processor, and additional threads
|
||||
are created for calls to non-Go code such as a system call or a
|
||||
function written in C.
|
||||
</p>
|
||||
<p>
|
||||
Above the event trace for the first logical processor are
|
||||
traces for various runtime-internal events.
|
||||
|
||||
The "GC" bar shows when the garbage collector is running, and in which stage.
|
||||
Garbage collection may temporarily affect all the logical processors
|
||||
and the other metrics.
|
||||
|
||||
The "Network", "Timers", and "Syscalls" traces indicate events in
|
||||
the runtime that cause goroutines to wake up.
|
||||
</p>
|
||||
<p>
|
||||
The visualization allows you to navigate events at scales ranging from several
|
||||
seconds to a handful of nanoseconds.
|
||||
|
||||
Consult the documentation for the Chromium <a href='https://www.chromium.org/developers/how-tos/trace-event-profiling-tool/'>Trace Event Profiling Tool<a/>
|
||||
for help navigating the view.
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li><a href="/goroutines">Goroutine analysis</a></li>
|
||||
</ul>
|
||||
<p>
|
||||
This view displays information about each set of goroutines that
|
||||
shares the same main function.
|
||||
|
||||
Clicking on a main function shows links to the four types of
|
||||
blocking profile (see below) applied to that subset of goroutines.
|
||||
|
||||
It also shows a table of specific goroutine instances, with various
|
||||
execution statistics and a link to the event timeline for each one.
|
||||
|
||||
The timeline displays only the selected goroutine and any others it
|
||||
interacts with via block/unblock events. (The timeline is
|
||||
goroutine-oriented rather than logical processor-oriented.)
|
||||
</p>
|
||||
|
||||
<h2>Profiles</h2>
|
||||
<p>
|
||||
Each link below displays a global profile in zoomable graph form as
|
||||
produced by <a href='https://go.dev/blog/pprof'>pprof</a>'s "web" command.
|
||||
|
||||
In addition there is a link to download the profile for offline
|
||||
analysis with pprof.
|
||||
|
||||
All four profiles represent causes of delay that prevent a goroutine
|
||||
from running on a logical processor: because it was waiting for the network,
|
||||
for a synchronization operation on a mutex or channel, for a system call,
|
||||
or for a logical processor to become available.
|
||||
</p>
|
||||
<ul>
|
||||
<li><a href="/io">Network blocking profile</a> (<a href="/io?raw=1" download="io.profile">⬇</a>)</li>
|
||||
<li><a href="/block">Synchronization blocking profile</a> (<a href="/block?raw=1" download="block.profile">⬇</a>)</li>
|
||||
<li><a href="/syscall">Syscall blocking profile</a> (<a href="/syscall?raw=1" download="syscall.profile">⬇</a>)</li>
|
||||
<li><a href="/sched">Scheduler latency profile</a> (<a href="/sche?raw=1" download="sched.profile">⬇</a>)</li>
|
||||
</ul>
|
||||
|
||||
<h2>User-defined tasks and regions</h2>
|
||||
<p>
|
||||
The trace API allows a target program to annotate a <a
|
||||
href='https://pkg.go.dev/runtime/trace#Region'>region</a> of code
|
||||
within a goroutine, such as a key function, so that its performance
|
||||
can be analyzed.
|
||||
|
||||
<a href='https://pkg.go.dev/runtime/trace#Log'>Log events</a> may be
|
||||
associated with a region to record progress and relevant values.
|
||||
|
||||
The API also allows annotation of higher-level
|
||||
<a href='https://pkg.go.dev/runtime/trace#Task'>tasks</a>,
|
||||
which may involve work across many goroutines.
|
||||
</p>
|
||||
<p>
|
||||
The links below display, for each region and task, a histogram of its execution times.
|
||||
|
||||
Each histogram bucket contains a sample trace that records the
|
||||
sequence of events such as goroutine creations, log events, and
|
||||
subregion start/end times.
|
||||
|
||||
For each task, you can click through to a logical-processor or
|
||||
goroutine-oriented view showing the tasks and regions on the
|
||||
timeline.
|
||||
|
||||
Such information may help uncover which steps in a region are
|
||||
unexpectedly slow, or reveal relationships between the data values
|
||||
logged in a request and its running time.
|
||||
</p>
|
||||
<ul>
|
||||
<li><a href="/usertasks">User-defined tasks</a></li>
|
||||
<li><a href="/userregions">User-defined regions</a></li>
|
||||
</ul>
|
||||
|
||||
<h2>Garbage collection metrics</h2>
|
||||
<ul>
|
||||
<li><a href="/mmu">Minimum mutator utilization</a></li>
|
||||
</ul>
|
||||
<p>
|
||||
This chart indicates the maximum GC pause time (the largest x value
|
||||
for which y is zero), and more generally, the fraction of time that
|
||||
the processors are available to application goroutines ("mutators"),
|
||||
for any time window of a specified size, in the worst case.
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
`))
|
||||
|
||||
func dief(msg string, args ...any) {
|
||||
fmt.Fprintf(os.Stderr, msg, args...)
|
||||
os.Exit(1)
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"internal/trace"
|
||||
"internal/trace/traceviewer"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
|
|
@ -393,7 +394,7 @@ type linkedUtilWindow struct {
|
|||
|
||||
func newLinkedUtilWindow(ui trace.UtilWindow, window time.Duration) linkedUtilWindow {
|
||||
// Find the range containing this window.
|
||||
var r Range
|
||||
var r traceviewer.Range
|
||||
for _, r = range ranges {
|
||||
if r.EndTime > ui.Time {
|
||||
break
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -7,9 +7,10 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"cmd/internal/traceviewer"
|
||||
"context"
|
||||
"internal/trace"
|
||||
"internal/trace/traceviewer"
|
||||
"internal/trace/traceviewer/format"
|
||||
"io"
|
||||
rtrace "runtime/trace"
|
||||
"strings"
|
||||
|
|
@ -78,10 +79,10 @@ func TestGoroutineCount(t *testing.T) {
|
|||
|
||||
// Use the default viewerDataTraceConsumer but replace
|
||||
// consumeViewerEvent to intercept the ViewerEvents for testing.
|
||||
c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
|
||||
c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
|
||||
if ev.Name == "Goroutines" {
|
||||
cnt := ev.Arg.(*goroutineCountersArg)
|
||||
cnt := ev.Arg.(*format.GoroutineCountersArg)
|
||||
if cnt.Runnable+cnt.Running > 2 {
|
||||
t.Errorf("goroutine count=%+v; want no more than 2 goroutines in runnable/running state", cnt)
|
||||
}
|
||||
|
|
@ -131,7 +132,7 @@ func TestGoroutineFilter(t *testing.T) {
|
|||
gs: map[uint64]bool{10: true},
|
||||
}
|
||||
|
||||
c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
if err := generateTrace(params, c); err != nil {
|
||||
t.Fatalf("generateTrace failed: %v", err)
|
||||
}
|
||||
|
|
@ -163,10 +164,10 @@ func TestPreemptedMarkAssist(t *testing.T) {
|
|||
endTime: int64(1<<63 - 1),
|
||||
}
|
||||
|
||||
c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
|
||||
marks := 0
|
||||
c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
|
||||
c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
|
||||
if strings.Contains(ev.Name, "MARK ASSIST") {
|
||||
marks++
|
||||
}
|
||||
|
|
@ -208,16 +209,16 @@ func TestFoo(t *testing.T) {
|
|||
|
||||
params := &traceParams{
|
||||
parsed: res,
|
||||
mode: modeTaskOriented,
|
||||
mode: traceviewer.ModeTaskOriented,
|
||||
startTime: task.firstTimestamp() - 1,
|
||||
endTime: task.lastTimestamp() + 1,
|
||||
tasks: []*taskDesc{task},
|
||||
}
|
||||
|
||||
c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
|
||||
var logBeforeTaskEnd, logAfterTaskEnd bool
|
||||
c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
|
||||
c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
|
||||
if ev.Name == "log before task ends" {
|
||||
logBeforeTaskEnd = true
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,9 +8,10 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"cmd/internal/traceviewer"
|
||||
"internal/goexperiment"
|
||||
traceparser "internal/trace"
|
||||
"internal/trace/traceviewer"
|
||||
"internal/trace/traceviewer/format"
|
||||
"io"
|
||||
"runtime"
|
||||
"runtime/trace"
|
||||
|
|
@ -87,10 +88,10 @@ func TestGoroutineInSyscall(t *testing.T) {
|
|||
|
||||
// Check only one thread for the pipe read goroutine is
|
||||
// considered in-syscall.
|
||||
c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
|
||||
c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
|
||||
c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
|
||||
if ev.Name == "Threads" {
|
||||
arg := ev.Arg.(*threadCountersArg)
|
||||
arg := ev.Arg.(*format.ThreadCountersArg)
|
||||
if arg.InSyscall > 1 {
|
||||
t.Errorf("%d threads in syscall at time %v; want less than 1 thread in syscall", arg.InSyscall, ev.Time)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,249 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"internal/trace"
|
||||
"internal/trace/traceviewer"
|
||||
tracev2 "internal/trace/v2"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// generator is an interface for generating a JSON trace for the trace viewer
|
||||
// from a trace. Each method in this interface is a handler for a kind of event
|
||||
// that is interesting to render in the UI via the JSON trace.
|
||||
type generator interface {
|
||||
// Global parts.
|
||||
Sync() // Notifies the generator of an EventSync event.
|
||||
StackSample(ctx *traceContext, ev *tracev2.Event)
|
||||
GlobalRange(ctx *traceContext, ev *tracev2.Event)
|
||||
GlobalMetric(ctx *traceContext, ev *tracev2.Event)
|
||||
|
||||
// Goroutine parts.
|
||||
GoroutineLabel(ctx *traceContext, ev *tracev2.Event)
|
||||
GoroutineRange(ctx *traceContext, ev *tracev2.Event)
|
||||
GoroutineTransition(ctx *traceContext, ev *tracev2.Event)
|
||||
|
||||
// Proc parts.
|
||||
ProcRange(ctx *traceContext, ev *tracev2.Event)
|
||||
ProcTransition(ctx *traceContext, ev *tracev2.Event)
|
||||
|
||||
// Finish indicates the end of the trace and finalizes generation.
|
||||
Finish(ctx *traceContext, endTime tracev2.Time)
|
||||
}
|
||||
|
||||
// runGenerator produces a trace into ctx by running the generator over the parsed trace.
|
||||
func runGenerator(ctx *traceContext, g generator, parsed *parsedTrace) {
|
||||
for i := range parsed.events {
|
||||
ev := &parsed.events[i]
|
||||
|
||||
switch ev.Kind() {
|
||||
case tracev2.EventSync:
|
||||
g.Sync()
|
||||
case tracev2.EventStackSample:
|
||||
g.StackSample(ctx, ev)
|
||||
case tracev2.EventRangeBegin, tracev2.EventRangeActive, tracev2.EventRangeEnd:
|
||||
r := ev.Range()
|
||||
switch r.Scope.Kind {
|
||||
case tracev2.ResourceGoroutine:
|
||||
g.GoroutineRange(ctx, ev)
|
||||
case tracev2.ResourceProc:
|
||||
g.ProcRange(ctx, ev)
|
||||
case tracev2.ResourceNone:
|
||||
g.GlobalRange(ctx, ev)
|
||||
}
|
||||
case tracev2.EventMetric:
|
||||
g.GlobalMetric(ctx, ev)
|
||||
case tracev2.EventLabel:
|
||||
l := ev.Label()
|
||||
if l.Resource.Kind == tracev2.ResourceGoroutine {
|
||||
g.GoroutineLabel(ctx, ev)
|
||||
}
|
||||
case tracev2.EventStateTransition:
|
||||
switch ev.StateTransition().Resource.Kind {
|
||||
case tracev2.ResourceProc:
|
||||
g.ProcTransition(ctx, ev)
|
||||
case tracev2.ResourceGoroutine:
|
||||
g.GoroutineTransition(ctx, ev)
|
||||
}
|
||||
}
|
||||
}
|
||||
g.Finish(ctx, parsed.events[len(parsed.events)-1].Time())
|
||||
}
|
||||
|
||||
// Building blocks for generators.
|
||||
|
||||
// stackSampleGenerator implements a generic handler for stack sample events.
|
||||
// The provided resource is the resource the stack sample should count against.
|
||||
type stackSampleGenerator[R resource] struct {
|
||||
// getResource is a function to extract a resource ID from a stack sample event.
|
||||
getResource func(*tracev2.Event) R
|
||||
}
|
||||
|
||||
// StackSample implements a stack sample event handler. It expects ev to be one such event.
|
||||
func (g *stackSampleGenerator[R]) StackSample(ctx *traceContext, ev *tracev2.Event) {
|
||||
id := g.getResource(ev)
|
||||
if id == R(noResource) {
|
||||
// We have nowhere to put this in the UI.
|
||||
return
|
||||
}
|
||||
ctx.Instant(traceviewer.InstantEvent{
|
||||
Name: "CPU profile sample",
|
||||
Ts: ctx.elapsed(ev.Time()),
|
||||
Resource: uint64(id),
|
||||
Stack: ctx.Stack(viewerFrames(ev.Stack())),
|
||||
})
|
||||
}
|
||||
|
||||
// globalRangeGenerator implements a generic handler for EventRange* events that pertain
|
||||
// to tracev2.ResourceNone (the global scope).
|
||||
type globalRangeGenerator struct {
|
||||
ranges map[string]activeRange
|
||||
seenSync bool
|
||||
}
|
||||
|
||||
// Sync notifies the generator of an EventSync event.
|
||||
func (g *globalRangeGenerator) Sync() {
|
||||
g.seenSync = true
|
||||
}
|
||||
|
||||
// GlobalRange implements a handler for EventRange* events whose Scope.Kind is ResourceNone.
|
||||
// It expects ev to be one such event.
|
||||
func (g *globalRangeGenerator) GlobalRange(ctx *traceContext, ev *tracev2.Event) {
|
||||
if g.ranges == nil {
|
||||
g.ranges = make(map[string]activeRange)
|
||||
}
|
||||
r := ev.Range()
|
||||
switch ev.Kind() {
|
||||
case tracev2.EventRangeBegin:
|
||||
g.ranges[r.Name] = activeRange{ev.Time(), ev.Stack()}
|
||||
case tracev2.EventRangeActive:
|
||||
// If we've seen a Sync event, then Active events are always redundant.
|
||||
if !g.seenSync {
|
||||
// Otherwise, they extend back to the start of the trace.
|
||||
g.ranges[r.Name] = activeRange{ctx.startTime, ev.Stack()}
|
||||
}
|
||||
case tracev2.EventRangeEnd:
|
||||
// Only emit GC events, because we have nowhere to
|
||||
// put other events.
|
||||
ar := g.ranges[r.Name]
|
||||
if strings.Contains(r.Name, "GC") {
|
||||
ctx.Slice(traceviewer.SliceEvent{
|
||||
Name: r.Name,
|
||||
Ts: ctx.elapsed(ar.time),
|
||||
Dur: ev.Time().Sub(ar.time),
|
||||
Resource: trace.GCP,
|
||||
Stack: ctx.Stack(viewerFrames(ar.stack)),
|
||||
EndStack: ctx.Stack(viewerFrames(ev.Stack())),
|
||||
})
|
||||
}
|
||||
delete(g.ranges, r.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Finish flushes any outstanding ranges at the end of the trace.
|
||||
func (g *globalRangeGenerator) Finish(ctx *traceContext, endTime tracev2.Time) {
|
||||
for name, ar := range g.ranges {
|
||||
if !strings.Contains(name, "GC") {
|
||||
continue
|
||||
}
|
||||
ctx.Slice(traceviewer.SliceEvent{
|
||||
Name: name,
|
||||
Ts: ctx.elapsed(ar.time),
|
||||
Dur: endTime.Sub(ar.time),
|
||||
Resource: trace.GCP,
|
||||
Stack: ctx.Stack(viewerFrames(ar.stack)),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// globalMetricGenerator implements a generic handler for Metric events.
|
||||
type globalMetricGenerator struct {
|
||||
}
|
||||
|
||||
// GlobalMetric implements an event handler for EventMetric events. ev must be one such event.
|
||||
func (g *globalMetricGenerator) GlobalMetric(ctx *traceContext, ev *tracev2.Event) {
|
||||
m := ev.Metric()
|
||||
switch m.Name {
|
||||
case "/memory/classes/heap/objects:bytes":
|
||||
ctx.HeapAlloc(ctx.elapsed(ev.Time()), m.Value.Uint64())
|
||||
case "/gc/heap/goal:bytes":
|
||||
ctx.HeapGoal(ctx.elapsed(ev.Time()), m.Value.Uint64())
|
||||
case "/sched/gomaxprocs:threads":
|
||||
ctx.Gomaxprocs(m.Value.Uint64())
|
||||
}
|
||||
}
|
||||
|
||||
// procRangeGenerator implements a generic handler for EventRange* events whose Scope.Kind is
|
||||
// ResourceProc.
|
||||
type procRangeGenerator struct {
|
||||
ranges map[tracev2.Range]activeRange
|
||||
seenSync bool
|
||||
}
|
||||
|
||||
// Sync notifies the generator of an EventSync event.
|
||||
func (g *procRangeGenerator) Sync() {
|
||||
g.seenSync = true
|
||||
}
|
||||
|
||||
// ProcRange implements a handler for EventRange* events whose Scope.Kind is ResourceProc.
|
||||
// It expects ev to be one such event.
|
||||
func (g *procRangeGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) {
|
||||
if g.ranges == nil {
|
||||
g.ranges = make(map[tracev2.Range]activeRange)
|
||||
}
|
||||
r := ev.Range()
|
||||
switch ev.Kind() {
|
||||
case tracev2.EventRangeBegin:
|
||||
g.ranges[r] = activeRange{ev.Time(), ev.Stack()}
|
||||
case tracev2.EventRangeActive:
|
||||
// If we've seen a Sync event, then Active events are always redundant.
|
||||
if !g.seenSync {
|
||||
// Otherwise, they extend back to the start of the trace.
|
||||
g.ranges[r] = activeRange{ctx.startTime, ev.Stack()}
|
||||
}
|
||||
case tracev2.EventRangeEnd:
|
||||
// Emit proc-based ranges.
|
||||
ar := g.ranges[r]
|
||||
ctx.Slice(traceviewer.SliceEvent{
|
||||
Name: r.Name,
|
||||
Ts: ctx.elapsed(ar.time),
|
||||
Dur: ev.Time().Sub(ar.time),
|
||||
Resource: uint64(r.Scope.Proc()),
|
||||
Stack: ctx.Stack(viewerFrames(ar.stack)),
|
||||
EndStack: ctx.Stack(viewerFrames(ev.Stack())),
|
||||
})
|
||||
delete(g.ranges, r)
|
||||
}
|
||||
}
|
||||
|
||||
// Finish flushes any outstanding ranges at the end of the trace.
|
||||
func (g *procRangeGenerator) Finish(ctx *traceContext, endTime tracev2.Time) {
|
||||
for r, ar := range g.ranges {
|
||||
ctx.Slice(traceviewer.SliceEvent{
|
||||
Name: r.Name,
|
||||
Ts: ctx.elapsed(ar.time),
|
||||
Dur: endTime.Sub(ar.time),
|
||||
Resource: uint64(r.Scope.Proc()),
|
||||
Stack: ctx.Stack(viewerFrames(ar.stack)),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// activeRange represents an active EventRange* range.
|
||||
type activeRange struct {
|
||||
time tracev2.Time
|
||||
stack tracev2.Stack
|
||||
}
|
||||
|
||||
// completedRange represents a completed EventRange* range.
|
||||
type completedRange struct {
|
||||
name string
|
||||
startTime tracev2.Time
|
||||
endTime tracev2.Time
|
||||
startStack tracev2.Stack
|
||||
endStack tracev2.Stack
|
||||
arg any
|
||||
}
|
||||
|
|
@ -0,0 +1,362 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"internal/trace"
|
||||
"internal/trace/traceviewer"
|
||||
"internal/trace/traceviewer/format"
|
||||
tracev2 "internal/trace/v2"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// resource is a generic constraint interface for resource IDs.
|
||||
type resource interface{ tracev2.GoID | tracev2.ProcID }
|
||||
|
||||
// noResource indicates the lack of a resource.
|
||||
const noResource = -1
|
||||
|
||||
// gState represents the trace viewer state of a goroutine in a trace.
|
||||
//
|
||||
// The type parameter on this type is the resource which is used to construct
|
||||
// a timeline of events. e.g. R=ProcID for a proc-oriented view, R=GoID for
|
||||
// a goroutine-oriented view, etc.
|
||||
type gState[R resource] struct {
|
||||
baseName string
|
||||
named bool // Whether baseName has been set.
|
||||
label string // EventLabel extension.
|
||||
isSystemG bool
|
||||
|
||||
executing R // The resource this goroutine is executing on. (Could be itself.)
|
||||
|
||||
// lastStopStack is the stack trace at the point of the last
|
||||
// call to the stop method. This tends to be a more reliable way
|
||||
// of picking up stack traces, since the parser doesn't provide
|
||||
// a stack for every state transition event.
|
||||
lastStopStack tracev2.Stack
|
||||
|
||||
// activeRanges is the set of all active ranges on the goroutine.
|
||||
activeRanges map[string]activeRange
|
||||
|
||||
// completedRanges is a list of ranges that completed since before the
|
||||
// goroutine stopped executing. These are flushed on every stop or block.
|
||||
completedRanges []completedRange
|
||||
|
||||
// startRunning is the most recent event that caused a goroutine to
|
||||
// transition to GoRunning.
|
||||
startRunningTime tracev2.Time
|
||||
|
||||
// startSyscall is the most recent event that caused a goroutine to
|
||||
// transition to GoSyscall.
|
||||
syscall struct {
|
||||
time tracev2.Time
|
||||
stack tracev2.Stack
|
||||
active bool
|
||||
}
|
||||
|
||||
// startBlockReason is the StateTransition.Reason of the most recent
|
||||
// event that caused a gorotuine to transition to GoWaiting.
|
||||
startBlockReason string
|
||||
|
||||
// startCause is the event that allowed this goroutine to start running.
|
||||
// It's used to generate flow events. This is typically something like
|
||||
// an unblock event or a goroutine creation event.
|
||||
//
|
||||
// startCause.resource is the resource on which startCause happened, but is
|
||||
// listed separately because the cause may have happened on a resource that
|
||||
// isn't R (or perhaps on some abstract nebulous resource, like trace.NetpollP).
|
||||
startCause struct {
|
||||
time tracev2.Time
|
||||
name string
|
||||
resource uint64
|
||||
stack tracev2.Stack
|
||||
}
|
||||
}
|
||||
|
||||
// newGState constructs a new goroutine state for the goroutine
|
||||
// identified by the provided ID.
|
||||
func newGState[R resource](goID tracev2.GoID) *gState[R] {
|
||||
return &gState[R]{
|
||||
baseName: fmt.Sprintf("G%d", goID),
|
||||
executing: R(noResource),
|
||||
activeRanges: make(map[string]activeRange),
|
||||
}
|
||||
}
|
||||
|
||||
// augmentName attempts to use stk to augment the name of the goroutine
|
||||
// with stack information. This stack must be related to the goroutine
|
||||
// in some way, but it doesn't really matter which stack.
|
||||
func (gs *gState[R]) augmentName(stk tracev2.Stack) {
|
||||
if gs.named {
|
||||
return
|
||||
}
|
||||
if stk == tracev2.NoStack {
|
||||
return
|
||||
}
|
||||
name := lastFunc(stk)
|
||||
gs.baseName += fmt.Sprintf(" %s", name)
|
||||
gs.named = true
|
||||
gs.isSystemG = trace.IsSystemGoroutine(name)
|
||||
}
|
||||
|
||||
// setLabel adds an additional label to the goroutine's name.
// The label is appended in parentheses by name; setting a new
// label replaces any previous one.
func (gs *gState[R]) setLabel(label string) {
	gs.label = label
}
|
||||
|
||||
// name returns a name for the goroutine.
|
||||
func (gs *gState[R]) name() string {
|
||||
name := gs.baseName
|
||||
if gs.label != "" {
|
||||
name += " (" + gs.label + ")"
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// setStartCause sets the reason a goroutine will be allowed to start soon.
|
||||
// For example, via unblocking or exiting a blocked syscall.
|
||||
func (gs *gState[R]) setStartCause(ts tracev2.Time, name string, resource uint64, stack tracev2.Stack) {
|
||||
gs.startCause.time = ts
|
||||
gs.startCause.name = name
|
||||
gs.startCause.resource = resource
|
||||
gs.startCause.stack = stack
|
||||
}
|
||||
|
||||
// created indicates that this goroutine was just created by the provided creator.
|
||||
func (gs *gState[R]) created(ts tracev2.Time, creator R, stack tracev2.Stack) {
|
||||
if creator == R(noResource) {
|
||||
return
|
||||
}
|
||||
gs.setStartCause(ts, "go", uint64(creator), stack)
|
||||
}
|
||||
|
||||
// start indicates that a goroutine has started running on a proc.
//
// If a start cause was previously recorded (via setStartCause), a flow
// arrow is emitted from the cause to this start, and the cause is cleared
// so it fires at most once.
func (gs *gState[R]) start(ts tracev2.Time, resource R, ctx *traceContext) {
	// Set the time for all the active ranges, so their slices begin at
	// the moment the goroutine starts executing.
	for name := range gs.activeRanges {
		gs.activeRanges[name] = activeRange{ts, tracev2.NoStack}
	}

	if gs.startCause.name != "" {
		// It has a start cause. Emit a flow event.
		ctx.Arrow(traceviewer.ArrowEvent{
			Name:         gs.startCause.name,
			Start:        ctx.elapsed(gs.startCause.time),
			End:          ctx.elapsed(ts),
			FromResource: uint64(gs.startCause.resource),
			ToResource:   uint64(resource),
			FromStack:    ctx.Stack(viewerFrames(gs.startCause.stack)),
		})
		// Clear the start cause so it is consumed exactly once.
		gs.startCause.time = 0
		gs.startCause.name = ""
		gs.startCause.resource = 0
		gs.startCause.stack = tracev2.NoStack
	}
	gs.executing = resource
	gs.startRunningTime = ts
}
|
||||
|
||||
// syscallBegin indicates that the goroutine entered a syscall on a proc.
//
// If the goroutine was not already marked as executing (e.g. the trace
// started mid-syscall), it is considered to begin executing here.
func (gs *gState[R]) syscallBegin(ts tracev2.Time, resource R, stack tracev2.Stack) {
	gs.syscall.time = ts
	gs.syscall.stack = stack
	gs.syscall.active = true
	if gs.executing == R(noResource) {
		gs.executing = resource
		gs.startRunningTime = ts
	}
}
|
||||
|
||||
// syscallEnd ends the syscall slice, wherever the syscall is at. This is orthogonal
|
||||
// to blockedSyscallEnd -- both must be called when a syscall ends and that syscall
|
||||
// blocked. They're kept separate because syscallEnd indicates the point at which the
|
||||
// goroutine is no longer executing on the resource (e.g. a proc) whereas blockedSyscallEnd
|
||||
// is the point at which the goroutine actually exited the syscall regardless of which
|
||||
// resource that happened on.
|
||||
func (gs *gState[R]) syscallEnd(ts tracev2.Time, blocked bool, ctx *traceContext) {
|
||||
if !gs.syscall.active {
|
||||
return
|
||||
}
|
||||
blockString := "no"
|
||||
if blocked {
|
||||
blockString = "yes"
|
||||
}
|
||||
gs.completedRanges = append(gs.completedRanges, completedRange{
|
||||
name: "syscall",
|
||||
startTime: gs.syscall.time,
|
||||
endTime: ts,
|
||||
startStack: gs.syscall.stack,
|
||||
arg: format.BlockedArg{Blocked: blockString},
|
||||
})
|
||||
gs.syscall.active = false
|
||||
gs.syscall.time = 0
|
||||
gs.syscall.stack = tracev2.NoStack
|
||||
}
|
||||
|
||||
// blockedSyscallEnd indicates the point at which the blocked syscall ended. This is distinct
// and orthogonal to syscallEnd; both must be called if the syscall blocked. This sets up an instant
// to emit a flow event from, indicating explicitly that this goroutine was unblocked by the system.
func (gs *gState[R]) blockedSyscallEnd(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) {
	name := "exit blocked syscall"
	// Record the cause so that the next start emits a flow arrow from the
	// "Syscalls" pseudo-resource lane.
	gs.setStartCause(ts, name, trace.SyscallP, stack)

	// Emit an syscall exit instant event for the "Syscall" lane.
	ctx.Instant(traceviewer.InstantEvent{
		Name:     name,
		Ts:       ctx.elapsed(ts),
		Resource: trace.SyscallP,
		Stack:    ctx.Stack(viewerFrames(stack)),
	})
}
|
||||
|
||||
// unblock indicates that the goroutine gs represents has been unblocked.
//
// If the goroutine was blocked on the network, the unblock instant is
// attributed to the "Network" pseudo-resource rather than the unblocking
// proc, so the viewer shows netpoll wakeups in their own lane.
func (gs *gState[R]) unblock(ts tracev2.Time, stack tracev2.Stack, resource R, ctx *traceContext) {
	// Unblocking goroutine.
	name := "unblock"
	viewerResource := uint64(resource)
	if strings.Contains(gs.startBlockReason, "network") {
		// Emit an unblock instant event for the "Network" lane.
		ctx.Instant(traceviewer.InstantEvent{
			Name:     name,
			Ts:       ctx.elapsed(ts),
			Resource: trace.NetpollP,
			Stack:    ctx.Stack(viewerFrames(stack)),
		})
		// The block reason is consumed; reattribute the flow source.
		gs.startBlockReason = ""
		viewerResource = trace.NetpollP
	}
	if viewerResource != 0 {
		// Only record a start cause when we know where the unblock came from.
		gs.setStartCause(ts, name, viewerResource, stack)
	}
}
|
||||
|
||||
// block indicates that the goroutine has stopped executing on a proc -- specifically,
// it blocked for some reason.
//
// The reason is remembered so a later unblock can decide whether the wakeup
// came from the network.
func (gs *gState[R]) block(ts tracev2.Time, stack tracev2.Stack, reason string, ctx *traceContext) {
	gs.startBlockReason = reason
	gs.stop(ts, stack, ctx)
}
|
||||
|
||||
// stop indicates that the goroutine has stopped executing on a proc.
//
// It emits the execution slice for the time the goroutine ran, flushes any
// completed ranges (e.g. finished syscalls) as their own slices, closes out
// in-progress ranges up to ts, and resets the running state. Panics if the
// goroutine was not actually executing, since that indicates a broken trace
// or a generator bug.
func (gs *gState[R]) stop(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) {
	// Emit the execution time slice.
	var stk int
	if gs.lastStopStack != tracev2.NoStack {
		stk = ctx.Stack(viewerFrames(gs.lastStopStack))
	}
	// Check invariants.
	if gs.startRunningTime == 0 {
		panic("silently broken trace or generator invariant (startRunningTime != 0) not held")
	}
	if gs.executing == R(noResource) {
		panic("non-executing goroutine stopped")
	}
	ctx.Slice(traceviewer.SliceEvent{
		Name:     gs.name(),
		Ts:       ctx.elapsed(gs.startRunningTime),
		Dur:      ts.Sub(gs.startRunningTime),
		Resource: uint64(gs.executing),
		Stack:    stk,
	})

	// Flush completed ranges.
	for _, cr := range gs.completedRanges {
		ctx.Slice(traceviewer.SliceEvent{
			Name:     cr.name,
			Ts:       ctx.elapsed(cr.startTime),
			Dur:      cr.endTime.Sub(cr.startTime),
			Resource: uint64(gs.executing),
			Stack:    ctx.Stack(viewerFrames(cr.startStack)),
			EndStack: ctx.Stack(viewerFrames(cr.endStack)),
			Arg:      cr.arg,
		})
	}
	// Keep the backing array; completed ranges are per-run scratch space.
	gs.completedRanges = gs.completedRanges[:0]

	// Continue in-progress ranges. They're emitted as a slice ending here
	// and will resume (with a fresh start time) the next time the goroutine runs.
	for name, r := range gs.activeRanges {
		// Check invariant.
		if r.time == 0 {
			panic("silently broken trace or generator invariant (activeRanges time != 0) not held")
		}
		ctx.Slice(traceviewer.SliceEvent{
			Name:     name,
			Ts:       ctx.elapsed(r.time),
			Dur:      ts.Sub(r.time),
			Resource: uint64(gs.executing),
			Stack:    ctx.Stack(viewerFrames(r.stack)),
		})
	}

	// Clear the range info. A zero time means "waiting to start executing."
	for name := range gs.activeRanges {
		gs.activeRanges[name] = activeRange{0, tracev2.NoStack}
	}

	gs.startRunningTime = 0
	gs.lastStopStack = stack
	gs.executing = R(noResource)
}
|
||||
|
||||
// finish writes out any in-progress slices as if the goroutine stopped.
// This must only be used once the trace has been fully processed and no
// further events will be processed. This method may leave the gState in
// an inconsistent state.
func (gs *gState[R]) finish(ts tracev2.Time, ctx *traceContext) {
	if gs.executing != R(noResource) {
		// Close any dangling syscall first (blocked=false: we don't know
		// that it blocked; the trace simply ended) and then stop normally.
		gs.syscallEnd(ts, false, ctx)
		gs.stop(ts, tracev2.NoStack, ctx)
	}
}
|
||||
|
||||
// rangeBegin indicates the start of a special range of time.
|
||||
func (gs *gState[R]) rangeBegin(ts tracev2.Time, name string, stack tracev2.Stack) {
|
||||
if gs.executing != R(noResource) {
|
||||
// If we're executing, start the slice from here.
|
||||
gs.activeRanges[name] = activeRange{ts, stack}
|
||||
} else {
|
||||
// If the goroutine isn't executing, there's no place for
|
||||
// us to create a slice from. Wait until it starts executing.
|
||||
gs.activeRanges[name] = activeRange{0, stack}
|
||||
}
|
||||
}
|
||||
|
||||
// rangeActive indicates that a special range of time has been in progress.
|
||||
func (gs *gState[R]) rangeActive(name string) {
|
||||
if gs.executing != R(noResource) {
|
||||
// If we're executing, and the range is active, then start
|
||||
// from wherever the goroutine started running from.
|
||||
gs.activeRanges[name] = activeRange{gs.startRunningTime, tracev2.NoStack}
|
||||
} else {
|
||||
// If the goroutine isn't executing, there's no place for
|
||||
// us to create a slice from. Wait until it starts executing.
|
||||
gs.activeRanges[name] = activeRange{0, tracev2.NoStack}
|
||||
}
|
||||
}
|
||||
|
||||
// rangeEnd indicates the end of a special range of time.
//
// If the goroutine is executing, the range is queued as a completed range
// (flushed as a slice by stop); either way the active-range entry is removed.
func (gs *gState[R]) rangeEnd(ts tracev2.Time, name string, stack tracev2.Stack, ctx *traceContext) {
	if gs.executing != R(noResource) {
		r := gs.activeRanges[name]
		gs.completedRanges = append(gs.completedRanges, completedRange{
			name:       name,
			startTime:  r.time,
			endTime:    ts,
			startStack: r.stack,
			endStack:   stack,
		})
	}
	delete(gs.activeRanges, name)
}
|
||||
|
||||
func lastFunc(s tracev2.Stack) string {
|
||||
var last tracev2.StackFrame
|
||||
s.Frames(func(f tracev2.StackFrame) bool {
|
||||
last = f
|
||||
return true
|
||||
})
|
||||
return last.Func
|
||||
}
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"internal/trace/traceviewer"
|
||||
tracev2 "internal/trace/v2"
|
||||
)
|
||||
|
||||
// JSONTraceHandler returns an HTTP handler that serves the parsed trace as
// trace-viewer JSON, optionally restricted to a [start, end] window given
// by the "start" and "end" form values.
func JSONTraceHandler(parsed *parsedTrace) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Parse start and end options. Both or none must be present.
		start := int64(0)
		end := int64(math.MaxInt64)
		if startStr, endStr := r.FormValue("start"), r.FormValue("end"); startStr != "" && endStr != "" {
			var err error
			start, err = strconv.ParseInt(startStr, 10, 64)
			if err != nil {
				// Malformed parameter: log and serve nothing.
				log.Printf("failed to parse start parameter %q: %v", startStr, err)
				return
			}

			end, err = strconv.ParseInt(endStr, 10, 64)
			if err != nil {
				log.Printf("failed to parse end parameter %q: %v", endStr, err)
				return
			}
		}

		c := traceviewer.ViewerDataTraceConsumer(w, start, end)
		if err := generateTrace(parsed, c); err != nil {
			// The response may be partially written at this point,
			// so all we can do is log.
			log.Printf("failed to generate trace: %v", err)
		}
	})
}
|
||||
|
||||
// traceContext is a wrapper around a traceviewer.Emitter with some additional
// information that's useful to most parts of trace viewer JSON emission.
type traceContext struct {
	*traceviewer.Emitter
	// startTime is the timestamp of the first event in the trace; all
	// emitted times are expressed relative to it (see elapsed).
	startTime tracev2.Time
}
|
||||
|
||||
// elapsed returns the elapsed time between the trace time and the start time
// of the trace.
func (ctx *traceContext) elapsed(now tracev2.Time) time.Duration {
	return now.Sub(ctx.startTime)
}
|
||||
|
||||
// generateTrace emits the full trace-viewer JSON for parsed into c,
// driving a proc-oriented generator over every event.
func generateTrace(parsed *parsedTrace, c traceviewer.TraceConsumer) error {
	ctx := &traceContext{
		// No time bounds here: windowing is handled by the consumer.
		Emitter:   traceviewer.NewEmitter(c, 0, time.Duration(0), time.Duration(math.MaxInt64)),
		startTime: parsed.events[0].Time(),
	}
	// Flush even on early return so the consumer sees a complete document.
	defer ctx.Flush()

	runGenerator(ctx, newProcGenerator(), parsed)
	return nil
}
|
||||
|
|
@ -0,0 +1,291 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
tracev1 "internal/trace"
|
||||
"io"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"internal/trace/traceviewer/format"
|
||||
"internal/trace/v2/raw"
|
||||
)
|
||||
|
||||
// TestJSONTraceHandler runs the JSON trace endpoint over every testdata
// trace and sanity-checks the emitted viewer events.
func TestJSONTraceHandler(t *testing.T) {
	testPaths, err := filepath.Glob("./testdata/*.test")
	if err != nil {
		t.Fatalf("discovering tests: %v", err)
	}
	for _, testPath := range testPaths {
		t.Run(filepath.Base(testPath), func(t *testing.T) {
			parsed := getTestTrace(t, testPath)
			data := recordJSONTraceHandlerResponse(t, parsed)
			// TODO(mknyszek): Check that there's one at most goroutine per proc at any given time.
			checkExecutionTimes(t, data)
			checkPlausibleHeapMetrics(t, data)
			// TODO(mknyszek): Check for plausible thread and goroutine metrics.
			checkMetaNamesEmitted(t, data, "process_name", []string{"STATS", "PROCS"})
			checkMetaNamesEmitted(t, data, "thread_name", []string{"GC", "Network", "Timers", "Syscalls", "Proc 0"})
			checkProcStartStop(t, data)
			checkSyscalls(t, data)
			checkNetworkUnblock(t, data)
			// TODO(mknyszek): Check for flow events.
		})
	}
}
|
||||
|
||||
// checkSyscalls verifies that syscall slices rooted in main.blockingSyscall
// were emitted, and that exactly one of them is marked as blocked.
func checkSyscalls(t *testing.T, data format.Data) {
	data = filterViewerTrace(data,
		filterEventName("syscall"),
		filterStackRootFunc("main.blockingSyscall"))
	if len(data.Events) <= 1 {
		t.Errorf("got %d events, want > 1", len(data.Events))
	}
	data = filterViewerTrace(data, filterBlocked("yes"))
	if len(data.Events) != 1 {
		t.Errorf("got %d events, want 1", len(data.Events))
	}
}
|
||||
|
||||
// eventFilterFn reports whether an event should be kept; the Data is
// provided so filters can resolve stacks and other shared tables.
type eventFilterFn func(*format.Event, *format.Data) bool

// filterEventName returns an event filter that keeps events whose name
// is exactly name.
func filterEventName(name string) eventFilterFn {
	return func(e *format.Event, _ *format.Data) bool {
		return e.Name == name
	}
}
|
||||
|
||||
// filterGoRoutineName returns an event filter that returns true if the event's
// goroutine name is equal to name.
func filterGoRoutineName(name string) eventFilterFn {
	return func(e *format.Event, _ *format.Data) bool {
		return parseGoroutineName(e) == name
	}
}
|
||||
|
||||
// parseGoroutineName returns the goroutine name from the event's name field.
|
||||
// E.g. if e.Name is "G42 main.cpu10", this returns "main.cpu10".
|
||||
func parseGoroutineName(e *format.Event) string {
|
||||
parts := strings.SplitN(e.Name, " ", 2)
|
||||
if len(parts) != 2 || !strings.HasPrefix(parts[0], "G") {
|
||||
return ""
|
||||
}
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
// filterBlocked returns an event filter that returns true if the event's
// "blocked" argument is equal to blocked.
func filterBlocked(blocked string) eventFilterFn {
	return func(e *format.Event, _ *format.Data) bool {
		// After JSON round-tripping, Arg is a generic map; events without
		// a map-shaped Arg simply don't match.
		m, ok := e.Arg.(map[string]any)
		if !ok {
			return false
		}
		return m["blocked"] == blocked
	}
}
|
||||
|
||||
// filterStackRootFunc returns an event filter that returns true if the function
// at the root of the stack trace is named name.
//
// NOTE(review): this indexes frames[len(frames)-1] without checking for an
// empty stack, so it assumes every filtered event carries a resolvable
// stack — confirm against the callers.
func filterStackRootFunc(name string) eventFilterFn {
	return func(e *format.Event, data *format.Data) bool {
		frames := stackFrames(data, e.Stack)
		rootFrame := frames[len(frames)-1]
		// Frame names are of the form "func:line", hence the ":" suffix.
		return strings.HasPrefix(rootFrame, name+":")
	}
}
|
||||
|
||||
// filterViewerTrace returns a copy of data with only the events that pass all
|
||||
// of the given filters.
|
||||
func filterViewerTrace(data format.Data, fns ...eventFilterFn) (filtered format.Data) {
|
||||
filtered = data
|
||||
filtered.Events = nil
|
||||
for _, e := range data.Events {
|
||||
keep := true
|
||||
for _, fn := range fns {
|
||||
keep = keep && fn(e, &filtered)
|
||||
}
|
||||
if keep {
|
||||
filtered.Events = append(filtered.Events, e)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// stackFrames returns the frame names for stackID, walking parent links
// from innermost to outermost; the root frame is last.
// The walk ends at the first ID with no entry in data.Frames.
func stackFrames(data *format.Data, stackID int) (frames []string) {
	for {
		// Frame IDs are serialized as string keys in the viewer format.
		frame, ok := data.Frames[strconv.Itoa(stackID)]
		if !ok {
			return
		}
		frames = append(frames, frame.Name)
		stackID = frame.Parent
	}
}
|
||||
|
||||
// checkProcStartStop verifies that "proc start" and "proc stop" events
// alternate per thread and that the expected number of procs appears.
func checkProcStartStop(t *testing.T, data format.Data) {
	procStarted := map[uint64]bool{}
	for _, e := range data.Events {
		if e.Name == "proc start" {
			// A start while already started means a missing stop.
			if procStarted[e.TID] == true {
				t.Errorf("proc started twice: %d", e.TID)
			}
			procStarted[e.TID] = true
		}
		if e.Name == "proc stop" {
			// A stop while already stopped (or never started) is also bad.
			if procStarted[e.TID] == false {
				t.Errorf("proc stopped twice: %d", e.TID)
			}
			procStarted[e.TID] = false
		}
	}
	// The test traces are generated with GOMAXPROCS=8 — TODO confirm.
	if got, want := len(procStarted), 8; got != want {
		t.Errorf("wrong number of procs started/stopped got=%d want=%d", got, want)
	}
}
|
||||
|
||||
func checkNetworkUnblock(t *testing.T, data format.Data) {
|
||||
count := 0
|
||||
var netBlockEv *format.Event
|
||||
for _, e := range data.Events {
|
||||
if e.TID == tracev1.NetpollP && e.Name == "unblock" && e.Phase == "I" && e.Scope == "t" {
|
||||
count++
|
||||
netBlockEv = e
|
||||
}
|
||||
}
|
||||
if netBlockEv == nil {
|
||||
t.Error("failed to find a network unblock")
|
||||
}
|
||||
if count == 0 || count > 2 {
|
||||
t.Errorf("found too many network block events: want 1 or 2, found %d", count)
|
||||
}
|
||||
// TODO(mknyszek): Check for the flow of this event to some slice event of a goroutine running.
|
||||
}
|
||||
|
||||
// checkExecutionTimes verifies that the two CPU-burning goroutines in the
// test program both ran, and that the 20%-CPU one accumulated more
// execution time than the 10%-CPU one.
func checkExecutionTimes(t *testing.T, data format.Data) {
	cpu10 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu10")))
	cpu20 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu20")))
	if cpu10 <= 0 || cpu20 <= 0 || cpu10 >= cpu20 {
		t.Errorf("bad execution times: cpu10=%v, cpu20=%v", cpu10, cpu20)
	}
}
|
||||
|
||||
// checkMetaNamesEmitted verifies that every name in want appears among the
// metadata ("M" phase) events of the given category (e.g. "thread_name").
func checkMetaNamesEmitted(t *testing.T, data format.Data, category string, want []string) {
	t.Helper()
	names := metaEventNameArgs(category, data)
	for _, wantName := range want {
		if !slices.Contains(names, wantName) {
			t.Errorf("%s: names=%v, want %q", category, names, wantName)
		}
	}
}
|
||||
|
||||
// metaEventNameArgs collects the "name" argument of every metadata ("M")
// event whose event name equals category.
//
// NOTE(review): the Arg type assertions will panic if a metadata event has
// an unexpected Arg shape; acceptable in a test helper, but worth knowing.
func metaEventNameArgs(category string, data format.Data) (names []string) {
	for _, e := range data.Events {
		if e.Name == category && e.Phase == "M" {
			names = append(names, e.Arg.(map[string]any)["name"].(string))
		}
	}
	return
}
|
||||
|
||||
func checkPlausibleHeapMetrics(t *testing.T, data format.Data) {
|
||||
hms := heapMetrics(data)
|
||||
var nonZeroAllocated, nonZeroNextGC bool
|
||||
for _, hm := range hms {
|
||||
if hm.Allocated > 0 {
|
||||
nonZeroAllocated = true
|
||||
}
|
||||
if hm.NextGC > 0 {
|
||||
nonZeroNextGC = true
|
||||
}
|
||||
}
|
||||
|
||||
if !nonZeroAllocated {
|
||||
t.Errorf("nonZeroAllocated=%v, want true", nonZeroAllocated)
|
||||
}
|
||||
if !nonZeroNextGC {
|
||||
t.Errorf("nonZeroNextGC=%v, want true", nonZeroNextGC)
|
||||
}
|
||||
}
|
||||
|
||||
// heapMetrics extracts every "Heap" counter ("C" phase) event's argument,
// decoded into a HeapCountersArg.
func heapMetrics(data format.Data) (metrics []format.HeapCountersArg) {
	for _, e := range data.Events {
		if e.Phase == "C" && e.Name == "Heap" {
			// Round-trip through JSON to convert the generic Arg map
			// into the typed struct. Decode errors are deliberately
			// ignored: a malformed sample just yields zero values,
			// which the caller's plausibility checks will catch.
			j, _ := json.Marshal(e.Arg)
			var metric format.HeapCountersArg
			json.Unmarshal(j, &metric)
			metrics = append(metrics, metric)
		}
	}
	return
}
|
||||
|
||||
// recordJSONTraceHandlerResponse serves /jsontrace for parsed via an
// httptest recorder and decodes the JSON body into a format.Data.
func recordJSONTraceHandlerResponse(t *testing.T, parsed *parsedTrace) format.Data {
	h := JSONTraceHandler(parsed)
	recorder := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "/jsontrace", nil)
	h.ServeHTTP(recorder, r)

	var data format.Data
	if err := json.Unmarshal(recorder.Body.Bytes(), &data); err != nil {
		t.Fatal(err)
	}
	return data
}
|
||||
|
||||
func sumExecutionTime(data format.Data) (sum time.Duration) {
|
||||
for _, e := range data.Events {
|
||||
sum += time.Duration(e.Dur) * time.Microsecond
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getTestTrace reads the text-format trace at testPath, re-encodes it to
// the binary wire format, and parses that into a parsedTrace.
func getTestTrace(t *testing.T, testPath string) *parsedTrace {
	t.Helper()

	// First read in the text trace and write it out as bytes.
	f, err := os.Open(testPath)
	if err != nil {
		t.Fatalf("failed to open test %s: %v", testPath, err)
	}
	r, err := raw.NewTextReader(f)
	if err != nil {
		t.Fatalf("failed to read test %s: %v", testPath, err)
	}
	var trace bytes.Buffer
	w, err := raw.NewWriter(&trace, r.Version())
	if err != nil {
		t.Fatalf("failed to write out test %s: %v", testPath, err)
	}
	// Copy events one at a time until the text reader is exhausted.
	for {
		ev, err := r.ReadEvent()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("failed to read test %s: %v", testPath, err)
		}
		if err := w.WriteEvent(ev); err != nil {
			t.Fatalf("failed to write out test %s: %v", testPath, err)
		}
	}

	// Parse the test trace.
	parsed, err := parseTrace(&trace)
	if err != nil {
		t.Fatalf("failed to parse trace: %v", err)
	}
	return parsed
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"internal/trace/traceviewer"
|
||||
tracev2 "internal/trace/v2"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"internal/trace/v2/raw"
|
||||
|
||||
"cmd/internal/browser"
|
||||
)
|
||||
|
||||
// Main is the main function for cmd/trace v2.
//
// It opens and parses traceFile, then serves the trace viewer UI on
// httpAddr. If debug is nonzero, it instead dumps events to stdout:
// 1 for processed (high-level) events, 2 for raw (wire-format) events.
// The pprof argument is currently unused here.
func Main(traceFile, httpAddr, pprof string, debug int) error {
	tracef, err := os.Open(traceFile)
	if err != nil {
		return fmt.Errorf("failed to read trace file: %w", err)
	}
	defer tracef.Close()

	// Debug flags.
	switch debug {
	case 1:
		return debugProcessedEvents(tracef)
	case 2:
		return debugRawEvents(tracef)
	}

	// Bind the socket before the (potentially slow) parse so the user
	// gets an early error if the address is unusable.
	ln, err := net.Listen("tcp", httpAddr)
	if err != nil {
		return fmt.Errorf("failed to create server socket: %w", err)
	}

	addr := "http://" + ln.Addr().String()
	log.Print("Parsing trace...")
	parsed, err := parseTrace(tracef)
	if err != nil {
		return err
	}
	// N.B. tracef not needed after this point.
	// We might double-close, but that's fine; we ignore the error.
	tracef.Close()

	log.Print("Splitting trace...")
	ranges, err := splitTrace(parsed)
	if err != nil {
		return err
	}

	log.Printf("Opening browser. Trace viewer is listening on %s", addr)
	browser.Open(addr)

	mux := http.NewServeMux()
	mux.Handle("/", traceviewer.MainHandler(ranges))
	mux.Handle("/trace", traceviewer.TraceHandler())
	mux.Handle("/jsontrace", JSONTraceHandler(parsed))
	mux.Handle("/static/", traceviewer.StaticHandler())

	// http.Serve blocks until the listener fails; its error is always
	// non-nil, so wrapping unconditionally is safe.
	err = http.Serve(ln, mux)
	return fmt.Errorf("failed to start http server: %w", err)
}
|
||||
|
||||
// parsedTrace is a fully-decoded v2 trace held in memory as a flat
// slice of events, in the order they were read.
type parsedTrace struct {
	events []tracev2.Event
}
|
||||
|
||||
// parseTrace reads a complete v2 trace from trace and returns it as a
// parsedTrace. It fails on the first malformed event.
func parseTrace(trace io.Reader) (*parsedTrace, error) {
	r, err := tracev2.NewReader(trace)
	if err != nil {
		return nil, fmt.Errorf("failed to create trace reader: %w", err)
	}
	var t parsedTrace
	for {
		ev, err := r.ReadEvent()
		if err == io.EOF {
			// EOF is the normal end-of-trace signal, not an error.
			break
		} else if err != nil {
			return nil, fmt.Errorf("failed to read event: %w", err)
		}
		t.events = append(t.events, ev)
	}
	return &t, nil
}
|
||||
|
||||
// splitTrace splits the trace into a number of ranges, each resulting in approx 100 MiB of
// json output (the trace viewer can hardly handle more).
func splitTrace(parsed *parsedTrace) ([]traceviewer.Range, error) {
	// TODO(mknyszek): Split traces by generation by doing a quick first pass over the
	// trace to identify all the generation boundaries.
	s, c := traceviewer.SplittingTraceConsumer(100 << 20) // 100 MiB
	// Drive a full generation pass through the splitting consumer; it only
	// measures sizes and records split points, discarding the output.
	if err := generateTrace(parsed, c); err != nil {
		return nil, err
	}
	return s.Ranges, nil
}
|
||||
|
||||
func debugProcessedEvents(trace io.Reader) error {
|
||||
tr, err := tracev2.NewReader(trace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
ev, err := tr.ReadEvent()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(ev.String())
|
||||
}
|
||||
}
|
||||
|
||||
func debugRawEvents(trace io.Reader) error {
|
||||
rr, err := raw.NewReader(trace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
ev, err := rr.ReadEvent()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(ev.String())
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,209 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"internal/trace/traceviewer"
|
||||
"internal/trace/traceviewer/format"
|
||||
tracev2 "internal/trace/v2"
|
||||
)
|
||||
|
||||
// Compile-time check that procGenerator implements generator.
var _ generator = &procGenerator{}

// procGenerator emits trace-viewer events organized per proc, composing
// the shared range, metric, and stack-sample generators.
type procGenerator struct {
	globalRangeGenerator
	globalMetricGenerator
	procRangeGenerator
	stackSampleGenerator[tracev2.ProcID]

	// gStates tracks per-goroutine state, keyed by goroutine ID.
	gStates map[tracev2.GoID]*gState[tracev2.ProcID]
	// inSyscall maps a proc to the goroutine currently in a syscall on it.
	inSyscall map[tracev2.ProcID]*gState[tracev2.ProcID]
	// maxProc is the highest proc ID observed, used to name proc lanes.
	maxProc tracev2.ProcID
}
|
||||
|
||||
func newProcGenerator() *procGenerator {
|
||||
pg := new(procGenerator)
|
||||
pg.stackSampleGenerator.getResource = func(ev *tracev2.Event) tracev2.ProcID {
|
||||
return ev.Proc()
|
||||
}
|
||||
pg.gStates = make(map[tracev2.GoID]*gState[tracev2.ProcID])
|
||||
pg.inSyscall = make(map[tracev2.ProcID]*gState[tracev2.ProcID])
|
||||
return pg
|
||||
}
|
||||
|
||||
// Sync notifies the embedded range generators of a generation boundary.
func (g *procGenerator) Sync() {
	g.globalRangeGenerator.Sync()
	g.procRangeGenerator.Sync()
}
|
||||
|
||||
// GoroutineLabel applies a label event to the named goroutine's state.
//
// NOTE(review): this indexes gStates without an existence check, so it
// assumes a label event is always preceded by a state transition for the
// same goroutine — confirm against the event ordering guarantees.
func (g *procGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) {
	l := ev.Label()
	g.gStates[l.Resource.Goroutine()].setLabel(l.Label)
}
|
||||
|
||||
// GoroutineRange dispatches a goroutine-scoped range event (begin, active,
// or end) to the corresponding goroutine's state.
func (g *procGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) {
	r := ev.Range()
	switch ev.Kind() {
	case tracev2.EventRangeBegin:
		g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack())
	case tracev2.EventRangeActive:
		g.gStates[r.Scope.Goroutine()].rangeActive(r.Name)
	case tracev2.EventRangeEnd:
		gs := g.gStates[r.Scope.Goroutine()]
		gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx)
	}
}
|
||||
|
||||
// GoroutineTransition handles a goroutine state-transition event, updating
// the per-goroutine state machine and emitting the corresponding viewer
// events (slices, instants, flow-arrow causes, and goroutine-state counts).
//
// The checks below are deliberately independent ifs, not a switch: a single
// transition can satisfy several of them (e.g. GoWaiting -> GoRunning is
// both an unblock and a start).
func (g *procGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) {
	st := ev.StateTransition()
	goID := st.Resource.Goroutine()

	// If we haven't seen this goroutine before, create a new
	// gState for it.
	gs, ok := g.gStates[goID]
	if !ok {
		gs = newGState[tracev2.ProcID](goID)
		g.gStates[goID] = gs
	}
	// If we haven't already named this goroutine, try to name it.
	gs.augmentName(st.Stack)

	// Handle the goroutine state transition.
	from, to := st.Goroutine()
	if from == to {
		// Filter out no-op events.
		return
	}
	if from == tracev2.GoRunning && !to.Executing() {
		if to == tracev2.GoWaiting {
			// Goroutine started blocking.
			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
		} else {
			gs.stop(ev.Time(), ev.Stack(), ctx)
		}
	}
	if !from.Executing() && to == tracev2.GoRunning {
		start := ev.Time()
		if from == tracev2.GoUndetermined {
			// Back-date the event to the start of the trace.
			start = ctx.startTime
		}
		gs.start(start, ev.Proc(), ctx)
	}

	if from == tracev2.GoWaiting {
		// Goroutine was unblocked.
		gs.unblock(ev.Time(), ev.Stack(), ev.Proc(), ctx)
	}
	if from == tracev2.GoNotExist && to == tracev2.GoRunnable {
		// Goroutine was created.
		gs.created(ev.Time(), ev.Proc(), ev.Stack())
	}
	if from == tracev2.GoSyscall && to != tracev2.GoRunning {
		// Goroutine exited a blocked syscall.
		gs.blockedSyscallEnd(ev.Time(), ev.Stack(), ctx)
	}

	// Handle syscalls.
	if to == tracev2.GoSyscall && ev.Proc() != tracev2.NoProc {
		start := ev.Time()
		if from == tracev2.GoUndetermined {
			// Back-date the event to the start of the trace.
			start = ctx.startTime
		}
		// Write down that we've entered a syscall. Note: we might have no P here
		// if we're in a cgo callback or this is a transition from GoUndetermined
		// (i.e. the G has been blocked in a syscall).
		gs.syscallBegin(start, ev.Proc(), ev.Stack())
		g.inSyscall[ev.Proc()] = gs
	}
	// Check if we're exiting a non-blocking syscall.
	// If the proc still has an inSyscall record, ProcTransition never saw the
	// proc stop, so the syscall completed without blocking.
	_, didNotBlock := g.inSyscall[ev.Proc()]
	if from == tracev2.GoSyscall && didNotBlock {
		gs.syscallEnd(ev.Time(), false, ctx)
		delete(g.inSyscall, ev.Proc())
	}

	// Note down the goroutine transition.
	_, inMarkAssist := gs.activeRanges["GC mark assist"]
	ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist))
}
|
||||
|
||||
// ProcTransition handles a proc state-transition event, emitting
// "proc start"/"proc stop" instants, maintaining the running-thread count,
// and closing out any syscall that blocked on a stopping proc.
func (g *procGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) {
	st := ev.StateTransition()
	proc := st.Resource.Proc()

	// Track the highest proc ID seen so Finish can name every proc lane.
	g.maxProc = max(g.maxProc, proc)
	viewerEv := traceviewer.InstantEvent{
		Resource: uint64(proc),
		Stack:    ctx.Stack(viewerFrames(ev.Stack())),
	}

	from, to := st.Proc()
	if from == to {
		// Filter out no-op events.
		return
	}
	if to.Executing() {
		start := ev.Time()
		if from == tracev2.ProcUndetermined {
			// Back-date the event to the start of the trace.
			start = ctx.startTime
		}
		viewerEv.Name = "proc start"
		viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())}
		viewerEv.Ts = ctx.elapsed(start)
		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1)
	}
	if from.Executing() {
		start := ev.Time()
		viewerEv.Name = "proc stop"
		viewerEv.Ts = ctx.elapsed(start)
		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1)

		// Check if this proc was in a syscall before it stopped.
		// This means the syscall blocked. We need to emit it to the
		// viewer at this point because we only display the time the
		// syscall occupied a P when the viewer is in per-P mode.
		//
		// TODO(mknyszek): We could do better in a per-M mode because
		// all events have to happen on *some* thread, and in v2 traces
		// we know what that thread is.
		gs, ok := g.inSyscall[proc]
		if ok {
			// Emit syscall slice for blocked syscall.
			gs.syscallEnd(start, true, ctx)
			gs.stop(start, ev.Stack(), ctx)
			delete(g.inSyscall, proc)
		}
	}
	// TODO(mknyszek): Consider modeling procs differently and have them be
	// transition to and from NotExist when GOMAXPROCS changes. We can emit
	// events for this to clearly delineate GOMAXPROCS changes.

	// Only emit the instant if one of the branches above named it.
	if viewerEv.Name != "" {
		ctx.Instant(viewerEv)
	}
}
|
||||
|
||||
// Finish flushes all outstanding generator state to ctx at the end of the
// trace. endTime is the timestamp used to close still-open ranges and slices.
func (g *procGenerator) Finish(ctx *traceContext, endTime tracev2.Time) {
	ctx.SetResourceType("PROCS")

	// Finish off ranges first. It doesn't really matter for the global ranges,
	// but the proc ranges need to either be a subset of a goroutine slice or
	// their own slice entirely. If the former, it needs to end first.
	g.procRangeGenerator.Finish(ctx, endTime)
	g.globalRangeGenerator.Finish(ctx, endTime)

	// Finish off all the goroutine slices.
	for _, gs := range g.gStates {
		gs.finish(endTime, ctx)
	}

	// Name all the procs to the emitter. maxProc was maintained in
	// ProcTransition, so this covers every proc that ever appeared.
	for i := uint64(0); i <= uint64(g.maxProc); i++ {
		ctx.Resource(i, fmt.Sprintf("Proc %v", i))
	}
}
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run mktests.go
|
||||
package testdata
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,60 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"internal/trace/v2/raw"
|
||||
"internal/trace/v2/version"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Create command.
|
||||
var trace, stderr bytes.Buffer
|
||||
cmd := exec.Command("go", "run", "./testprog/main.go")
|
||||
// TODO(mknyszek): Remove if goexperiment.Exectracer2 becomes the default.
|
||||
cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2")
|
||||
cmd.Stdout = &trace
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
// Run trace program; the trace will appear in stdout.
|
||||
fmt.Fprintln(os.Stderr, "running trace program...")
|
||||
if err := cmd.Run(); err != nil {
|
||||
log.Fatalf("running trace program: %v:\n%s", err, stderr.String())
|
||||
}
|
||||
|
||||
// Create file.
|
||||
f, err := os.Create(fmt.Sprintf("./go1%d.test", version.Current))
|
||||
if err != nil {
|
||||
log.Fatalf("creating output file: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Write out the trace.
|
||||
r, err := raw.NewReader(&trace)
|
||||
if err != nil {
|
||||
log.Fatalf("reading trace: %v", err)
|
||||
}
|
||||
w, err := raw.NewTextWriter(f, version.Current)
|
||||
for {
|
||||
ev, err := r.ReadEvent()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("reading trace: %v", err)
|
||||
}
|
||||
if err := w.WriteEvent(ev); err != nil {
|
||||
log.Fatalf("writing trace: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,129 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/trace"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// main runs a workload that exercises a known set of runtime behaviors —
// CPU burn, steady allocation, proc start/stop on every P, a blocking
// syscall, and a network-unblock — and writes the resulting execution trace
// to stdout. Each phase exists to back a specific check in the test reader.
func main() {
	if err := trace.Start(os.Stdout); err != nil {
		log.Fatal(err)
	}

	// checkExecutionTimes relies on this.
	var wg sync.WaitGroup
	wg.Add(2)
	go cpu10(&wg)
	go cpu20(&wg)
	wg.Wait()

	// checkHeapMetrics relies on this.
	allocHog(25 * time.Millisecond)

	// checkProcStartStop relies on this.
	var wg2 sync.WaitGroup
	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
		wg2.Add(1)
		go func() {
			defer wg2.Done()
			cpuHog(50 * time.Millisecond)
		}()
	}
	wg2.Wait()

	// checkSyscalls relies on this.
	done := make(chan error)
	go blockingSyscall(50*time.Millisecond, done)
	if err := <-done; err != nil {
		log.Fatal(err)
	}

	// checkNetworkUnblock relies on this.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatalf("listen failed: %v", err)
	}
	defer ln.Close()
	go func() {
		// Accept one connection and write a single byte after a short
		// delay, so the client's Read below blocks and is later
		// unblocked via the network.
		c, err := ln.Accept()
		if err != nil {
			return
		}
		time.Sleep(time.Millisecond)
		var buf [1]byte
		c.Write(buf[:])
		c.Close()
	}()
	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	var tmp [1]byte
	c.Read(tmp[:])
	c.Close()

	trace.Stop()
}
|
||||
|
||||
// blockingSyscall blocks the current goroutine for duration d in a syscall
// and sends a message to done when it is done or if the syscall failed.
func blockingSyscall(d time.Duration, done chan<- error) {
	pr, pw, pipeErr := os.Pipe()
	if pipeErr != nil {
		done <- pipeErr
		return
	}
	began := time.Now()
	payload := []byte("hello")
	// Schedule the write that will eventually unblock the raw read below.
	time.AfterFunc(d, func() { pw.Write(payload) })
	buf := make([]byte, len(payload))
	_, readErr := syscall.Read(int(pr.Fd()), buf)
	if readErr == nil && time.Since(began) < d {
		readErr = fmt.Errorf("syscall returned too early: want=%s got=%s", d, time.Since(began))
	}
	done <- readErr
}
|
||||
|
||||
func cpu10(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
cpuHog(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
func cpu20(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
cpuHog(20 * time.Millisecond)
|
||||
}
|
||||
|
||||
// cpuHog busy-loops for at least dt. It consults the clock only once every
// 1000 iterations so the loop body stays cheap.
func cpuHog(dt time.Duration) {
	began := time.Now()
	for n := 0; ; n++ {
		if n%1000 != 0 {
			continue
		}
		if time.Since(began) > dt {
			return
		}
	}
}
|
||||
|
||||
// allocHog allocates 1 KiB chunks for at least dt, pausing briefly every
// 1000 allocations to keep the resulting trace a manageable size.
func allocHog(dt time.Duration) {
	began := time.Now()
	var retained [][]byte
	for n := 0; ; n++ {
		if n%1000 == 0 {
			if time.Since(began) > dt {
				return
			}
			// Take a break... this will generate a ton of events otherwise.
			time.Sleep(50 * time.Microsecond)
		}
		retained = append(retained, make([]byte, 1024))
	}
}
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"internal/trace"
|
||||
"internal/trace/traceviewer"
|
||||
tracev2 "internal/trace/v2"
|
||||
"time"
|
||||
)
|
||||
|
||||
// viewerFrames returns the frames of stk converted to []*trace.Frame, in the
// order stk.Frames yields them.
//
// NOTE(review): the previous comment described a caller-provided frame slice
// "used to store the frames to reduce allocations", but no such parameter
// exists in this signature; the result slice is always freshly built.
func viewerFrames(stk tracev2.Stack) []*trace.Frame {
	var frames []*trace.Frame
	stk.Frames(func(f tracev2.StackFrame) bool {
		frames = append(frames, &trace.Frame{
			PC:   f.PC,
			Fn:   f.Func,
			File: f.File,
			Line: int(f.Line),
		})
		return true // keep iterating: collect every frame.
	})
	return frames
}
|
||||
|
||||
func viewerGState(state tracev2.GoState, inMarkAssist bool) traceviewer.GState {
|
||||
switch state {
|
||||
case tracev2.GoUndetermined:
|
||||
return traceviewer.GDead
|
||||
case tracev2.GoNotExist:
|
||||
return traceviewer.GDead
|
||||
case tracev2.GoRunnable:
|
||||
return traceviewer.GRunnable
|
||||
case tracev2.GoRunning:
|
||||
return traceviewer.GRunning
|
||||
case tracev2.GoWaiting:
|
||||
if inMarkAssist {
|
||||
return traceviewer.GWaitingGC
|
||||
}
|
||||
return traceviewer.GWaiting
|
||||
case tracev2.GoSyscall:
|
||||
// N.B. A goroutine in a syscall is considered "executing" (state.Executing() == true).
|
||||
return traceviewer.GRunning
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown GoState: %s", state.String()))
|
||||
}
|
||||
}
|
||||
|
||||
// viewerTime converts a duration into the floating-point microsecond
// timestamps used by the trace viewer.
func viewerTime(t time.Duration) float64 {
	const usPerUnit = float64(time.Microsecond)
	return float64(t) / usPerUnit
}
|
||||
|
|
@ -45,6 +45,7 @@ var depsRules = `
|
|||
internal/cpu, internal/goarch, internal/godebugs,
|
||||
internal/goexperiment, internal/goos,
|
||||
internal/goversion, internal/nettrace, internal/platform,
|
||||
internal/trace/traceviewer/format,
|
||||
log/internal,
|
||||
unicode/utf8, unicode/utf16, unicode,
|
||||
unsafe;
|
||||
|
|
@ -633,6 +634,16 @@ var depsRules = `
|
|||
FMT, container/heap, math/rand, internal/trace/v2
|
||||
< internal/trace;
|
||||
|
||||
# cmd/trace dependencies.
|
||||
FMT,
|
||||
embed,
|
||||
encoding/json,
|
||||
html/template,
|
||||
internal/trace,
|
||||
internal/trace/traceviewer/format,
|
||||
net/http
|
||||
< internal/trace/traceviewer;
|
||||
|
||||
# Coverage.
|
||||
FMT, crypto/md5, encoding/binary, regexp, sort, text/tabwriter, unsafe,
|
||||
internal/coverage, internal/coverage/uleb128
|
||||
|
|
|
|||
|
|
@ -136,17 +136,23 @@ type rawEvent struct {
|
|||
sargs []string
|
||||
}
|
||||
|
||||
// readTrace does wire-format parsing and verification.
|
||||
// It does not care about specific event types and argument meaning.
|
||||
func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]string, err error) {
|
||||
func ReadVersion(r io.Reader) (ver int, off int, err error) {
|
||||
// Read and validate trace header.
|
||||
var buf [16]byte
|
||||
off, err := io.ReadFull(r, buf[:])
|
||||
off, err = io.ReadFull(r, buf[:])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to read header: read %v, err %v", off, err)
|
||||
return
|
||||
}
|
||||
ver, err = parseHeader(buf[:])
|
||||
return
|
||||
}
|
||||
|
||||
// readTrace does wire-format parsing and verification.
|
||||
// It does not care about specific event types and argument meaning.
|
||||
func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]string, err error) {
|
||||
var off int
|
||||
ver, off, err = ReadVersion(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
|
@ -161,6 +167,7 @@ func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]stri
|
|||
}
|
||||
|
||||
// Read events.
|
||||
var buf [16]byte
|
||||
strings = make(map[uint64]string)
|
||||
for {
|
||||
// Read event type and number of arguments (1 byte).
|
||||
|
|
|
|||
|
|
@ -0,0 +1,666 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceviewer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"internal/trace"
|
||||
"internal/trace/traceviewer/format"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TraceConsumer is the set of callbacks through which a rendered trace is
// delivered: the display time unit, each viewer event, each stack frame, and
// a final flush once the whole trace has been consumed.
type TraceConsumer struct {
	ConsumeTimeUnit func(unit string)
	// ConsumeViewerEvent receives one event. required marks events that
	// must be present regardless of any range/split selection.
	ConsumeViewerEvent func(v *format.Event, required bool)
	// ConsumeViewerFrame registers the stack frame f under key.
	ConsumeViewerFrame func(key string, f format.Frame)
	Flush              func()
}
|
||||
|
||||
// ViewerDataTraceConsumer returns a TraceConsumer that writes to w. The
// startIdx and endIdx are used for splitting large traces. They refer to
// indexes in the traceEvents output array, not the events in the trace input.
//
// The output is a single JSON object streamed piecewise: "displayTimeUnit"
// first, then the "traceEvents" array, then the "stackFrames" map on Flush.
func ViewerDataTraceConsumer(w io.Writer, startIdx, endIdx int64) TraceConsumer {
	allFrames := make(map[string]format.Frame)      // every frame ever registered
	requiredFrames := make(map[string]format.Frame) // only frames referenced by emitted events
	enc := json.NewEncoder(w)
	written := 0       // number of events written so far
	index := int64(-1) // index of the current event in the logical output array

	io.WriteString(w, "{")
	return TraceConsumer{
		ConsumeTimeUnit: func(unit string) {
			io.WriteString(w, `"displayTimeUnit":`)
			enc.Encode(unit)
			io.WriteString(w, ",")
		},
		ConsumeViewerEvent: func(v *format.Event, required bool) {
			index++
			if !required && (index < startIdx || index > endIdx) {
				// not in the range. Skip!
				return
			}
			// Record the frames this event references so Flush emits
			// exactly the needed subset of stackFrames.
			WalkStackFrames(allFrames, v.Stack, func(id int) {
				s := strconv.Itoa(id)
				requiredFrames[s] = allFrames[s]
			})
			WalkStackFrames(allFrames, v.EndStack, func(id int) {
				s := strconv.Itoa(id)
				requiredFrames[s] = allFrames[s]
			})
			// Open the array on the first event; separate later ones
			// with commas.
			if written == 0 {
				io.WriteString(w, `"traceEvents": [`)
			}
			if written > 0 {
				io.WriteString(w, ",")
			}
			enc.Encode(v)
			// TODO(mknyszek): get rid of the extra \n inserted by enc.Encode.
			// Same should be applied to splittingTraceConsumer.
			written++
		},
		ConsumeViewerFrame: func(k string, v format.Frame) {
			allFrames[k] = v
		},
		Flush: func() {
			io.WriteString(w, `], "stackFrames":`)
			enc.Encode(requiredFrames)
			io.WriteString(w, `}`)
		},
	}
}
|
||||
|
||||
// SplittingTraceConsumer returns a TraceConsumer that writes nothing but
// instead measures the JSON-encoded size of every event (plus the stack
// frames it references) in order to partition the trace into ranges of at
// most max bytes each. The computed ranges are available in the returned
// *splitter after Flush. Required events and their frames count against
// every range, since they are included in each split.
func SplittingTraceConsumer(max int) (*splitter, TraceConsumer) {
	// eventSz records one optional event's timestamp, encoded size, and
	// the ids of the stack frames it references.
	type eventSz struct {
		Time   float64
		Sz     int
		Frames []int
	}

	var (
		// data.Frames contains only the frames for required events.
		data = format.Data{Frames: make(map[string]format.Frame)}

		allFrames = make(map[string]format.Frame)

		sizes []eventSz
		cw    countingWriter
	)

	s := new(splitter)

	return s, TraceConsumer{
		ConsumeTimeUnit: func(unit string) {
			data.TimeUnit = unit
		},
		ConsumeViewerEvent: func(v *format.Event, required bool) {
			if required {
				// Store required events inside data so flush
				// can include them in the required part of the
				// trace.
				data.Events = append(data.Events, v)
				WalkStackFrames(allFrames, v.Stack, func(id int) {
					s := strconv.Itoa(id)
					data.Frames[s] = allFrames[s]
				})
				WalkStackFrames(allFrames, v.EndStack, func(id int) {
					s := strconv.Itoa(id)
					data.Frames[s] = allFrames[s]
				})
				return
			}
			// Measure the event by encoding it into the counting
			// writer (output is discarded).
			enc := json.NewEncoder(&cw)
			enc.Encode(v)
			size := eventSz{Time: v.Time, Sz: cw.size + 1} // +1 for ",".
			// Add referenced stack frames. Their size is computed
			// in flush, where we can dedup across events.
			WalkStackFrames(allFrames, v.Stack, func(id int) {
				size.Frames = append(size.Frames, id)
			})
			WalkStackFrames(allFrames, v.EndStack, func(id int) {
				size.Frames = append(size.Frames, id) // This may add duplicates. We'll dedup later.
			})
			sizes = append(sizes, size)
			cw.size = 0
		},
		ConsumeViewerFrame: func(k string, v format.Frame) {
			allFrames[k] = v
		},
		Flush: func() {
			// Calculate size of the mandatory part of the trace.
			// This includes thread names and stack frames for
			// required events.
			cw.size = 0
			enc := json.NewEncoder(&cw)
			enc.Encode(data)
			requiredSize := cw.size

			// Then calculate size of each individual event and
			// their stack frames, grouping them into ranges. We
			// only include stack frames relevant to the events in
			// the range to reduce overhead.

			var (
				start = 0

				eventsSize = 0

				frames     = make(map[string]format.Frame)
				framesSize = 0
			)
			for i, ev := range sizes {
				eventsSize += ev.Sz

				// Add required stack frames. Note that they
				// may already be in the map.
				for _, id := range ev.Frames {
					s := strconv.Itoa(id)
					_, ok := frames[s]
					if ok {
						continue
					}
					f := allFrames[s]
					frames[s] = f
					framesSize += stackFrameEncodedSize(uint(id), f)
				}

				total := requiredSize + framesSize + eventsSize
				if total < max {
					continue
				}

				// Reached max size, commit this range and
				// start a new range.
				startTime := time.Duration(sizes[start].Time * 1000)
				endTime := time.Duration(ev.Time * 1000)
				s.Ranges = append(s.Ranges, Range{
					Name:      fmt.Sprintf("%v-%v", startTime, endTime),
					Start:     start,
					End:       i + 1,
					StartTime: int64(startTime),
					EndTime:   int64(endTime),
				})
				start = i + 1
				frames = make(map[string]format.Frame)
				framesSize = 0
				eventsSize = 0
			}
			if len(s.Ranges) <= 1 {
				// At most one range means the trace fits in a
				// single page; no splitting necessary.
				s.Ranges = nil
				return
			}

			// Commit a final range for the tail of events that
			// never reached the size threshold.
			if end := len(sizes) - 1; start < end {
				s.Ranges = append(s.Ranges, Range{
					Name:      fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)),
					Start:     start,
					End:       end,
					StartTime: int64(sizes[start].Time * 1000),
					EndTime:   int64(sizes[end].Time * 1000),
				})
			}
		},
	}
}
|
||||
|
||||
type splitter struct {
|
||||
Ranges []Range
|
||||
}
|
||||
|
||||
// countingWriter is an io.Writer that discards its input and records only
// the cumulative number of bytes "written".
type countingWriter struct {
	size int
}

// Write implements io.Writer by adding len(data) to the running total and
// always reporting success.
func (cw *countingWriter) Write(data []byte) (int, error) {
	n := len(data)
	cw.size += n
	return n, nil
}
|
||||
|
||||
// stackFrameEncodedSize returns the number of bytes the entry for frame f,
// keyed by id, contributes to the JSON-encoded Frames map.
func stackFrameEncodedSize(id uint, f format.Frame) int {
	// We want to know the marginal size of traceviewer.Data.Frames for
	// each event. Running full JSON encoding of the map for each event is
	// far too slow.
	//
	// Since the format is fixed, we can easily compute the size without
	// encoding.
	//
	// A single entry looks like one of the following:
	//
	//   "1":{"name":"main.main:30"},
	//   "10":{"name":"pkg.NewSession:173","parent":9},
	//
	// The parent is omitted if 0. The trailing comma is omitted from the
	// last entry, but we don't need that much precision.
	const (
		baseSize = len(`"`) + len(`":{"name":"`) + len(`"},`)

		// Don't count the trailing quote on the name, as that is
		// counted in baseSize.
		parentBaseSize = len(`,"parent":`)
	)

	size := baseSize

	size += len(f.Name)

	// Bytes for id (always positive).
	for id > 0 {
		size += 1
		id /= 10
	}

	if f.Parent > 0 {
		size += parentBaseSize
		// Bytes for parent (always positive).
		for f.Parent > 0 {
			size += 1
			f.Parent /= 10
		}
	}

	return size
}
|
||||
|
||||
// WalkStackFrames calls fn for id and all of its parent frames from allFrames.
|
||||
func WalkStackFrames(allFrames map[string]format.Frame, id int, fn func(id int)) {
|
||||
for id != 0 {
|
||||
f, ok := allFrames[strconv.Itoa(id)]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fn(id)
|
||||
id = f.Parent
|
||||
}
|
||||
}
|
||||
|
||||
type Mode int
|
||||
|
||||
const (
|
||||
ModeGoroutineOriented Mode = 1 << iota
|
||||
ModeTaskOriented
|
||||
)
|
||||
|
||||
// NewEmitter returns a new Emitter that writes to c. The rangeStart and
// rangeEnd args are used for splitting large traces: only timestamps within
// that window are emitted as optional events.
func NewEmitter(c TraceConsumer, mode Mode, rangeStart, rangeEnd time.Duration) *Emitter {
	// The display time unit is fixed to nanoseconds up front.
	c.ConsumeTimeUnit("ns")

	return &Emitter{
		c:          c,
		mode:       mode,
		rangeStart: rangeStart,
		rangeEnd:   rangeEnd,
		frameTree:  frameNode{children: make(map[uint64]frameNode)},
		resources:  make(map[uint64]string),
	}
}
|
||||
|
||||
// Emitter converts trace data into viewer events, tracking counter state
// (heap, goroutine states, thread states), the stack-frame prefix tree,
// resource names, and the time window selected for emission.
type Emitter struct {
	c          TraceConsumer // sink for all emitted events and frames
	mode       Mode
	rangeStart time.Duration // inclusive start of the emission window
	rangeEnd   time.Duration // inclusive end of the emission window

	// Current vs. previously-emitted counter values; counter events are
	// only emitted when the two differ.
	heapStats, prevHeapStats     heapStats
	gstates, prevGstates         [gStateCount]int64
	threadStats, prevThreadStats [threadStateCount]int64
	gomaxprocs                   uint64            // running maximum of observed GOMAXPROCS
	frameTree                    frameNode         // root of the stack-frame prefix tree
	frameSeq                     int               // last assigned frame id
	arrowSeq                     uint64            // last assigned flow-arrow id
	filter                       func(uint64) bool // optional per-resource-id filter
	resourceType                 string            // display name for the resource section
	resources                    map[uint64]string // resource id -> display name
	focusResource                uint64            // resource id sorted to the top by Flush
}
|
||||
|
||||
func (e *Emitter) Gomaxprocs(v uint64) {
|
||||
if v > e.gomaxprocs {
|
||||
e.gomaxprocs = v
|
||||
}
|
||||
}
|
||||
|
||||
// Resource registers a display name for the resource with the given id.
// Resources rejected by the filter (if any) are dropped.
func (e *Emitter) Resource(id uint64, name string) {
	if e.filter != nil && !e.filter(id) {
		return
	}
	e.resources[id] = name
}
|
||||
|
||||
// SetResourceType sets the display name for the resource section
// (e.g. "PROCS").
func (e *Emitter) SetResourceType(name string) {
	e.resourceType = name
}
|
||||
|
||||
// SetResourceFilter installs a predicate over resource ids; events and
// resource names whose id fails the predicate are suppressed.
func (e *Emitter) SetResourceFilter(filter func(uint64) bool) {
	e.filter = filter
}
|
||||
|
||||
// Slice emits a complete ("X" phase) slice event for s, provided at least
// one endpoint of the slice lies within the emitter's time range and the
// slice's resource passes the filter.
func (e *Emitter) Slice(s SliceEvent) {
	if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) {
		return
	}
	if e.filter != nil && !e.filter(s.Resource) {
		return
	}
	e.OptionalEvent(&format.Event{
		Name:     s.Name,
		Phase:    "X",
		Time:     viewerTime(s.Ts),
		Dur:      viewerTime(s.Dur),
		TID:      s.Resource,
		Stack:    s.Stack,
		EndStack: s.EndStack,
		Arg:      s.Arg,
	})
}
|
||||
|
||||
type SliceEvent struct {
|
||||
Name string
|
||||
Ts time.Duration
|
||||
Dur time.Duration
|
||||
Resource uint64
|
||||
Stack int
|
||||
EndStack int
|
||||
Arg any
|
||||
}
|
||||
|
||||
// Instant emits a thread-scoped instant ("I" phase) event for i, provided
// its timestamp is within the emitter's range and its resource passes the
// filter.
func (e *Emitter) Instant(i InstantEvent) {
	if !e.tsWithinRange(i.Ts) {
		return
	}
	if e.filter != nil && !e.filter(i.Resource) {
		return
	}
	// TODO(mknyszek): Handle ModeTaskOriented here. See cmd/trace.(*traceContext).emitInstant.
	cname := "" // no color override yet; see TODO above.
	e.OptionalEvent(&format.Event{
		Name:     i.Name,
		Category: i.Category,
		Phase:    "I",
		Scope:    "t",
		Time:     viewerTime(i.Ts),
		TID:      i.Resource,
		Stack:    i.Stack,
		Cname:    cname,
		Arg:      i.Arg,
	})
}
|
||||
|
||||
type InstantEvent struct {
|
||||
Ts time.Duration
|
||||
Name string
|
||||
Category string
|
||||
Resource uint64
|
||||
Stack int
|
||||
Arg any
|
||||
}
|
||||
|
||||
// Arrow emits a flow arrow as a pair of events — an "s" phase event at the
// source resource and a "t" phase event at the destination — tied together
// by a fresh sequence id. Both endpoints must be within the time range and
// both resources must pass the filter.
func (e *Emitter) Arrow(a ArrowEvent) {
	if !e.tsWithinRange(a.Start) || !e.tsWithinRange(a.End) {
		return
	}
	if e.filter != nil && (!e.filter(a.FromResource) || !e.filter(a.ToResource)) {
		return
	}
	// TODO(mknyszek): Handle ModeTaskOriented here. See cmd/trace.(*traceContext).emitArrow.
	e.arrowSeq++
	e.OptionalEvent(&format.Event{
		Name:  a.Name,
		Phase: "s",
		TID:   a.FromResource,
		ID:    e.arrowSeq,
		Time:  viewerTime(a.Start),
		Stack: a.FromStack,
	})
	e.OptionalEvent(&format.Event{
		Name:  a.Name,
		Phase: "t",
		TID:   a.ToResource,
		ID:    e.arrowSeq,
		Time:  viewerTime(a.End),
	})
}
|
||||
|
||||
type ArrowEvent struct {
|
||||
Name string
|
||||
Start time.Duration
|
||||
End time.Duration
|
||||
FromResource uint64
|
||||
FromStack int
|
||||
ToResource uint64
|
||||
}
|
||||
|
||||
// Event emits ev unconditionally, marking it required so it is present in
// every split of the trace.
func (e *Emitter) Event(ev *format.Event) {
	e.c.ConsumeViewerEvent(ev, true)
}
|
||||
|
||||
// HeapAlloc records the heap-allocated bytes v at time ts and emits an
// updated heap counter event if the stats changed.
func (e *Emitter) HeapAlloc(ts time.Duration, v uint64) {
	e.heapStats.heapAlloc = v
	e.emitHeapCounters(ts)
}
|
||||
|
||||
// Focus marks the resource with the given id as focused; Flush sorts the
// focused resource to the top of its section.
func (e *Emitter) Focus(id uint64) {
	e.focusResource = id
}
|
||||
|
||||
// GoroutineTransition accounts for a goroutine moving between states at time
// ts and, if the aggregate counts changed, emits a "Goroutines" counter
// event reflecting the new totals.
func (e *Emitter) GoroutineTransition(ts time.Duration, from, to GState) {
	e.gstates[from]--
	e.gstates[to]++
	if e.prevGstates == e.gstates {
		// No net change to the counters; suppress a redundant event.
		return
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Goroutines",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg: &format.GoroutineCountersArg{
				Running:   uint64(e.gstates[GRunning]),
				Runnable:  uint64(e.gstates[GRunnable]),
				GCWaiting: uint64(e.gstates[GWaitingGC]),
			},
		})
	}
	e.prevGstates = e.gstates
}
|
||||
|
||||
// IncThreadStateCount adjusts the count of threads in the given state by
// delta at time ts, emitting a "Threads" counter event if the aggregate
// counts changed.
func (e *Emitter) IncThreadStateCount(ts time.Duration, state ThreadState, delta int64) {
	e.threadStats[state] += delta
	if e.prevThreadStats == e.threadStats {
		// No net change to the counters; suppress a redundant event.
		return
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Threads",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg: &format.ThreadCountersArg{
				Running:   int64(e.threadStats[ThreadStateRunning]),
				InSyscall: int64(e.threadStats[ThreadStateInSyscall]),
				// TODO(mknyszek): Why is InSyscallRuntime not included here?
			},
		})
	}
	e.prevThreadStats = e.threadStats
}
|
||||
|
||||
// HeapGoal records the heap goal (next-GC target) v at time ts and emits an
// updated heap counter event if the stats changed.
func (e *Emitter) HeapGoal(ts time.Duration, v uint64) {
	// This cutoff at 1 PiB is a workaround for https://github.com/golang/go/issues/63864.
	//
	// TODO(mknyszek): Remove this once the problem has been fixed.
	const PB = 1 << 50
	if v > PB {
		v = 0
	}
	e.heapStats.nextGC = v
	e.emitHeapCounters(ts)
}
|
||||
|
||||
// emitHeapCounters emits a "Heap" counter event at ts reflecting the current
// heapStats, deduplicating against the previously emitted values.
func (e *Emitter) emitHeapCounters(ts time.Duration) {
	if e.prevHeapStats == e.heapStats {
		return
	}
	// NextGC is reported as the headroom above the current allocation,
	// clamped at zero (nextGC may be behind heapAlloc).
	diff := uint64(0)
	if e.heapStats.nextGC > e.heapStats.heapAlloc {
		diff = e.heapStats.nextGC - e.heapStats.heapAlloc
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Heap",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg:   &format.HeapCountersArg{Allocated: e.heapStats.heapAlloc, NextGC: diff},
		})
	}
	e.prevHeapStats = e.heapStats
}
|
||||
|
||||
// Err returns an error if the emitter is in an invalid state: any of the
// runnable/running goroutine counters or the syscall thread counters having
// gone negative, which indicates mismatched transition accounting upstream.
func (e *Emitter) Err() error {
	if e.gstates[GRunnable] < 0 || e.gstates[GRunning] < 0 || e.threadStats[ThreadStateInSyscall] < 0 || e.threadStats[ThreadStateInSyscallRuntime] < 0 {
		return fmt.Errorf(
			"runnable=%d running=%d insyscall=%d insyscallRuntime=%d",
			e.gstates[GRunnable],
			e.gstates[GRunning],
			e.threadStats[ThreadStateInSyscall],
			e.threadStats[ThreadStateInSyscallRuntime],
		)
	}
	return nil
}
|
||||
|
||||
func (e *Emitter) tsWithinRange(ts time.Duration) bool {
|
||||
return e.rangeStart <= ts && ts <= e.rangeEnd
|
||||
}
|
||||
|
||||
// OptionalEvent emits ev if it's within the time range of the consumer, i.e.
// the selected trace split range. The event is forwarded unconditionally
// with required=false; any range filtering happens in the consumer.
func (e *Emitter) OptionalEvent(ev *format.Event) {
	e.c.ConsumeViewerEvent(ev, false)
}
|
||||
|
||||
// Flush emits all deferred metadata — section names, the fixed pseudo-proc
// rows (GC, Network, Timers, Syscalls), and per-resource names with their
// sort order — then flushes the underlying consumer. Call once, after all
// events have been emitted.
func (e *Emitter) Flush() {
	e.processMeta(format.StatsSection, "STATS", 0)
	if e.mode&ModeTaskOriented != 0 {
		e.processMeta(format.TasksSection, "TASKS", 1)
	}

	e.processMeta(format.ProcsSection, e.resourceType, 2)

	// Pseudo-resources get fixed negative sort indexes so they appear
	// above the real resources (which use their non-negative ids).
	e.threadMeta(format.ProcsSection, trace.GCP, "GC", -6)
	e.threadMeta(format.ProcsSection, trace.NetpollP, "Network", -5)
	e.threadMeta(format.ProcsSection, trace.TimerP, "Timers", -4)
	e.threadMeta(format.ProcsSection, trace.SyscallP, "Syscalls", -3)

	for id, name := range e.resources {
		priority := int(id)
		if e.focusResource != 0 && id == e.focusResource {
			// Put the focus goroutine on top.
			priority = -2
		}
		e.threadMeta(format.ProcsSection, id, name, priority)
	}

	e.c.Flush()
}
|
||||
|
||||
// threadMeta emits the pair of required metadata ("M" phase) events that
// name the thread row tid and fix its sort position within section
// sectionID.
func (e *Emitter) threadMeta(sectionID, tid uint64, name string, priority int) {
	e.Event(&format.Event{
		Name:  "thread_name",
		Phase: "M",
		PID:   sectionID,
		TID:   tid,
		Arg:   &format.NameArg{Name: name},
	})
	e.Event(&format.Event{
		Name:  "thread_sort_index",
		Phase: "M",
		PID:   sectionID,
		TID:   tid,
		Arg:   &format.SortIndexArg{Index: priority},
	})
}
|
||||
|
||||
// processMeta emits the pair of required metadata ("M" phase) events that
// name the section sectionID and fix its sort position among sections.
func (e *Emitter) processMeta(sectionID uint64, name string, priority int) {
	e.Event(&format.Event{
		Name:  "process_name",
		Phase: "M",
		PID:   sectionID,
		Arg:   &format.NameArg{Name: name},
	})
	e.Event(&format.Event{
		Name:  "process_sort_index",
		Phase: "M",
		PID:   sectionID,
		Arg:   &format.SortIndexArg{Index: priority},
	})
}
|
||||
|
||||
// Stack emits the given frames and returns a unique id for the stack. No
// pointers to the given data are being retained beyond the call to Stack.
// Stacks sharing a common outer-frame prefix share nodes in the emitter's
// frame prefix tree (see buildBranch).
func (e *Emitter) Stack(stk []*trace.Frame) int {
	return e.buildBranch(e.frameTree, stk)
}
|
||||
|
||||
// buildBranch builds one branch in the prefix tree rooted at ctx.frameTree.
// It consumes stk from the end of the slice inward, reusing existing nodes
// keyed by PC and emitting a viewer frame for each newly created node, and
// returns the id of the node reached for the innermost frame.
func (e *Emitter) buildBranch(parent frameNode, stk []*trace.Frame) int {
	if len(stk) == 0 {
		return parent.id
	}
	// Peel off the last frame first so common stack suffixes in stk
	// (processed first) share tree nodes.
	last := len(stk) - 1
	frame := stk[last]
	stk = stk[:last]

	node, ok := parent.children[frame.PC]
	if !ok {
		// First time seeing this PC under parent: assign a fresh id
		// and report the frame to the consumer.
		e.frameSeq++
		node.id = e.frameSeq
		node.children = make(map[uint64]frameNode)
		parent.children[frame.PC] = node
		e.c.ConsumeViewerFrame(strconv.Itoa(node.id), format.Frame{Name: fmt.Sprintf("%v:%v", frame.Fn, frame.Line), Parent: parent.id})
	}
	return e.buildBranch(node, stk)
}
|
||||
|
||||
type heapStats struct {
|
||||
heapAlloc uint64
|
||||
nextGC uint64
|
||||
}
|
||||
|
||||
// viewerTime converts a duration into the floating-point microsecond
// timestamps used by the trace viewer.
func viewerTime(t time.Duration) float64 {
	const usPerUnit = float64(time.Microsecond)
	return float64(t) / usPerUnit
}
|
||||
|
||||
type GState int
|
||||
|
||||
const (
|
||||
GDead GState = iota
|
||||
GRunnable
|
||||
GRunning
|
||||
GWaiting
|
||||
GWaitingGC
|
||||
|
||||
gStateCount
|
||||
)
|
||||
|
||||
type ThreadState int
|
||||
|
||||
const (
|
||||
ThreadStateInSyscall ThreadState = iota
|
||||
ThreadStateInSyscallRuntime
|
||||
ThreadStateRunning
|
||||
|
||||
threadStateCount
|
||||
)
|
||||
|
||||
type frameNode struct {
|
||||
id int
|
||||
children map[uint64]frameNode
|
||||
}
|
||||
|
|
@ -7,7 +7,10 @@
|
|||
//
|
||||
// The official description of the format is in this file:
|
||||
// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
|
||||
package traceviewer
|
||||
//
|
||||
// Note: This can't be part of the parent traceviewer package as that would
|
||||
// throw. go_bootstrap cannot depend on the cgo version of package net in ./make.bash.
|
||||
package format
|
||||
|
||||
type Data struct {
|
||||
Events []*Event `json:"traceEvents"`
|
||||
|
|
@ -36,3 +39,41 @@ type Frame struct {
|
|||
Name string `json:"name"`
|
||||
Parent int `json:"parent,omitempty"`
|
||||
}
|
||||
|
||||
type NameArg struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type BlockedArg struct {
|
||||
Blocked string `json:"blocked"`
|
||||
}
|
||||
|
||||
type SortIndexArg struct {
|
||||
Index int `json:"sort_index"`
|
||||
}
|
||||
|
||||
type HeapCountersArg struct {
|
||||
Allocated uint64
|
||||
NextGC uint64
|
||||
}
|
||||
|
||||
const (
|
||||
ProcsSection = 0 // where Goroutines or per-P timelines are presented.
|
||||
StatsSection = 1 // where counters are presented.
|
||||
TasksSection = 2 // where Task hierarchy & timeline is presented.
|
||||
)
|
||||
|
||||
type GoroutineCountersArg struct {
|
||||
Running uint64
|
||||
Runnable uint64
|
||||
GCWaiting uint64
|
||||
}
|
||||
|
||||
type ThreadCountersArg struct {
|
||||
Running int64
|
||||
InSyscall int64
|
||||
}
|
||||
|
||||
type ThreadIDArg struct {
|
||||
ThreadID uint64
|
||||
}
|
||||
|
|
@ -0,0 +1,375 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceviewer
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func MainHandler(ranges []Range) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
if err := templMain.Execute(w, ranges); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var templMain = template.Must(template.New("").Parse(`
|
||||
<html>
|
||||
<style>
|
||||
/* See https://github.com/golang/pkgsite/blob/master/static/shared/typography/typography.css */
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji';
|
||||
font-size: 1rem;
|
||||
line-height: normal;
|
||||
max-width: 9in;
|
||||
margin: 1em;
|
||||
}
|
||||
h1 { font-size: 1.5rem; }
|
||||
h2 { font-size: 1.375rem; }
|
||||
h1,h2 {
|
||||
font-weight: 600;
|
||||
line-height: 1.25em;
|
||||
word-break: break-word;
|
||||
}
|
||||
p { color: grey85; font-size:85%; }
|
||||
</style>
|
||||
<body>
|
||||
<h1>cmd/trace: the Go trace event viewer</h1>
|
||||
<p>
|
||||
This web server provides various visualizations of an event log gathered during
|
||||
the execution of a Go program that uses the <a href='https://pkg.go.dev/runtime/trace'>runtime/trace</a> package.
|
||||
</p>
|
||||
|
||||
<h2>Event timelines for running goroutines</h2>
|
||||
{{if $}}
|
||||
<p>
|
||||
Large traces are split into multiple sections of equal data size
|
||||
(not duration) to avoid overwhelming the visualizer.
|
||||
</p>
|
||||
<ul>
|
||||
{{range $e := $}}
|
||||
<li><a href="{{$e.URL}}">View trace ({{$e.Name}})</a></li>
|
||||
{{end}}
|
||||
</ul>
|
||||
{{else}}
|
||||
<ul>
|
||||
<li><a href="/trace">View trace</a></li>
|
||||
</ul>
|
||||
{{end}}
|
||||
<p>
|
||||
This view displays a timeline for each of the GOMAXPROCS logical
|
||||
processors, showing which goroutine (if any) was running on that
|
||||
logical processor at each moment.
|
||||
|
||||
Each goroutine has an identifying number (e.g. G123), main function,
|
||||
and color.
|
||||
|
||||
A colored bar represents an uninterrupted span of execution.
|
||||
|
||||
Execution of a goroutine may migrate from one logical processor to another,
|
||||
causing a single colored bar to be horizontally continuous but
|
||||
vertically displaced.
|
||||
</p>
|
||||
<p>
|
||||
Clicking on a span reveals information about it, such as its
|
||||
duration, its causal predecessors and successors, and the stack trace
|
||||
at the final moment when it yielded the logical processor, for example
|
||||
because it made a system call or tried to acquire a mutex.
|
||||
|
||||
Directly underneath each bar, a smaller bar or more commonly a fine
|
||||
vertical line indicates an event occurring during its execution.
|
||||
Some of these are related to garbage collection; most indicate that
|
||||
a goroutine yielded its logical processor but then immediately resumed execution
|
||||
on the same logical processor. Clicking on the event displays the stack trace
|
||||
at the moment it occurred.
|
||||
</p>
|
||||
<p>
|
||||
The causal relationships between spans of goroutine execution
|
||||
can be displayed by clicking the Flow Events button at the top.
|
||||
</p>
|
||||
<p>
|
||||
At the top ("STATS"), there are three additional timelines that
|
||||
display statistical information.
|
||||
|
||||
"Goroutines" is a time series of the count of existing goroutines;
|
||||
clicking on it displays their breakdown by state at that moment:
|
||||
running, runnable, or waiting.
|
||||
|
||||
"Heap" is a time series of the amount of heap memory allocated (in orange)
|
||||
and (in green) the allocation limit at which the next GC cycle will begin.
|
||||
|
||||
"Threads" shows the number of kernel threads in existence: there is
|
||||
always one kernel thread per logical processor, and additional threads
|
||||
are created for calls to non-Go code such as a system call or a
|
||||
function written in C.
|
||||
</p>
|
||||
<p>
|
||||
Above the event trace for the first logical processor are
|
||||
traces for various runtime-internal events.
|
||||
|
||||
The "GC" bar shows when the garbage collector is running, and in which stage.
|
||||
Garbage collection may temporarily affect all the logical processors
|
||||
and the other metrics.
|
||||
|
||||
The "Network", "Timers", and "Syscalls" traces indicate events in
|
||||
the runtime that cause goroutines to wake up.
|
||||
</p>
|
||||
<p>
|
||||
The visualization allows you to navigate events at scales ranging from several
|
||||
seconds to a handful of nanoseconds.
|
||||
|
||||
Consult the documentation for the Chromium <a href='https://www.chromium.org/developers/how-tos/trace-event-profiling-tool/'>Trace Event Profiling Tool<a/>
|
||||
for help navigating the view.
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li><a href="/goroutines">Goroutine analysis</a></li>
|
||||
</ul>
|
||||
<p>
|
||||
This view displays information about each set of goroutines that
|
||||
shares the same main function.
|
||||
|
||||
Clicking on a main function shows links to the four types of
|
||||
blocking profile (see below) applied to that subset of goroutines.
|
||||
|
||||
It also shows a table of specific goroutine instances, with various
|
||||
execution statistics and a link to the event timeline for each one.
|
||||
|
||||
The timeline displays only the selected goroutine and any others it
|
||||
interacts with via block/unblock events. (The timeline is
|
||||
goroutine-oriented rather than logical processor-oriented.)
|
||||
</p>
|
||||
|
||||
<h2>Profiles</h2>
|
||||
<p>
|
||||
Each link below displays a global profile in zoomable graph form as
|
||||
produced by <a href='https://go.dev/blog/pprof'>pprof</a>'s "web" command.
|
||||
|
||||
In addition there is a link to download the profile for offline
|
||||
analysis with pprof.
|
||||
|
||||
All four profiles represent causes of delay that prevent a goroutine
|
||||
from running on a logical processor: because it was waiting for the network,
|
||||
for a synchronization operation on a mutex or channel, for a system call,
|
||||
or for a logical processor to become available.
|
||||
</p>
|
||||
<ul>
|
||||
<li><a href="/io">Network blocking profile</a> (<a href="/io?raw=1" download="io.profile">⬇</a>)</li>
|
||||
<li><a href="/block">Synchronization blocking profile</a> (<a href="/block?raw=1" download="block.profile">⬇</a>)</li>
|
||||
<li><a href="/syscall">Syscall blocking profile</a> (<a href="/syscall?raw=1" download="syscall.profile">⬇</a>)</li>
|
||||
<li><a href="/sched">Scheduler latency profile</a> (<a href="/sched?raw=1" download="sched.profile">⬇</a>)</li>
|
||||
</ul>
|
||||
|
||||
<h2>User-defined tasks and regions</h2>
|
||||
<p>
|
||||
The trace API allows a target program to annotate a <a
|
||||
href='https://pkg.go.dev/runtime/trace#Region'>region</a> of code
|
||||
within a goroutine, such as a key function, so that its performance
|
||||
can be analyzed.
|
||||
|
||||
<a href='https://pkg.go.dev/runtime/trace#Log'>Log events</a> may be
|
||||
associated with a region to record progress and relevant values.
|
||||
|
||||
The API also allows annotation of higher-level
|
||||
<a href='https://pkg.go.dev/runtime/trace#Task'>tasks</a>,
|
||||
which may involve work across many goroutines.
|
||||
</p>
|
||||
<p>
|
||||
The links below display, for each region and task, a histogram of its execution times.
|
||||
|
||||
Each histogram bucket contains a sample trace that records the
|
||||
sequence of events such as goroutine creations, log events, and
|
||||
subregion start/end times.
|
||||
|
||||
For each task, you can click through to a logical-processor or
|
||||
goroutine-oriented view showing the tasks and regions on the
|
||||
timeline.
|
||||
|
||||
Such information may help uncover which steps in a region are
|
||||
unexpectedly slow, or reveal relationships between the data values
|
||||
logged in a request and its running time.
|
||||
</p>
|
||||
<ul>
|
||||
<li><a href="/usertasks">User-defined tasks</a></li>
|
||||
<li><a href="/userregions">User-defined regions</a></li>
|
||||
</ul>
|
||||
|
||||
<h2>Garbage collection metrics</h2>
|
||||
<ul>
|
||||
<li><a href="/mmu">Minimum mutator utilization</a></li>
|
||||
</ul>
|
||||
<p>
|
||||
This chart indicates the maximum GC pause time (the largest x value
|
||||
for which y is zero), and more generally, the fraction of time that
|
||||
the processors are available to application goroutines ("mutators"),
|
||||
for any time window of a specified size, in the worst case.
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
`))
|
||||
|
||||
type Range struct {
|
||||
Name string
|
||||
Start int
|
||||
End int
|
||||
StartTime int64
|
||||
EndTime int64
|
||||
}
|
||||
|
||||
func (r Range) URL() string {
|
||||
return fmt.Sprintf("/trace?start=%d&end=%d", r.Start, r.End)
|
||||
}
|
||||
|
||||
func TraceHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := r.ParseForm(); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode())
|
||||
w.Write([]byte(html))
|
||||
})
|
||||
}
|
||||
|
||||
// https://chromium.googlesource.com/catapult/+/9508452e18f130c98499cb4c4f1e1efaedee8962/tracing/docs/embedding-trace-viewer.md
|
||||
// This is almost verbatim copy of https://chromium-review.googlesource.com/c/catapult/+/2062938/2/tracing/bin/index.html
|
||||
var templTrace = `
|
||||
<html>
|
||||
<head>
|
||||
<script src="/static/webcomponents.min.js"></script>
|
||||
<script>
|
||||
'use strict';
|
||||
|
||||
function onTraceViewerImportFail() {
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
document.body.textContent =
|
||||
'/static/trace_viewer_full.html is missing. File a bug in https://golang.org/issue';
|
||||
});
|
||||
}
|
||||
</script>
|
||||
|
||||
<link rel="import" href="/static/trace_viewer_full.html"
|
||||
onerror="onTraceViewerImportFail(event)">
|
||||
|
||||
<style type="text/css">
|
||||
html, body {
|
||||
box-sizing: border-box;
|
||||
overflow: hidden;
|
||||
margin: 0px;
|
||||
padding: 0;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
}
|
||||
#trace-viewer {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
}
|
||||
#trace-viewer:focus {
|
||||
outline: none;
|
||||
}
|
||||
</style>
|
||||
<script>
|
||||
'use strict';
|
||||
(function() {
|
||||
var viewer;
|
||||
var url;
|
||||
var model;
|
||||
|
||||
function load() {
|
||||
var req = new XMLHttpRequest();
|
||||
var isBinary = /[.]gz$/.test(url) || /[.]zip$/.test(url);
|
||||
req.overrideMimeType('text/plain; charset=x-user-defined');
|
||||
req.open('GET', url, true);
|
||||
if (isBinary)
|
||||
req.responseType = 'arraybuffer';
|
||||
|
||||
req.onreadystatechange = function(event) {
|
||||
if (req.readyState !== 4)
|
||||
return;
|
||||
|
||||
window.setTimeout(function() {
|
||||
if (req.status === 200)
|
||||
onResult(isBinary ? req.response : req.responseText);
|
||||
else
|
||||
onResultFail(req.status);
|
||||
}, 0);
|
||||
};
|
||||
req.send(null);
|
||||
}
|
||||
|
||||
function onResultFail(err) {
|
||||
var overlay = new tr.ui.b.Overlay();
|
||||
overlay.textContent = err + ': ' + url + ' could not be loaded';
|
||||
overlay.title = 'Failed to fetch data';
|
||||
overlay.visible = true;
|
||||
}
|
||||
|
||||
function onResult(result) {
|
||||
model = new tr.Model();
|
||||
var opts = new tr.importer.ImportOptions();
|
||||
opts.shiftWorldToZero = false;
|
||||
var i = new tr.importer.Import(model, opts);
|
||||
var p = i.importTracesWithProgressDialog([result]);
|
||||
p.then(onModelLoaded, onImportFail);
|
||||
}
|
||||
|
||||
function onModelLoaded() {
|
||||
viewer.model = model;
|
||||
viewer.viewTitle = "trace";
|
||||
|
||||
if (!model || model.bounds.isEmpty)
|
||||
return;
|
||||
var sel = window.location.hash.substr(1);
|
||||
if (sel === '')
|
||||
return;
|
||||
var parts = sel.split(':');
|
||||
var range = new (tr.b.Range || tr.b.math.Range)();
|
||||
range.addValue(parseFloat(parts[0]));
|
||||
range.addValue(parseFloat(parts[1]));
|
||||
viewer.trackView.viewport.interestRange.set(range);
|
||||
}
|
||||
|
||||
function onImportFail(err) {
|
||||
var overlay = new tr.ui.b.Overlay();
|
||||
overlay.textContent = tr.b.normalizeException(err).message;
|
||||
overlay.title = 'Import error';
|
||||
overlay.visible = true;
|
||||
}
|
||||
|
||||
document.addEventListener('WebComponentsReady', function() {
|
||||
var container = document.createElement('track-view-container');
|
||||
container.id = 'track_view_container';
|
||||
|
||||
viewer = document.createElement('tr-ui-timeline-view');
|
||||
viewer.track_view_container = container;
|
||||
Polymer.dom(viewer).appendChild(container);
|
||||
|
||||
viewer.id = 'trace-viewer';
|
||||
viewer.globalMode = true;
|
||||
Polymer.dom(document.body).appendChild(viewer);
|
||||
|
||||
url = '/jsontrace?{{PARAMS}}';
|
||||
load();
|
||||
});
|
||||
}());
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
|
||||
//go:embed static/trace_viewer_full.html static/webcomponents.min.js
|
||||
var staticContent embed.FS
|
||||
|
||||
func StaticHandler() http.Handler {
|
||||
return http.FileServer(http.FS(staticContent))
|
||||
}
|
||||
|
|
@ -16,7 +16,8 @@ import (
|
|||
type Version uint32
|
||||
|
||||
const (
|
||||
Go122 Version = 22
|
||||
Go122 Version = 22
|
||||
Current = Go122
|
||||
)
|
||||
|
||||
var versions = map[Version][]event.Spec{
|
||||
|
|
|
|||
Loading…
Reference in New Issue