1// Copyright 2020 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package benchmark provides a Metrics object that enables memory and CPU
6// profiling for the linker. The Metrics objects can be used to mark stages
// of the code, and name the measurements during that stage. There are also
8// optional GCs that can be performed at the end of each stage, so you
9// can get an accurate measurement of how each stage changes live memory.
10package benchmark
11
12import (
13	"fmt"
14	"io"
15	"os"
16	"runtime"
17	"runtime/pprof"
18	"time"
19	"unicode"
20)
21
// Flags selects the benchmarking mode: GC forces a garbage collection
// at the end of each stage (so live-memory figures are accurate),
// NoGC skips it.
type Flags int

const (
	// GC requests a runtime.GC() after each stage (and one in New),
	// so Report can include an accurate live-bytes figure per stage.
	GC         = 1 << iota
	// NoGC disables the per-stage collections.
	NoGC Flags = 0
)
28
// Metrics collects timing, allocation, and (optionally) CPU/heap
// profile data for a sequence of named stages delimited by Start calls.
type Metrics struct {
	gc        Flags    // GC to force a collection (and record live heap) after each stage
	marks     []*mark  // completed stages, in Start order
	curMark   *mark    // stage currently being measured; nil between stages
	filebase  string   // if non-empty, per-stage pprof files are written with this prefix
	pprofFile *os.File // open CPU-profile file for the current stage; nil when not profiling
}
36
// mark records the measurements for a single stage: memory stats at
// the stage's start and end (and after the optional end-of-stage GC),
// plus wall-clock start and end times.
type mark struct {
	name              string
	startM, endM, gcM runtime.MemStats // gcM is only populated when the GC flag is set
	startT, endT      time.Time
}
42
// New creates a new Metrics object.
//
// gc selects whether a garbage collection is forced after each stage
// (see Flags). filebase, if non-empty, enables per-stage CPU and heap
// profile output to files named <filebase>_<Stage>.<type>.
//
// Typical usage should look like:
//
//	func main() {
//	  filename := "" // Set to enable per-phase pprof file output.
//	  bench := benchmark.New(benchmark.GC, filename)
//	  defer bench.Report(os.Stdout)
//	  // etc
//	  bench.Start("foo")
//	  foo()
//	  bench.Start("bar")
//	  bar()
//	}
//
// Note that a nil Metrics object won't cause any errors, so one could write
// code like:
//
//	func main() {
//	  enableBenchmarking := flag.Bool("enable", true, "enables benchmarking")
//	  flag.Parse()
//	  var bench *benchmark.Metrics
//	  if *enableBenchmarking {
//	    bench = benchmark.New(benchmark.GC, "")
//	  }
//	  bench.Start("foo")
//	  // etc.
//	}
func New(gc Flags, filebase string) *Metrics {
	// Start from a clean heap so the first stage's memory numbers
	// aren't skewed by garbage left over from program startup.
	if gc == GC {
		runtime.GC()
	}
	return &Metrics{gc: gc, filebase: filebase}
}
77
78// Report reports the metrics.
79// Closes the currently Start(ed) range, and writes the report to the given io.Writer.
80func (m *Metrics) Report(w io.Writer) {
81	if m == nil {
82		return
83	}
84
85	m.closeMark()
86
87	gcString := ""
88	if m.gc == GC {
89		gcString = "_GC"
90	}
91
92	var totTime time.Duration
93	for _, curMark := range m.marks {
94		dur := curMark.endT.Sub(curMark.startT)
95		totTime += dur
96		fmt.Fprintf(w, "%s 1 %d ns/op", makeBenchString(curMark.name+gcString), dur.Nanoseconds())
97		fmt.Fprintf(w, "\t%d B/op", curMark.endM.TotalAlloc-curMark.startM.TotalAlloc)
98		fmt.Fprintf(w, "\t%d allocs/op", curMark.endM.Mallocs-curMark.startM.Mallocs)
99		if m.gc == GC {
100			fmt.Fprintf(w, "\t%d live-B", curMark.gcM.HeapAlloc)
101		} else {
102			fmt.Fprintf(w, "\t%d heap-B", curMark.endM.HeapAlloc)
103		}
104		fmt.Fprintf(w, "\n")
105	}
106	fmt.Fprintf(w, "%s 1 %d ns/op\n", makeBenchString("total time"+gcString), totTime.Nanoseconds())
107}
108
109// Start marks the beginning of a new measurement phase.
110// Once a metric is started, it continues until either a Report is issued, or another Start is called.
111func (m *Metrics) Start(name string) {
112	if m == nil {
113		return
114	}
115	m.closeMark()
116	m.curMark = &mark{name: name}
117	// Unlikely we need to a GC here, as one was likely just done in closeMark.
118	if m.shouldPProf() {
119		f, err := os.Create(makePProfFilename(m.filebase, name, "cpuprof"))
120		if err != nil {
121			panic(err)
122		}
123		m.pprofFile = f
124		if err = pprof.StartCPUProfile(m.pprofFile); err != nil {
125			panic(err)
126		}
127	}
128	runtime.ReadMemStats(&m.curMark.startM)
129	m.curMark.startT = time.Now()
130}
131
// closeMark finishes the in-progress mark, if any: it stamps the end
// time, stops any running CPU profile, records end-of-stage memory
// stats, optionally GCs and snapshots the live heap, and appends the
// completed mark to m.marks. No-op on a nil Metrics or with no open mark.
func (m *Metrics) closeMark() {
	if m == nil || m.curMark == nil {
		return
	}
	// Stamp the end time first so the bookkeeping below isn't charged
	// to the stage's duration.
	m.curMark.endT = time.Now()
	if m.shouldPProf() {
		pprof.StopCPUProfile()
		m.pprofFile.Close()
		m.pprofFile = nil
	}
	runtime.ReadMemStats(&m.curMark.endM)
	if m.gc == GC {
		runtime.GC()
		// gcM holds post-collection stats, i.e. the stage's live memory.
		runtime.ReadMemStats(&m.curMark.gcM)
		if m.shouldPProf() {
			// Collect a profile of the live heap. Do a
			// second GC to force sweep completion so we
			// get a complete snapshot of the live heap at
			// the end of this phase.
			runtime.GC()
			f, err := os.Create(makePProfFilename(m.filebase, m.curMark.name, "memprof"))
			if err != nil {
				panic(err)
			}
			err = pprof.WriteHeapProfile(f)
			if err != nil {
				panic(err)
			}
			err = f.Close()
			if err != nil {
				panic(err)
			}
		}
	}
	m.marks = append(m.marks, m.curMark)
	m.curMark = nil
}
169
170// shouldPProf returns true if we should be doing pprof runs.
171func (m *Metrics) shouldPProf() bool {
172	return m != nil && len(m.filebase) > 0
173}
174
// makeBenchString makes a benchmark string consumable by Go's
// benchmarking tools: it prefixes "Benchmark", removes all whitespace,
// and upper-cases the rune following each removed gap (and the first
// rune), producing a CamelCase identifier.
func makeBenchString(name string) string {
	out := []rune("Benchmark")
	capitalizeNext := true
	for _, c := range name {
		switch {
		case unicode.IsSpace(c):
			capitalizeNext = true
		case capitalizeNext:
			out = append(out, unicode.ToUpper(c))
			capitalizeNext = false
		default:
			out = append(out, c)
		}
	}
	return string(out)
}
192
193func makePProfFilename(filebase, name, typ string) string {
194	return fmt.Sprintf("%s_%s.%s", filebase, makeBenchString(name), typ)
195}
196