1// Copyright 2020 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime
6
7// Metrics implementation exported to runtime/metrics.
8
9import (
10	"internal/godebugs"
11	"unsafe"
12)
13
var (
	// metrics is a map of runtime/metrics keys to data used by the runtime
	// to sample each metric's value. metricsInit indicates it has been
	// initialized.
	//
	// These fields are protected by metricsSema which should be
	// locked/unlocked with metricsLock() / metricsUnlock().
	metricsSema uint32 = 1
	metricsInit bool
	metrics     map[string]metricData

	// sizeClassBuckets and timeHistBuckets hold the histogram bucket
	// boundaries used by the by-size and timing metrics. Both are
	// populated once by initMetrics.
	sizeClassBuckets []float64
	timeHistBuckets  []float64
)
28
// metricData describes how to sample a single runtime/metrics metric.
type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	//
	// Metrics that read their value directly (e.g. from an atomic
	// or a histogram) leave deps empty.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}
39
// metricsLock acquires metricsSema, which guards the metrics map and
// its initialization state. Pair with metricsUnlock.
func metricsLock() {
	// Acquire the metricsSema but with handoff. Operations are typically
	// expensive enough that queueing up goroutines and handing off between
	// them will be noticeably better-behaved.
	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
	if raceenabled {
		// Model the semaphore as a lock for the race detector.
		raceacquire(unsafe.Pointer(&metricsSema))
	}
}
49
// metricsUnlock releases metricsSema acquired by metricsLock.
func metricsUnlock() {
	if raceenabled {
		// The race-detector release annotation must happen before the
		// semaphore is actually released, mirroring metricsLock.
		racerelease(unsafe.Pointer(&metricsSema))
	}
	semrelease(&metricsSema)
}
56
// initMetrics initializes the metrics map if it hasn't been yet.
//
// metricsSema must be held.
func initMetrics() {
	if metricsInit {
		return
	}

	// Build the bucket boundaries for the allocs/frees-by-size histogram
	// metrics from the allocator's size classes, plus one extra slot for
	// the +Inf overflow bucket appended below.
	sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
	// Skip size class 0 which is a stand-in for large objects, but large
	// objects are tracked separately (and they actually get placed in
	// the last bucket, not the first).
	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
	for i := 1; i < _NumSizeClasses; i++ {
		// Size classes have an inclusive upper-bound
		// and exclusive lower bound (e.g. 48-byte size class is
		// (32, 48]) whereas we want an inclusive lower-bound
		// and exclusive upper-bound (e.g. 48-byte size class is
		// [33, 49)). We can achieve this by shifting all bucket
		// boundaries up by 1.
		//
		// Also, a float64 can precisely represent integers with
		// value up to 2^53 and size classes are relatively small
		// (nowhere near 2^48 even) so this will give us exact
		// boundaries.
		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
	}
	// The final bucket catches large objects, which are tracked
	// separately from the small size classes.
	sizeClassBuckets = append(sizeClassBuckets, float64Inf())

	timeHistBuckets = timeHistogramMetricsBuckets()
	metrics = map[string]metricData{
		"/cgo/go-to-c-calls:calls": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(NumCgoCall())
			},
		},
		"/cpu/classes/gc/mark/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCAssistTime))
			},
		},
		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCDedicatedTime))
			},
		},
		"/cpu/classes/gc/mark/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCIdleTime))
			},
		},
		"/cpu/classes/gc/pause:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCPauseTime))
			},
		},
		"/cpu/classes/gc/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCTotalTime))
			},
		},
		"/cpu/classes/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.IdleTime))
			},
		},
		"/cpu/classes/scavenge/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeAssistTime))
			},
		},
		"/cpu/classes/scavenge/background:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeBgTime))
			},
		},
		"/cpu/classes/scavenge/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeTotalTime))
			},
		},
		"/cpu/classes/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.TotalTime))
			},
		},
		"/cpu/classes/user:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.UserTime))
			},
		},
		"/gc/cycles/automatic:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/forced:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/total:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone
			},
		},
		"/gc/scan/globals:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.globalsScan
			},
		},
		"/gc/scan/heap:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.heapScan
			},
		},
		"/gc/scan/stack:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.stackScan
			},
		},
		"/gc/scan/total:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.totalScan
			},
		},
		"/gc/heap/allocs-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				// Large objects land in the final (overflow) bucket.
				hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallAllocCount[1:] {
					hist.counts[i] = count
				}
			},
		},
		"/gc/heap/allocs:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocated
			},
		},
		"/gc/heap/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocs
			},
		},
		"/gc/heap/frees-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				// Large objects land in the final (overflow) bucket.
				hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallFreeCount[1:] {
					hist.counts[i] = count
				}
			},
		},
		"/gc/heap/frees:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFreed
			},
		},
		"/gc/heap/frees:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFrees
			},
		},
		"/gc/heap/goal:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.heapGoal
			},
		},
		"/gc/gomemlimit:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcController.memoryLimit.Load())
			},
		},
		"/gc/gogc:percent": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcController.gcPercent.Load())
			},
		},
		"/gc/heap/live:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = gcController.heapMarked
			},
		},
		"/gc/heap/objects:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.numObjects
			},
		},
		"/gc/heap/tiny/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.tinyAllocCount
			},
		},
		"/gc/limiter/last-enabled:gc-cycle": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
			},
		},
		"/gc/pauses:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				// N.B. this is identical to /sched/pauses/total/gc:seconds.
				sched.stwTotalTimeGC.write(out)
			},
		},
		"/gc/stack/starting-size:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(startingStackSize)
			},
		},
		"/memory/classes/heap/free:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				// Committed heap memory not in any of the other
				// accounted-for categories.
				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
					in.heapStats.inStacks - in.heapStats.inWorkBufs -
					in.heapStats.inPtrScalarBits)
			},
		},
		"/memory/classes/heap/objects:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.inObjects
			},
		},
		"/memory/classes/heap/released:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.released)
			},
		},
		"/memory/classes/heap/stacks:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inStacks)
			},
		},
		"/memory/classes/heap/unused:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
			},
		},
		"/memory/classes/metadata/mcache/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mcache/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mspan/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/mspan/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/other:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
			},
		},
		"/memory/classes/os-stacks:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.stacksSys
			},
		},
		"/memory/classes/other:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.otherSys
			},
		},
		"/memory/classes/profiling/buckets:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.buckHashSys
			},
		},
		"/memory/classes/total:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
					in.sysStats.stacksSys + in.sysStats.mSpanSys +
					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
					in.sysStats.gcMiscSys + in.sysStats.otherSys
			},
		},
		"/sched/gomaxprocs:threads": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gomaxprocs)
			},
		},
		"/sched/goroutines:goroutines": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcount())
			},
		},
		"/sched/latencies:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.timeToRun.write(out)
			},
		},
		"/sched/pauses/stopping/gc:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwStoppingTimeGC.write(out)
			},
		},
		"/sched/pauses/stopping/other:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwStoppingTimeOther.write(out)
			},
		},
		"/sched/pauses/total/gc:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwTotalTimeGC.write(out)
			},
		},
		"/sched/pauses/total/other:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwTotalTimeOther.write(out)
			},
		},
		"/sync/mutex/wait/total:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos()))
			},
		},
	}

	// Register a zero-valued placeholder for every non-opaque GODEBUG
	// setting; godebug_registerMetric replaces the placeholder with a
	// real reader when internal/godebug registers it.
	for _, info := range godebugs.All {
		if !info.Opaque {
			metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
		}
	}

	metricsInit = true
}
486
487func compute0(_ *statAggregate, out *metricValue) {
488	out.kind = metricKindUint64
489	out.scalar = 0
490}
491
// metricReader adapts a plain uint64-returning sampling function to the
// metricData.compute signature.
type metricReader func() uint64

// compute reports the wrapped function's result as a uint64 metric value.
func (f metricReader) compute(_ *statAggregate, out *metricValue) {
	out.kind = metricKindUint64
	out.scalar = f()
}
498
499//go:linkname godebug_registerMetric internal/godebug.registerMetric
500func godebug_registerMetric(name string, read func() uint64) {
501	metricsLock()
502	initMetrics()
503	d, ok := metrics[name]
504	if !ok {
505		throw("runtime: unexpected metric registration for " + name)
506	}
507	d.compute = metricReader(read).compute
508	metrics[name] = d
509	metricsUnlock()
510}
511
// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	cpuStatsDep                 // corresponds to cpuStatsAggregate
	gcStatsDep                  // corresponds to gcStatsAggregate
	numStatsDeps                // total number of dependency kinds; used as a loop bound
)
523
// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap. One 64-bit word is plenty since
// numStatsDeps is far below 64.
type statDepSet [1]uint64
528
529// makeStatDepSet creates a new statDepSet from a list of statDeps.
530func makeStatDepSet(deps ...statDep) statDepSet {
531	var s statDepSet
532	for _, d := range deps {
533		s[d/64] |= 1 << (d % 64)
534	}
535	return s
536}
537
538// difference returns set difference of s from b as a new set.
539func (s statDepSet) difference(b statDepSet) statDepSet {
540	var c statDepSet
541	for i := range s {
542		c[i] = s[i] &^ b[i]
543	}
544	return c
545}
546
547// union returns the union of the two sets as a new set.
548func (s statDepSet) union(b statDepSet) statDepSet {
549	var c statDepSet
550	for i := range s {
551		c[i] = s[i] | b[i]
552	}
553	return c
554}
555
556// empty returns true if there are no dependencies in the set.
557func (s *statDepSet) empty() bool {
558	for _, c := range s {
559		if c != 0 {
560			return false
561		}
562	}
563	return true
564}
565
566// has returns true if the set contains a given statDep.
567func (s *statDepSet) has(d statDep) bool {
568	return s[d/64]&(1<<(d%64)) != 0
569}
570
// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta

	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by live objects
	// (total bytes allocated minus total bytes freed).
	inObjects uint64

	// numObjects is the number of live objects in the heap.
	numObjects uint64

	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64

	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64

	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64

	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}
603
604// compute populates the heapStatsAggregate with values from the runtime.
605func (a *heapStatsAggregate) compute() {
606	memstats.heapStats.read(&a.heapStatsDelta)
607
608	// Calculate derived stats.
609	a.totalAllocs = a.largeAllocCount
610	a.totalFrees = a.largeFreeCount
611	a.totalAllocated = a.largeAlloc
612	a.totalFreed = a.largeFree
613	for i := range a.smallAllocCount {
614		na := a.smallAllocCount[i]
615		nf := a.smallFreeCount[i]
616		a.totalAllocs += na
617		a.totalFrees += nf
618		a.totalAllocated += na * uint64(class_to_size[i])
619		a.totalFreed += nf * uint64(class_to_size[i])
620	}
621	a.inObjects = a.totalAllocated - a.totalFreed
622	a.numObjects = a.totalAllocs - a.totalFrees
623}
624
// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because of
// these stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	// Bytes of memory in various runtime-internal categories.
	stacksSys      uint64
	mSpanSys       uint64
	mSpanInUse     uint64
	mCacheSys      uint64
	mCacheInUse    uint64
	buckHashSys    uint64
	gcMiscSys      uint64
	otherSys       uint64
	// heapGoal is the GC's current heap size goal in bytes.
	heapGoal       uint64
	// GC cycle counts: total completed, and the subset that were forced.
	gcCyclesDone   uint64
	gcCyclesForced uint64
}
645
// compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute() {
	a.stacksSys = memstats.stacks_sys.load()
	a.buckHashSys = memstats.buckhash_sys.load()
	a.gcMiscSys = memstats.gcMiscSys.load()
	a.otherSys = memstats.other_sys.load()
	a.heapGoal = gcController.heapGoal()
	a.gcCyclesDone = uint64(memstats.numgc)
	a.gcCyclesForced = uint64(memstats.numforcedgc)

	// The mspan/mcache stats are read under mheap_.lock; acquire it on
	// the system stack.
	systemstack(func() {
		lock(&mheap_.lock)
		a.mSpanSys = memstats.mspan_sys.load()
		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		a.mCacheSys = memstats.mcache_sys.load()
		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		unlock(&mheap_.lock)
	})
}
665
// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats
}
671
// compute populates the cpuStatsAggregate with values from the runtime.
// It copies the stop-the-world snapshot taken by the GC (work.cpuStats)
// rather than re-accumulating; see the TODO below.
func (a *cpuStatsAggregate) compute() {
	a.cpuStats = work.cpuStats
	// TODO(mknyszek): Update the CPU stats again so that we're not
	// just relying on the STW snapshot. The issue here is that currently
	// this will cause non-monotonicity in the "user" CPU time metric.
	//
	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
}
681
// gcStatsAggregate represents various GC stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type gcStatsAggregate struct {
	// Bytes of heap, stack, and global memory scanned by the GC.
	heapScan    uint64
	stackScan   uint64
	globalsScan uint64
	// totalScan is the sum of the three scan quantities above.
	totalScan uint64
}
690
691// compute populates the gcStatsAggregate with values from the runtime.
692func (a *gcStatsAggregate) compute() {
693	a.heapScan = gcController.heapScan.Load()
694	a.stackScan = gcController.lastStackScan.Load()
695	a.globalsScan = gcController.globalsScan.Load()
696	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
697}
698
// nsToSec converts a duration in nanoseconds to seconds, expressed as a
// float64.
func nsToSec(ns int64) float64 {
	const nanosPerSecond = 1e9
	return float64(ns) / nanosPerSecond
}
704
// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	// ensured records which of the aggregates below have already been
	// populated for this read.
	ensured   statDepSet
	heapStats heapStatsAggregate
	sysStats  sysStatsAggregate
	cpuStats  cpuStatsAggregate
	gcStats   gcStatsAggregate
}
717
718// ensure populates statistics aggregates determined by deps if they
719// haven't yet been populated.
720func (a *statAggregate) ensure(deps *statDepSet) {
721	missing := deps.difference(a.ensured)
722	if missing.empty() {
723		return
724	}
725	for i := statDep(0); i < numStatsDeps; i++ {
726		if !missing.has(i) {
727			continue
728		}
729		switch i {
730		case heapStatsDep:
731			a.heapStats.compute()
732		case sysStatsDep:
733			a.sysStats.compute()
734		case cpuStatsDep:
735			a.cpuStats.compute()
736		case gcStatsDep:
737			a.gcStats.compute()
738		}
739	}
740	a.ensured = a.ensured.union(missing)
741}
742
// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)
755
// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}
762
// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
// (metricSample above corresponds to runtime/metrics.Sample, whose
// value field is a Value.)
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}
770
771// float64HistOrInit tries to pull out an existing float64Histogram
772// from the value, but if none exists, then it allocates one with
773// the given buckets.
774func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
775	var hist *metricFloat64Histogram
776	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
777		hist = (*metricFloat64Histogram)(v.pointer)
778	} else {
779		v.kind = metricKindFloat64Histogram
780		hist = new(metricFloat64Histogram)
781		v.pointer = unsafe.Pointer(hist)
782	}
783	hist.buckets = buckets
784	if len(hist.counts) != len(hist.buckets)-1 {
785		hist.counts = make([]uint64, len(buckets)-1)
786	}
787	return hist
788}
789
// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	// counts holds one count per bucket; it has len(buckets)-1 entries
	// (see float64HistOrInit).
	counts  []uint64
	buckets []float64
}
796
// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
var agg statAggregate
803
// metricName pairs a metric's string key with its value kind.
// NOTE(review): not referenced by the code visible in this file;
// presumably used by test hooks — confirm before removing.
type metricName struct {
	name string
	kind metricKind
}
808
// readMetricNames is the implementation of runtime/metrics.readMetricNames,
// used by the runtime/metrics test and otherwise unreferenced.
//
//go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
func readMetricNames() []string {
	// Take the lock briefly just to learn the map's size, then drop it
	// so the slice allocation below happens outside the critical
	// section. NOTE(review): presumably this is to avoid allocating
	// while holding metricsSema — confirm. The count stays accurate
	// across the unlock: initMetrics runs at most once and
	// godebug_registerMetric only replaces existing entries (it throws
	// on unknown names), so the map never changes size after init.
	metricsLock()
	initMetrics()
	n := len(metrics)
	metricsUnlock()

	list := make([]string, 0, n)

	// Re-acquire the lock to snapshot the key set.
	metricsLock()
	for name := range metrics {
		list = append(list, name)
	}
	metricsUnlock()

	return list
}
829
// readMetrics is the implementation of runtime/metrics.Read.
//
// samplesp/len/cap describe a []metricSample provided by the caller;
// each sample's value is filled in (or its kind set to metricKindBad
// for unknown names).
//
//go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	metricsLock()

	// Ensure the map is initialized.
	initMetrics()

	// Read the metrics.
	readMetricsLocked(samplesp, len, cap)
	metricsUnlock()
}
843
844// readMetricsLocked is the internal, locked portion of readMetrics.
845//
846// Broken out for more robust testing. metricsLock must be held and
847// initMetrics must have been called already.
848func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int) {
849	// Construct a slice from the args.
850	sl := slice{samplesp, len, cap}
851	samples := *(*[]metricSample)(unsafe.Pointer(&sl))
852
853	// Clear agg defensively.
854	agg = statAggregate{}
855
856	// Sample.
857	for i := range samples {
858		sample := &samples[i]
859		data, ok := metrics[sample.name]
860		if !ok {
861			sample.value.kind = metricKindBad
862			continue
863		}
864		// Ensure we have all the stats we need.
865		// agg is populated lazily.
866		agg.ensure(&data.deps)
867
868		// Compute the value based on the stats we have.
869		data.compute(&agg, &sample.value)
870	}
871}
872