// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package time_test

import (
	"errors"
	"fmt"
	"internal/testenv"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	. "time"
	_ "unsafe" // for go:linkname
)

// newTimerFunc simulates NewTimer using AfterFunc,
// but this version will not hit the special cases for channels
// that are used when calling NewTimer.
// This makes it easy to test both paths.
func newTimerFunc(d Duration) *Timer {
	c := make(chan Time, 1)
	t := AfterFunc(d, func() { c <- Now() })
	t.C = c
	return t
}

// haveHighResSleep is true if the system supports at least ~1ms sleeps.
//
//go:linkname haveHighResSleep runtime.haveHighResSleep
var haveHighResSleep bool

// adjustDelay returns an adjusted delay based on the system sleep resolution.
// The Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) is actually
// shorter than 100ms when measured as the difference between time.Now before
// and after the time.Sleep call. This was observed on Windows XP SP3
// (windows/386).
func adjustDelay(t *testing.T, delay Duration) Duration {
	if haveHighResSleep {
		return delay
	}
	t.Log("adjusting delay for low resolution sleep")
	switch runtime.GOOS {
	case "windows":
		return delay - 17*Millisecond
	default:
		t.Fatal("adjustDelay unimplemented on " + runtime.GOOS)
		return 0
	}
}

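// TestSleep checks that Sleep waits at least the (resolution-adjusted)
// requested duration, even when the sleeping goroutine is interrupted
// partway through.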
func TestSleep(t *testing.T) {
	const delay = 100 * Millisecond
	go func() {
		Sleep(delay / 2)
		Interrupt()
	}()
	start := Now()
	Sleep(delay)
	delayadj := adjustDelay(t, delay)
	duration := Since(start)
	if duration < delayadj {
		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
	}
}

// Test the basic function calling behavior. Correct queuing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
	i := 10
	c := make(chan bool)
	var f func()
	f = func() {
		i--
		if i >= 0 {
			AfterFunc(0, f)
			Sleep(1 * Second)
		} else {
			c <- true
		}
	}

	AfterFunc(0, f)
	<-c
}

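// TestTickerStress receives 100 ticks from a 1ns ticker while a second
// goroutine triggers constant GCs, checking that ticks keep arriving.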
func TestTickerStress(t *testing.T) {
	var stop atomic.Bool
	go func() {
		for !stop.Load() {
			runtime.GC()
			// Yield so that the OS can wake up the timer thread,
			// so that it can generate channel sends for the main goroutine,
			// which will eventually set stop to true for us.
			Sleep(Nanosecond)
		}
	}()
	ticker := NewTicker(1)
	for i := 0; i < 100; i++ {
		<-ticker.C
	}
	ticker.Stop()
	stop.Store(true)
}

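// TestTickerConcurrentStress is like TestTickerStress, but with ten
// goroutines competing to receive from the same ticker channel.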
func TestTickerConcurrentStress(t *testing.T) {
	var stop atomic.Bool
	go func() {
		for !stop.Load() {
			runtime.GC()
			// Yield so that the OS can wake up the timer thread,
			// so that it can generate channel sends for the main goroutine,
			// which will eventually set stop to true for us.
			Sleep(Nanosecond)
		}
	}()
	ticker := NewTicker(1)
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				<-ticker.C
			}
		}()
	}
	wg.Wait()
	ticker.Stop()
	stop.Store(true)
}

func TestAfterFuncStarvation(t *testing.T) {
	// Start two goroutines ping-ponging on a channel send.
	// At any given time, at least one of these goroutines is runnable:
	// if the channel buffer is full, the receiver is runnable,
	// and if it is not full, the sender is runnable.
	//
	// In addition, the AfterFunc callback should become runnable after
	// the indicated delay.
	//
	// Even if GOMAXPROCS=1, we expect the runtime to eventually schedule
	// the AfterFunc goroutine instead of the runnable channel goroutine.
	// However, in https://go.dev/issue/65178 this was observed to live-lock
	// on wasip1/wasm and js/wasm after <10000 runs.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	var (
		wg   sync.WaitGroup
		stop atomic.Bool
		c    = make(chan bool, 1)
	)

	wg.Add(2)
	go func() {
		for !stop.Load() {
			c <- true
		}
		close(c)
		wg.Done()
	}()
	go func() {
		for range c {
		}
		wg.Done()
	}()

	AfterFunc(1*Microsecond, func() { stop.Store(true) })
	wg.Wait()
}

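// benchmark runs bench via b.RunParallel after seeding each P with 1<<15
// long-lived garbage timers, so the timed operations run against
// already-populated timer heaps rather than empty ones. The garbage timers
// are stopped once the benchmark completes.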
func benchmark(b *testing.B, bench func(*testing.PB)) {
	// Create an equal number of garbage timers on each P before starting
	// the benchmark.
	var wg sync.WaitGroup
	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
	for i := range garbageAll {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			garbage := make([]*Timer, 1<<15)
			for j := range garbage {
				garbage[j] = AfterFunc(Hour, nil)
			}
			garbageAll[i] = garbage
		}(i)
	}
	wg.Wait()

	b.ResetTimer()
	b.RunParallel(bench)
	b.StopTimer()

	for _, garbage := range garbageAll {
		for _, t := range garbage {
			t.Stop()
		}
	}
}

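// BenchmarkAfterFunc1000 measures a chain of 1000 sequential AfterFunc(0, ...)
// callbacks, each callback scheduling the next.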
func BenchmarkAfterFunc1000(b *testing.B) {
	benchmark(b, func(pb *testing.PB) {
		for pb.Next() {
			n := 1000
			c := make(chan bool)
			var f func()
			f = func() {
				n--
				if n >= 0 {
					AfterFunc(0, f)
				} else {
					c <- true
				}
			}
			AfterFunc(0, f)
			<-c
		}
	})
}

func BenchmarkAfter(b *testing.B) {
	benchmark(b, func(pb *testing.PB) {
		for pb.Next() {
			<-After(1)
		}
	})
}

func BenchmarkStop(b *testing.B) {
	b.Run("impl=chan", func(b *testing.B) {
		benchmark(b, func(pb *testing.PB) {
			for pb.Next() {
				NewTimer(1 * Second).Stop()
			}
		})
	})
	b.Run("impl=func", func(b *testing.B) {
		benchmark(b, func(pb *testing.PB) {
			for pb.Next() {
				newTimerFunc(1 * Second).Stop()
			}
		})
	})
}

func BenchmarkSimultaneousAfterFunc1000(b *testing.B) {
	benchmark(b, func(pb *testing.PB) {
		for pb.Next() {
			n := 1000
			var wg sync.WaitGroup
			wg.Add(n)
			for range n {
				AfterFunc(0, wg.Done)
			}
			wg.Wait()
		}
	})
}

func BenchmarkStartStop1000(b *testing.B) {
	benchmark(b, func(pb *testing.PB) {
		for pb.Next() {
			const N = 1000
			timers := make([]*Timer, N)
			for i := range timers {
				timers[i] = AfterFunc(Hour, nil)
			}

			for i := range timers {
				timers[i].Stop()
			}
		}
	})
}

func BenchmarkReset(b *testing.B) {
	b.Run("impl=chan", func(b *testing.B) {
		benchmark(b, func(pb *testing.PB) {
			t := NewTimer(Hour)
			for pb.Next() {
				t.Reset(Hour)
			}
			t.Stop()
		})
	})
	b.Run("impl=func", func(b *testing.B) {
		benchmark(b, func(pb *testing.PB) {
			t := newTimerFunc(Hour)
			for pb.Next() {
				t.Reset(Hour)
			}
			t.Stop()
		})
	})
}

func BenchmarkSleep1000(b *testing.B) {
	benchmark(b, func(pb *testing.PB) {
		for pb.Next() {
			const N = 1000
			var wg sync.WaitGroup
			wg.Add(N)
			for range N {
				go func() {
					Sleep(Nanosecond)
					wg.Done()
				}()
			}
			wg.Wait()
		}
	})
}

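// TestAfter checks that After(delay) blocks for at least the adjusted delay
// and that the Time it delivers is no earlier than start plus that delay.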
func TestAfter(t *testing.T) {
	const delay = 100 * Millisecond
	start := Now()
	end := <-After(delay)
	delayadj := adjustDelay(t, delay)
	if duration := Since(start); duration < delayadj {
		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
	}
	if min := start.Add(delayadj); end.Before(min) {
		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
	}
}

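// TestAfterTick times Count consecutive After(Delta) waits and checks the
// total elapsed time against Count*Delta, allowing wide tolerances in both
// directions for slow or loaded machines.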
func TestAfterTick(t *testing.T) {
	t.Parallel()
	const Count = 10
	Delta := 100 * Millisecond
	if testing.Short() {
		Delta = 10 * Millisecond
	}
	t0 := Now()
	for i := 0; i < Count; i++ {
		<-After(Delta)
	}
	t1 := Now()
	d := t1.Sub(t0)
	target := Delta * Count
	if d < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
	}
	if !testing.Short() && d > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
	}
}

func TestAfterStop(t *testing.T) {
	t.Run("impl=chan", func(t *testing.T) {
		testAfterStop(t, NewTimer)
	})
	t.Run("impl=func", func(t *testing.T) {
		testAfterStop(t, newTimerFunc)
	})
}

func testAfterStop(t *testing.T, newTimer func(Duration) *Timer) {
	// We want to test that we stop a timer before it runs.
	// We also want to test that it didn't run after a longer timer.
	// Since we don't want the test to run for too long, we don't
	// want to use lengthy times. That makes the test inherently flaky.
	// So only report an error if it fails five times in a row.

	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}

	for i := 0; i < 5; i++ {
		AfterFunc(100*Millisecond, func() {})
		t0 := newTimer(50 * Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
		c2 := After(200 * Millisecond)
		if !t0.Stop() {
			errs = append(errs, "failed to stop event 0")
			continue
		}
		if !t1.Stop() {
			errs = append(errs, "failed to stop event 1")
			continue
		}
		<-c2
		select {
		case <-t0.C:
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		if t1.Stop() {
			errs = append(errs, "Stop returned true twice")
			continue
		}

		// Test passed, so all done.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}

		return
	}

	t.Errorf("saw %d errors", len(errs))
	logErrs()
}

func TestAfterQueuing(t *testing.T) {
	t.Run("impl=chan", func(t *testing.T) {
		testAfterQueuing(t, After)
	})
	t.Run("impl=func", func(t *testing.T) {
		testAfterQueuing(t, func(d Duration) <-chan Time { return newTimerFunc(d).C })
	})
}

func testAfterQueuing(t *testing.T, after func(Duration) <-chan Time) {
	// This test flakes out on some systems,
	// so we'll try it a few times before declaring it a failure.
	const attempts = 5
	err := errors.New("!=nil")
	for i := 0; i < attempts && err != nil; i++ {
		delta := Duration(20+i*50) * Millisecond
		if err = testAfterQueuing1(delta, after); err != nil {
			t.Logf("attempt %v failed: %v", i, err)
		}
	}
	if err != nil {
		t.Fatal(err)
	}
}

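// slots lists the wakeup slot for each timer started by testAfterQueuing1,
// in scrambled order; the repeated values give several timers identical
// deadlines.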
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}

type afterResult struct {
	slot int
	t    Time
}

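// await receives a Time from ac and forwards it to result, tagged with slot.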
func await(slot int, result chan<- afterResult, ac <-chan Time) {
	result <- afterResult{slot, <-ac}
}

func testAfterQueuing1(delta Duration, after func(Duration) <-chan Time) error {
	// make the result channel buffered because we don't want
	// to depend on channel queuing semantics that might
	// possibly change in the future.
	result := make(chan afterResult, len(slots))

	t0 := Now()
	for _, slot := range slots {
		go await(slot, result, after(Duration(slot)*delta))
	}
	var order []int
	var times []Time
	for range slots {
		r := <-result
		order = append(order, r.slot)
		times = append(times, r.t)
	}
	for i := range order {
		if i > 0 && order[i] < order[i-1] {
			return fmt.Errorf("After calls returned out of order: %v", order)
		}
	}
	for i, t := range times {
		dt := t.Sub(t0)
		target := Duration(order[i]) * delta
		if dt < target-delta/2 || dt > target+delta*10 {
			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
		}
	}
	return nil
}

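// TestTimerStopStress starts 100 two-second timers and stops each after one
// second; any timer that fires anyway reports an error.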
func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	t.Parallel()
	for i := 0; i < 100; i++ {
		go func(i int) {
			timer := AfterFunc(2*Second, func() {
				t.Errorf("timer %d was not stopped", i)
			})
			Sleep(1 * Second)
			timer.Stop()
		}(i)
	}
	Sleep(3 * Second)
}

func TestSleepZeroDeadlock(t *testing.T) {
	// Sleep(0) used to hang; the sequence of events was as follows.
	// Sleep(0) sets G's status to Gwaiting, but then immediately returns,
	// leaving that status in place.
	// Then the goroutine calls e.g. new and falls down into the scheduler
	// due to a pending GC. After the GC, nobody wakes the goroutine up
	// from the Gwaiting status.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	c := make(chan bool)
	go func() {
		for i := 0; i < 100; i++ {
			runtime.GC()
		}
		c <- true
	}()
	for i := 0; i < 100; i++ {
		Sleep(0)
		tmp := make(chan bool, 1)
		tmp <- true
		<-tmp
	}
	<-c
}

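// testReset exercises Reset at time scale d: resetting an unfired timer must
// return true and push back the firing, and resetting an already-fired timer
// must return false.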
func testReset(d Duration) error {
	t0 := NewTimer(2 * d)
	Sleep(d)
	if !t0.Reset(3 * d) {
		return errors.New("resetting unfired timer returned false")
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
		return errors.New("timer fired early")
	default:
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
	default:
		return errors.New("reset timer did not fire")
	}

	if t0.Reset(50 * Millisecond) {
		return errors.New("resetting expired timer returned true")
	}
	return nil
}

func TestReset(t *testing.T) {
	// We try to run this test with increasingly larger multiples
	// until one works, so that slow, loaded hardware isn't as flaky,
	// but without slowing down fast machines unnecessarily.
	//
	// (maxDuration is several orders of magnitude longer than we
	// expect this test to actually take on a fast, unloaded machine.)
	d := 1 * Millisecond
	const maxDuration = 10 * Second
	for {
		err := testReset(d)
		if err == nil {
			break
		}
		d *= 2
		if d > maxDuration {
			// Give up rather than doubling (and sleeping) forever.
			t.Error(err)
			break
		}
		t.Logf("%v; trying duration %v", err, d)
	}
}

// Test that sleeping (via Sleep or Timer) for an interval so large it
// overflows does not result in a short sleep duration. Nor does it interfere
// with execution of other timers. If it does, timers in this or subsequent
// tests may not fire.
func TestOverflowSleep(t *testing.T) {
	const big = Duration(int64(1<<63 - 1))

	go func() {
		Sleep(big)
		// On failure, this may return after the test has completed, so
		// we need to panic instead.
		panic("big sleep returned")
	}()

	select {
	case <-After(big):
		t.Fatalf("big timeout fired")
	case <-After(25 * Millisecond):
		// OK
	}

	const neg = Duration(-1 << 63)
	Sleep(neg) // Returns immediately.
	select {
	case <-After(neg):
		// OK
	case <-After(1 * Second):
		t.Fatalf("negative timeout didn't fire")
	}
}

// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
	ticker := NewTicker(Hour)
	defer func() {
		// would deadlock here before the fix due to
		// the lock taken before the segfault.
		ticker.Stop()

		if r := recover(); r == nil {
			t.Error("Expected panic, but none happened.")
		}
	}()

	// cause a panic due to a segfault
	var timer *Timer
	timer.Stop()
	t.Error("Should be unreachable.")
}

func TestOverflowPeriodRuntimeTimer(t *testing.T) {
	// This may hang forever if timers are broken. See comment near
	// the end of CheckRuntimeTimerOverflow in internal_test.go.
	CheckRuntimeTimerPeriodOverflow()
}

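// checkZeroPanicString is deferred by the zero-Timer tests below to verify
// that the recovered panic message mentions an uninitialized Timer.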
func checkZeroPanicString(t *testing.T) {
	e := recover()
	s, _ := e.(string)
	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
		t.Errorf("panic = %v; want substring %q", e, want)
	}
}

func TestZeroTimerResetPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Reset(1)
}

func TestZeroTimerStopPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Stop()
}

// Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
func TestZeroTimer(t *testing.T) {
	t.Run("impl=chan", func(t *testing.T) {
		testZeroTimer(t, NewTimer)
	})
	t.Run("impl=func", func(t *testing.T) {
		testZeroTimer(t, newTimerFunc)
	})
	t.Run("impl=cache", func(t *testing.T) {
		timer := newTimerFunc(Hour)
		testZeroTimer(t, func(d Duration) *Timer {
			timer.Reset(d)
			return timer
		})
	})
}

func testZeroTimer(t *testing.T, newTimer func(Duration) *Timer) {
	if testing.Short() {
		t.Skip("-short")
	}

	for i := 0; i < 1000000; i++ {
		s := Now()
		ti := newTimer(0)
		<-ti.C
		if diff := Since(s); diff > 2*Second {
			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
		}
	}
}

// Test that rapidly moving a timer earlier doesn't cause it to get dropped.
// Issue 47329.
func TestTimerModifiedEarlier(t *testing.T) {
	if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
		testenv.SkipFlaky(t, 50470)
	}

	past := Until(Unix(0, 0))
	count := 1000
	fail := 0
	for i := 0; i < count; i++ {
		timer := newTimerFunc(Hour)
		for j := 0; j < 10; j++ {
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(past)
		}

		deadline := NewTimer(10 * Second)
		defer deadline.Stop()
		now := Now()
		select {
		case <-timer.C:
			if since := Since(now); since > 8*Second {
				t.Errorf("timer took too long (%v)", since)
				fail++
			}
		case <-deadline.C:
			t.Error("deadline expired")
		}
	}

	if fail > 0 {
		t.Errorf("%d failures", fail)
	}
}

// Test that rapidly moving timers earlier and later doesn't cause
// some of the sleep times to be lost.
// Issue 47762
func TestAdjustTimers(t *testing.T) {
	var rnd = rand.New(rand.NewSource(Now().UnixNano()))

	timers := make([]*Timer, 100)
	states := make([]int, len(timers))
	indices := rnd.Perm(len(timers))

	for len(indices) != 0 {
		var ii = rnd.Intn(len(indices))
		var i = indices[ii]

		var timer = timers[i]
		var state = states[i]
		states[i]++

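		// Each timer advances through states 0 through 10 in order, though
		// the timers themselves are visited in random order: create (0),
		// drain (1), alternating long Reset and Stop-and-drain rounds (2-7),
		// a zero-duration Reset (8), a timed receive (9), and removal from
		// the index set (10).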
		switch state {
		case 0:
			timers[i] = newTimerFunc(0)

		case 1:
			<-timer.C // Timer is now idle.

		// Reset to various long durations, which we'll cancel.
		case 2:
			if timer.Reset(1 * Minute) {
				panic("shouldn't be active (1)")
			}
		case 4:
			if timer.Reset(3 * Minute) {
				panic("shouldn't be active (3)")
			}
		case 6:
			if timer.Reset(2 * Minute) {
				panic("shouldn't be active (2)")
			}

		// Stop and drain a long-duration timer.
		case 3, 5, 7:
			if !timer.Stop() {
				t.Logf("timer %d state %d Stop returned false", i, state)
				<-timer.C
			}

		// Start a short-duration timer we expect to select without blocking.
		case 8:
			if timer.Reset(0) {
				t.Fatal("timer.Reset returned true")
			}
		case 9:
			now := Now()
			<-timer.C
			dur := Since(now)
			if dur > 750*Millisecond {
				t.Errorf("timer %d took %v to complete", i, dur)
			}

		// Timer is done. Swap with tail and remove.
		case 10:
			indices[ii] = indices[len(indices)-1]
			indices = indices[:len(indices)-1]
		}
	}
}

func TestStopResult(t *testing.T) {
	testStopResetResult(t, true)
}

func TestResetResult(t *testing.T) {
	testStopResetResult(t, false)
}

// Test that when racing between running a timer and stopping a timer Stop
// consistently indicates whether a value can be read from the channel.
// Issue #69312.
func testStopResetResult(t *testing.T, testStop bool) {
	for _, name := range []string{"0", "1", "2"} {
		t.Run("asynctimerchan="+name, func(t *testing.T) {
			testStopResetResultGODEBUG(t, testStop, name)
		})
	}
}

func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) {
	t.Setenv("GODEBUG", "asynctimerchan="+godebug)

	stopOrReset := func(timer *Timer) bool {
		if testStop {
			return timer.Stop()
		} else {
			return timer.Reset(1 * Hour)
		}
	}

	start := make(chan struct{})
	var wg sync.WaitGroup
	const N = 1000
	wg.Add(N)
	for range N {
		go func() {
			defer wg.Done()
			<-start
			for j := 0; j < 100; j++ {
				timer1 := NewTimer(1 * Millisecond)
				timer2 := NewTimer(1 * Millisecond)
				select {
				case <-timer1.C:
					if !stopOrReset(timer2) {
						// The test fails if this
						// channel read times out.
						<-timer2.C
					}
				case <-timer2.C:
					if !stopOrReset(timer1) {
						// The test fails if this
						// channel read times out.
						<-timer1.C
					}
				}
			}
		}()
	}
	close(start)
	wg.Wait()
}

// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860
func BenchmarkParallelTimerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	// allocate memory now to avoid GC interference later.
	timerCount := gmp - 1
	stats := make([]struct {
		sum   float64
		max   Duration
		count int64
		_     [5]int64 // cache line padding
	}, timerCount)

	// Ensure the time to start new threads to service timers will not pollute
	// the results.
	warmupScheduler(gmp)

	// Note that, other than the AfterFunc calls this benchmark is measuring,
	// it avoids using any other timers. In particular, the main goroutine uses
	// doWork to spin for some durations because up through Go 1.15 if all
	// threads are idle sysmon could leave deep sleep when we wake.

	// Ensure sysmon is in deep sleep.
	doWork(30 * Millisecond)

	b.ResetTimer()

	const delay = Millisecond
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < b.N; i++ {
		wg.Add(timerCount)
		atomic.StoreInt32(&count, 0)
		for j := 0; j < timerCount; j++ {
			j := j
			expectedWakeup := Now().Add(delay)
			AfterFunc(delay, func() {
				late := Since(expectedWakeup)
				if late < 0 {
					late = 0
				}
				stats[j].count++
				stats[j].sum += float64(late.Nanoseconds())
				if late > stats[j].max {
					stats[j].max = late
				}
				atomic.AddInt32(&count, 1)
				for atomic.LoadInt32(&count) < int32(timerCount) {
					// spin until all timers fired
				}
				wg.Done()
			})
		}

		for atomic.LoadInt32(&count) < int32(timerCount) {
			// spin until all timers fired
		}
		wg.Wait()

		// Spin for a bit to let the other scheduler threads go idle before the
		// next round.
		doWork(Millisecond)
	}
	var total float64
	var samples float64
	max := Duration(0)
	for _, s := range stats {
		if s.max > max {
			max = s.max
		}
		total += s.sum
		samples += float64(s.count)
	}
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(total/samples, "avg-late-ns")
	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
}

// Benchmark timer latency with staggered wakeup times and varying CPU bound
// workloads. https://golang.org/issue/38860
func BenchmarkStaggeredTickerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	const delay = 3 * Millisecond

	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
				tickerCount := gmp * tickersPerP
				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
					// allocate memory now to avoid GC interference later.
					stats := make([]struct {
						sum   float64
						max   Duration
						count int64
						_     [5]int64 // cache line padding
					}, tickerCount)

					// Ensure the time to start new threads to service timers
					// will not pollute the results.
					warmupScheduler(gmp)

					b.ResetTimer()

					var wg sync.WaitGroup
					wg.Add(tickerCount)
					for j := 0; j < tickerCount; j++ {
						j := j
						doWork(delay / Duration(gmp))
						expectedWakeup := Now().Add(delay)
						ticker := NewTicker(delay)
						go func(c int, ticker *Ticker, firstWake Time) {
							defer ticker.Stop()

							for ; c > 0; c-- {
								<-ticker.C
								late := Since(expectedWakeup)
								if late < 0 {
									late = 0
								}
								stats[j].count++
								stats[j].sum += float64(late.Nanoseconds())
								if late > stats[j].max {
									stats[j].max = late
								}
								expectedWakeup = expectedWakeup.Add(delay)
								doWork(dur)
							}
							wg.Done()
						}(b.N, ticker, expectedWakeup)
					}
					wg.Wait()

					var total float64
					var samples float64
					max := Duration(0)
					for _, s := range stats {
						if s.max > max {
							max = s.max
						}
						total += s.sum
						samples += float64(s.count)
					}
					b.ReportMetric(0, "ns/op")
					b.ReportMetric(total/samples, "avg-late-ns")
					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
				})
			}
		})
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

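// doWork busy-loops until dur has elapsed, without using any timers.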
func doWork(dur Duration) {
	start := Now()
	for Since(start) < dur {
	}
}

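// BenchmarkAdjustTimers10000 creates 10000 long-duration timers, retargets
// the last one to fire almost immediately, and then stops them all,
// stressing adjustment of an already-populated timer heap.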
func BenchmarkAdjustTimers10000(b *testing.B) {
	benchmark(b, func(pb *testing.PB) {
		for pb.Next() {
			const n = 10000
			timers := make([]*Timer, 0, n)
			for range n {
				t := AfterFunc(Hour, func() {})
				timers = append(timers, t)
			}
			timers[n-1].Reset(Nanosecond)
			Sleep(Microsecond)
			for _, t := range timers {
				t.Stop()
			}
		}
	})
}
