// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package net

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"
)

func init() {
	// Install a hook to ensure that a 1ns timeout will always
	// be exceeded by the time Dial gets to the relevant system call.
	//
	// Without this, systems with a very large timer granularity — such as
	// Windows — may be able to accept connections without measurably exceeding
	// even an implausibly short deadline.
	testHookStepTime = func() {
		now := time.Now()
		for time.Since(now) == 0 {
			time.Sleep(1 * time.Nanosecond)
		}
	}
}

var dialTimeoutTests = []struct {
	initialTimeout time.Duration
	initialDelta   time.Duration // for deadline
}{
	// Tests that dial timeouts and deadlines in the past work.
	{-5 * time.Second, 0},
	{0, -5 * time.Second},
	{-5 * time.Second, 5 * time.Second}, // timeout over deadline
	{-1 << 63, 0},
	{0, -1 << 63},

	{1 * time.Millisecond, 0},
	{0, 1 * time.Millisecond},
	{1 * time.Millisecond, 5 * time.Second}, // timeout over deadline
}
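
// effectiveDialDeadline is an illustrative sketch, not used by the tests
// themselves, of how the "timeout over deadline" cases above combine the two
// fields: per the Dialer documentation, when both Timeout and Deadline are
// set, the dial fails at whichever comes first, so the effective deadline is
// the earlier of the two. The helper name is ours; the dialer performs the
// real computation internally.
func effectiveDialDeadline(d *Dialer, now time.Time) time.Time {
	deadline := d.Deadline
	if d.Timeout != 0 {
		// A relative Timeout becomes an absolute deadline measured from now.
		timeoutDeadline := now.Add(d.Timeout)
		if deadline.IsZero() || timeoutDeadline.Before(deadline) {
			deadline = timeoutDeadline
		}
	}
	return deadline
}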

func TestDialTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	t.Parallel()

	ln := newLocalListener(t, "tcp")
	defer func() {
		if err := ln.Close(); err != nil {
			t.Error(err)
		}
	}()

	for _, tt := range dialTimeoutTests {
		t.Run(fmt.Sprintf("%v/%v", tt.initialTimeout, tt.initialDelta), func(t *testing.T) {
			// We don't run these subtests in parallel because we don't know how big
			// the kernel's accept queue is, and we don't want to accidentally saturate
			// it with concurrent calls. (That could cause the Dial to fail with
			// ECONNREFUSED or ECONNRESET instead of a timeout error.)
			d := Dialer{Timeout: tt.initialTimeout}
			delta := tt.initialDelta

			var (
				beforeDial time.Time
				afterDial  time.Time
				err        error
			)
			for {
				if delta != 0 {
					d.Deadline = time.Now().Add(delta)
				}

				beforeDial = time.Now()

				var c Conn
				c, err = d.Dial(ln.Addr().Network(), ln.Addr().String())
				afterDial = time.Now()

				if err != nil {
					break
				}

				// Even though we're not calling Accept on the Listener, the kernel may
				// spuriously accept connections on its behalf. If that happens, we will
				// close the connection (to try to get it out of the kernel's accept
				// queue) and try a shorter timeout.
				//
				// We assume that we will reach a point where the call actually does
				// time out, although in theory (since this socket is on a loopback
				// address) a sufficiently clever kernel could notice that no Accept
				// call is pending and bypass both the queue and the timeout to return
				// another error immediately.
				t.Logf("closing spurious connection from Dial")
				c.Close()

				if delta <= 1 && d.Timeout <= 1 {
					t.Fatalf("can't reduce Timeout or Deadline")
				}
				if delta > 1 {
					delta /= 2
					t.Logf("reducing Deadline delta to %v", delta)
				}
				if d.Timeout > 1 {
					d.Timeout /= 2
					t.Logf("reducing Timeout to %v", d.Timeout)
				}
			}

			if d.Deadline.IsZero() || afterDial.Before(d.Deadline) {
				delay := afterDial.Sub(beforeDial)
				if delay < d.Timeout {
					t.Errorf("Dial returned after %v; want ≥%v", delay, d.Timeout)
				}
			}

			if perr := parseDialError(err); perr != nil {
				t.Errorf("unexpected error from Dial: %v", perr)
			}
			if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
				t.Errorf("Dial: %v, want timeout", err)
			}
		})
	}
}

func TestDialTimeoutMaxDuration(t *testing.T) {
	ln := newLocalListener(t, "tcp")
	defer func() {
		if err := ln.Close(); err != nil {
			t.Error(err)
		}
	}()

	for _, tt := range []struct {
		timeout time.Duration
		delta   time.Duration // for deadline
	}{
		// Large timeouts that would overflow an int64 of Unix nanoseconds.
		{1<<63 - 1, 0},
		{0, 1<<63 - 1},
	} {
		t.Run(fmt.Sprintf("timeout=%s/delta=%s", tt.timeout, tt.delta), func(t *testing.T) {
			d := Dialer{Timeout: tt.timeout}
			if tt.delta != 0 {
				d.Deadline = time.Now().Add(tt.delta)
			}
			c, err := d.Dial(ln.Addr().Network(), ln.Addr().String())
			if err != nil {
				t.Fatal(err)
			}
			if err := c.Close(); err != nil {
				t.Error(err)
			}
		})
	}
}

func TestAcceptTimeout(t *testing.T) {
	t.Parallel()

	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	timeouts := []time.Duration{
		-5 * time.Second,
		10 * time.Millisecond,
	}

	for _, timeout := range timeouts {
		timeout := timeout
		t.Run(fmt.Sprintf("%v", timeout), func(t *testing.T) {
			t.Parallel()

			ln := newLocalListener(t, "tcp")
			defer ln.Close()

			if timeout >= 0 {
				// Don't dial the listener at all, so that Accept will hang.
			} else {
				// A deadline in the past should cause Accept to fail even if there are
				// incoming connections available. Try to make one available before the
				// call to Accept happens. (It's ok if the timing doesn't always work
				// out that way, though: the test should pass regardless.)
				ctx, cancel := context.WithCancel(context.Background())
				dialDone := make(chan struct{})

				// Ensure that our background Dial returns before we close the listener.
				// Otherwise, the listener's port could be reused immediately and we
				// might spuriously Dial some completely unrelated socket, causing some
				// other test to see an unexpected extra connection.
				defer func() {
					cancel()
					<-dialDone
				}()

				go func() {
					defer close(dialDone)
					d := Dialer{}
					c, err := d.DialContext(ctx, ln.Addr().Network(), ln.Addr().String())
					if err != nil {
						// If the timing didn't work out, it is possible for this Dial
						// to return an error (depending on the kernel's buffering behavior).
						// In https://go.dev/issue/65240 we saw failures with ECONNREFUSED
						// and ECONNRESET.
						//
						// What this test really cares about is the behavior of Accept, not
						// Dial, so just log the error and ignore it.
						t.Logf("DialContext: %v", err)
						return
					}
					t.Logf("Dialed %v -> %v", c.LocalAddr(), c.RemoteAddr())
					c.Close()
				}()

				time.Sleep(10 * time.Millisecond)
			}

			if err := ln.(*TCPListener).SetDeadline(time.Now().Add(timeout)); err != nil {
				t.Fatal(err)
			}
			t.Logf("ln.SetDeadline(time.Now().Add(%v))", timeout)

			c, err := ln.Accept()
			if err == nil {
				c.Close()
			}
			t.Logf("ln.Accept: %v", err)

			if perr := parseAcceptError(err); perr != nil {
				t.Error(perr)
			}
			if !isDeadlineExceeded(err) {
				t.Error("wanted deadline exceeded")
			}
		})
	}
}

func TestAcceptTimeoutMustReturn(t *testing.T) {
	t.Parallel()

	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil {
		t.Error(err)
	}
	if err := ln.(*TCPListener).SetDeadline(time.Now().Add(10 * time.Millisecond)); err != nil {
		t.Error(err)
	}
	c, err := ln.Accept()
	if err == nil {
		c.Close()
	}

	if perr := parseAcceptError(err); perr != nil {
		t.Error(perr)
	}
	if !isDeadlineExceeded(err) {
		t.Fatal(err)
	}
}

func TestAcceptTimeoutMustNotReturn(t *testing.T) {
	t.Parallel()

	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	maxch := make(chan *time.Timer)
	ch := make(chan error)
	go func() {
		if err := ln.(*TCPListener).SetDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil {
			t.Error(err)
		}
		maxch <- time.NewTimer(100 * time.Millisecond)
		_, err := ln.Accept()
		ch <- err
	}()

	max := <-maxch
	defer max.Stop()

	select {
	case err := <-ch:
		if perr := parseAcceptError(err); perr != nil {
			t.Error(perr)
		}
		t.Fatalf("expected Accept to not return, but it returned with %v", err)
	case <-max.C:
		ln.Close()
		<-ch // wait for tester goroutine to stop
	}
}

var readTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that read deadlines work, even if there's data ready
	// to be read.
	{-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}},

	{50 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}},
}

// There is a very similar copy of this in os/timeout_test.go.
func TestReadTimeout(t *testing.T) {
	handler := func(ls *localServer, ln Listener) {
		c, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		c.Write([]byte("READ TIMEOUT TEST"))
		defer c.Close()
	}
	ls := newLocalServer(t, "tcp")
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	for i, tt := range readTimeoutTests {
		if err := c.SetReadDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		var b [1]byte
		for j, xerr := range tt.xerrs {
			for {
				n, err := c.Read(b[:])
				if xerr != nil {
					if perr := parseReadError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if !isDeadlineExceeded(err) {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					time.Sleep(tt.timeout / 3)
					continue
				}
				if n != 0 {
					t.Fatalf("#%d/%d: read %d; want 0", i, j, n)
				}
				break
			}
		}
	}
}

// There is a very similar copy of this in os/timeout_test.go.
func TestReadTimeoutMustNotReturn(t *testing.T) {
	t.Parallel()

	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	maxch := make(chan *time.Timer)
	ch := make(chan error)
	go func() {
		if err := c.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetWriteDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetReadDeadline(noDeadline); err != nil {
			t.Error(err)
		}
		maxch <- time.NewTimer(100 * time.Millisecond)
		var b [1]byte
		_, err := c.Read(b[:])
		ch <- err
	}()

	max := <-maxch
	defer max.Stop()

	select {
	case err := <-ch:
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		t.Fatalf("expected Read to not return, but it returned with %v", err)
	case <-max.C:
		c.Close()
		err := <-ch // wait for tester goroutine to stop
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		if nerr, ok := err.(Error); !ok || nerr.Timeout() || nerr.Temporary() {
			t.Fatal(err)
		}
	}
}

var readFromTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that read deadlines work, even if there's data ready
	// to be read.
	{-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}},

	{50 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}},
}

func TestReadFromTimeout(t *testing.T) {
	ch := make(chan Addr)
	defer close(ch)
	handler := func(ls *localPacketServer, c PacketConn) {
		if dst, ok := <-ch; ok {
			c.WriteTo([]byte("READFROM TIMEOUT TEST"), dst)
		}
	}
	ls := newLocalPacketServer(t, "udp")
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	host, _, err := SplitHostPort(ls.PacketConn.LocalAddr().String())
	if err != nil {
		t.Fatal(err)
	}
	c, err := ListenPacket(ls.PacketConn.LocalAddr().Network(), JoinHostPort(host, "0"))
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	ch <- c.LocalAddr()

	for i, tt := range readFromTimeoutTests {
		if err := c.SetReadDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		var b [1]byte
		for j, xerr := range tt.xerrs {
			for {
				n, _, err := c.ReadFrom(b[:])
				if xerr != nil {
					if perr := parseReadError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if !isDeadlineExceeded(err) {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					time.Sleep(tt.timeout / 3)
					continue
				}
				if nerr, ok := err.(Error); ok && nerr.Timeout() && n != 0 {
					t.Fatalf("#%d/%d: read %d; want 0", i, j, n)
				}
				break
			}
		}
	}
}

var writeTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that write deadlines work, even if there's buffer
	// space available to write.
	{-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}},

	{10 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}},
}

// There is a very similar copy of this in os/timeout_test.go.
func TestWriteTimeout(t *testing.T) {
	t.Parallel()

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	for i, tt := range writeTimeoutTests {
		c, err := Dial(ln.Addr().Network(), ln.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		defer c.Close()

		if err := c.SetWriteDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		for j, xerr := range tt.xerrs {
			for {
				n, err := c.Write([]byte("WRITE TIMEOUT TEST"))
				if xerr != nil {
					if perr := parseWriteError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if !isDeadlineExceeded(err) {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					time.Sleep(tt.timeout / 3)
					continue
				}
				if n != 0 {
					t.Fatalf("#%d/%d: wrote %d; want 0", i, j, n)
				}
				break
			}
		}
	}
}

// There is a very similar copy of this in os/timeout_test.go.
func TestWriteTimeoutMustNotReturn(t *testing.T) {
	t.Parallel()

	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	maxch := make(chan *time.Timer)
	ch := make(chan error)
	go func() {
		if err := c.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetReadDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetWriteDeadline(noDeadline); err != nil {
			t.Error(err)
		}
		maxch <- time.NewTimer(100 * time.Millisecond)
		var b [1024]byte
		for {
			if _, err := c.Write(b[:]); err != nil {
				ch <- err
				break
			}
		}
	}()

	max := <-maxch
	defer max.Stop()

	select {
	case err := <-ch:
		if perr := parseWriteError(err); perr != nil {
			t.Error(perr)
		}
		t.Fatalf("expected Write to not return, but it returned with %v", err)
	case <-max.C:
		c.Close()
		err := <-ch // wait for tester goroutine to stop
		if perr := parseWriteError(err); perr != nil {
			t.Error(perr)
		}
		if nerr, ok := err.(Error); !ok || nerr.Timeout() || nerr.Temporary() {
			t.Fatal(err)
		}
	}
}

func TestWriteToTimeout(t *testing.T) {
	t.Parallel()

	c1 := newLocalPacketListener(t, "udp")
	defer c1.Close()

	host, _, err := SplitHostPort(c1.LocalAddr().String())
	if err != nil {
		t.Fatal(err)
	}

	timeouts := []time.Duration{
		-5 * time.Second,
		10 * time.Millisecond,
	}

	for _, timeout := range timeouts {
		t.Run(fmt.Sprint(timeout), func(t *testing.T) {
			c2, err := ListenPacket(c1.LocalAddr().Network(), JoinHostPort(host, "0"))
			if err != nil {
				t.Fatal(err)
			}
			defer c2.Close()

			if err := c2.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
				t.Fatalf("SetWriteDeadline: %v", err)
			}
			backoff := 1 * time.Millisecond
			nDeadlineExceeded := 0
			for j := 0; nDeadlineExceeded < 2; j++ {
				n, err := c2.WriteTo([]byte("WRITETO TIMEOUT TEST"), c1.LocalAddr())
				t.Logf("#%d: WriteTo: %d, %v", j, n, err)
				if err == nil && timeout >= 0 && nDeadlineExceeded == 0 {
					// If the timeout is nonnegative, some number of WriteTo calls may
					// succeed before the timeout takes effect.
					t.Logf("WriteTo succeeded; sleeping %v", timeout/3)
					time.Sleep(timeout / 3)
					continue
				}
				if isENOBUFS(err) {
					t.Logf("WriteTo: %v", err)
					// We're looking for a deadline exceeded error, but if the kernel's
					// network buffers are saturated we may see ENOBUFS instead (see
					// https://go.dev/issue/49930). Give it some time to unsaturate.
					time.Sleep(backoff)
					backoff *= 2
					continue
				}
				if perr := parseWriteError(err); perr != nil {
					t.Errorf("failed to parse error: %v", perr)
				}
				if !isDeadlineExceeded(err) {
					t.Errorf("error is not 'deadline exceeded'")
				}
				if n != 0 {
					t.Errorf("unexpectedly wrote %d bytes", n)
				}
				if !t.Failed() {
					t.Logf("WriteTo timed out as expected")
				}
				nDeadlineExceeded++
			}
		})
	}
}

const (
	// minDynamicTimeout is the minimum timeout to attempt for
	// tests that automatically increase timeouts until success.
	//
	// Lower values may allow tests to succeed more quickly if the value is close
	// to the true minimum, but may require more iterations (and waste more time
	// and CPU power on failed attempts) if the timeout is too low.
	minDynamicTimeout = 1 * time.Millisecond

	// maxDynamicTimeout is the maximum timeout to attempt for
	// tests that automatically increase timeouts until success.
	//
	// This should be a strict upper bound on the latency required to hit a
	// timeout accurately, even on a slow or heavily-loaded machine. If a test
	// would increase the timeout beyond this value, the test fails.
	maxDynamicTimeout = 4 * time.Second
)

// timeoutUpperBound returns the maximum time that we expect a timeout of
// duration d to take to return to the caller.
func timeoutUpperBound(d time.Duration) time.Duration {
	switch runtime.GOOS {
	case "openbsd", "netbsd":
		// NetBSD and OpenBSD seem to be unable to reliably hit deadlines even when
		// the absolute durations are long.
		// In https://build.golang.org/log/c34f8685d020b98377dd4988cd38f0c5bd72267e,
		// we observed that an openbsd-amd64-68 builder took 4.090948779s for a
		// 2.983020682s timeout (37.1% overhead).
		// (See https://go.dev/issue/50189 for further detail.)
		// Give them lots of slop to compensate.
		return d * 3 / 2
	}
	// Other platforms seem to hit their deadlines more reliably,
	// at least when they are long enough to cover scheduling jitter.
	return d * 11 / 10
}

// nextTimeout returns the next timeout to try after an operation took the given
// actual duration with a timeout shorter than that duration.
func nextTimeout(actual time.Duration) (next time.Duration, ok bool) {
	if actual >= maxDynamicTimeout {
		return maxDynamicTimeout, false
	}
	// Since the previous attempt took actual, we can't expect to beat that
	// duration by any significant margin. Try the next attempt with an arbitrary
	// factor above that, so that our growth curve is at least exponential.
	next = actual * 5 / 4
	if next > maxDynamicTimeout {
		return maxDynamicTimeout, true
	}
	return next, true
}
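
// runWithDynamicTimeout is an illustrative sketch, not called by any test, of
// the retry pattern shared by the *Fluctuation tests below: start at
// minDynamicTimeout and, whenever the operation overshoots timeoutUpperBound,
// grow the timeout via nextTimeout until the operation either fits within the
// bound or would exceed maxDynamicTimeout. The helper and its op parameter are
// ours, introduced only to summarize that pattern in one place.
func runWithDynamicTimeout(t *testing.T, op func(d time.Duration) (actual time.Duration, err error)) {
	d := minDynamicTimeout
	for {
		actual, err := op(d)
		if err != nil && !isDeadlineExceeded(err) {
			t.Fatalf("operation failed with %v; want deadline exceeded", err)
		}
		if want := timeoutUpperBound(d); actual > want {
			next, ok := nextTimeout(actual)
			if !ok {
				t.Fatalf("operation took %v; expected at most %v", actual, want)
			}
			// The machine may be too slow to hit short deadlines reliably;
			// retry with a longer timeout.
			d = next
			continue
		}
		break
	}
}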

// There is a very similar copy of this in os/timeout_test.go.
func TestReadTimeoutFluctuation(t *testing.T) {
	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	d := minDynamicTimeout
	b := make([]byte, 256)
	for {
		t.Logf("SetReadDeadline(+%v)", d)
		t0 := time.Now()
		deadline := t0.Add(d)
		if err = c.SetReadDeadline(deadline); err != nil {
			t.Fatalf("SetReadDeadline(%v): %v", deadline, err)
		}
		var n int
		n, err = c.Read(b)
		t1 := time.Now()

		if n != 0 || err == nil || !err.(Error).Timeout() {
			t.Errorf("Read did not return (0, timeout): (%d, %v)", n, err)
		}
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		if !isDeadlineExceeded(err) {
			t.Errorf("Read error is not DeadlineExceeded: %v", err)
		}

		actual := t1.Sub(t0)
		if t1.Before(deadline) {
			t.Errorf("Read took %s; expected at least %s", actual, d)
		}
		if t.Failed() {
			return
		}
		if want := timeoutUpperBound(d); actual > want {
			next, ok := nextTimeout(actual)
			if !ok {
				t.Fatalf("Read took %s; expected at most %v", actual, want)
			}
			// Maybe this machine is too slow to reliably schedule goroutines within
			// the requested duration. Increase the timeout and try again.
			t.Logf("Read took %s (expected %s); trying with longer timeout", actual, d)
			d = next
			continue
		}

		break
	}
}

// There is a very similar copy of this in os/timeout_test.go.
func TestReadFromTimeoutFluctuation(t *testing.T) {
	c1 := newLocalPacketListener(t, "udp")
	defer c1.Close()

	c2, err := Dial(c1.LocalAddr().Network(), c1.LocalAddr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c2.Close()

	d := minDynamicTimeout
	b := make([]byte, 256)
	for {
		t.Logf("SetReadDeadline(+%v)", d)
		t0 := time.Now()
		deadline := t0.Add(d)
		if err = c2.SetReadDeadline(deadline); err != nil {
			t.Fatalf("SetReadDeadline(%v): %v", deadline, err)
		}
		var n int
		n, _, err = c2.(PacketConn).ReadFrom(b)
		t1 := time.Now()

		if n != 0 || err == nil || !err.(Error).Timeout() {
			t.Errorf("ReadFrom did not return (0, timeout): (%d, %v)", n, err)
		}
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		if !isDeadlineExceeded(err) {
			t.Errorf("ReadFrom error is not DeadlineExceeded: %v", err)
		}

		actual := t1.Sub(t0)
		if t1.Before(deadline) {
			t.Errorf("ReadFrom took %s; expected at least %s", actual, d)
		}
		if t.Failed() {
			return
		}
		if want := timeoutUpperBound(d); actual > want {
			next, ok := nextTimeout(actual)
			if !ok {
				t.Fatalf("ReadFrom took %s; expected at most %s", actual, want)
			}
			// Maybe this machine is too slow to reliably schedule goroutines within
			// the requested duration. Increase the timeout and try again.
			t.Logf("ReadFrom took %s (expected %s); trying with longer timeout", actual, d)
			d = next
			continue
		}

		break
	}
}

func TestWriteTimeoutFluctuation(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	d := minDynamicTimeout
	for {
		t.Logf("SetWriteDeadline(+%v)", d)
		t0 := time.Now()
		deadline := t0.Add(d)
		if err := c.SetWriteDeadline(deadline); err != nil {
			t.Fatalf("SetWriteDeadline(%v): %v", deadline, err)
		}
		var n int64
		var err error
		for {
			var dn int
			dn, err = c.Write([]byte("TIMEOUT TRANSMITTER"))
			n += int64(dn)
			if err != nil {
				break
			}
		}
		t1 := time.Now()
		// Inv: err != nil
		if !err.(Error).Timeout() {
			t.Fatalf("Write did not return (any, timeout): (%d, %v)", n, err)
		}
		if perr := parseWriteError(err); perr != nil {
			t.Error(perr)
		}
		if !isDeadlineExceeded(err) {
			t.Errorf("Write error is not DeadlineExceeded: %v", err)
		}

		actual := t1.Sub(t0)
		if t1.Before(deadline) {
			t.Errorf("Write took %s; expected at least %s", actual, d)
		}
		if t.Failed() {
			return
		}
		if want := timeoutUpperBound(d); actual > want {
			if n > 0 {
				// SetWriteDeadline specifies a time “after which I/O operations fail
				// instead of blocking”. However, if the kernel's send buffer is not
				// yet full, we may be able to write some arbitrary (but finite)
				// number of bytes to it without blocking.
				t.Logf("Wrote %d bytes into send buffer; retrying until buffer is full", n)
				if d <= maxDynamicTimeout/2 {
					// We don't know how long the actual write loop would have taken if
					// the buffer were full, so just guess and double the duration so that
					// the next attempt can make twice as much progress toward filling it.
					d *= 2
				}
			} else if next, ok := nextTimeout(actual); !ok {
				t.Fatalf("Write took %s; expected at most %s", actual, want)
			} else {
				// Maybe this machine is too slow to reliably schedule goroutines within
				// the requested duration. Increase the timeout and try again.
				t.Logf("Write took %s (expected %s); trying with longer timeout", actual, d)
				d = next
			}
			continue
		}

		break
	}
}

// There is a very similar copy of this in os/timeout_test.go.
func TestVariousDeadlines(t *testing.T) {
	t.Parallel()
	testVariousDeadlines(t)
}

// There is a very similar copy of this in os/timeout_test.go.
func TestVariousDeadlines1Proc(t *testing.T) {
	// Cannot use t.Parallel - modifies global GOMAXPROCS.
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	testVariousDeadlines(t)
}

// There is a very similar copy of this in os/timeout_test.go.
func TestVariousDeadlines4Proc(t *testing.T) {
	// Cannot use t.Parallel - modifies global GOMAXPROCS.
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	testVariousDeadlines(t)
}

func testVariousDeadlines(t *testing.T) {
	handler := func(ls *localServer, ln Listener) {
		for {
			c, err := ln.Accept()
			if err != nil {
				break
			}
			c.Read(make([]byte, 1)) // wait for client to close connection
			c.Close()
		}
	}
	ls := newLocalServer(t, "tcp")
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	for _, timeout := range []time.Duration{
		1 * time.Nanosecond,
		2 * time.Nanosecond,
		5 * time.Nanosecond,
		50 * time.Nanosecond,
		100 * time.Nanosecond,
		200 * time.Nanosecond,
		500 * time.Nanosecond,
		750 * time.Nanosecond,
		1 * time.Microsecond,
		5 * time.Microsecond,
		25 * time.Microsecond,
		250 * time.Microsecond,
		500 * time.Microsecond,
		1 * time.Millisecond,
		5 * time.Millisecond,
		100 * time.Millisecond,
		250 * time.Millisecond,
		500 * time.Millisecond,
		1 * time.Second,
	} {
		numRuns := 3
		if testing.Short() {
			numRuns = 1
			if timeout > 500*time.Microsecond {
				continue
			}
		}
		for run := 0; run < numRuns; run++ {
			name := fmt.Sprintf("%v %d/%d", timeout, run, numRuns)
			t.Log(name)

			c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
			if err != nil {
				t.Fatal(err)
			}

			t0 := time.Now()
			if err := c.SetDeadline(t0.Add(timeout)); err != nil {
				t.Error(err)
			}
			n, err := io.Copy(io.Discard, c)
			dt := time.Since(t0)
			c.Close()

			if nerr, ok := err.(Error); ok && nerr.Timeout() {
				t.Logf("%v: good timeout after %v; %d bytes", name, dt, n)
			} else {
				t.Fatalf("%v: Copy = %d, %v; want timeout", name, n, err)
			}
		}
	}
}

// TestReadWriteProlongedTimeout tests concurrent deadline
// modification. Known to cause data races in the past.
func TestReadWriteProlongedTimeout(t *testing.T) {
	t.Parallel()

	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	handler := func(ls *localServer, ln Listener) {
		c, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		defer c.Close()

		var wg sync.WaitGroup
		wg.Add(2)
		go func() {
			defer wg.Done()
			var b [1]byte
			for {
				if err := c.SetReadDeadline(time.Now().Add(time.Hour)); err != nil {
					if perr := parseCommonError(err); perr != nil {
						t.Error(perr)
					}
					t.Error(err)
					return
				}
				if _, err := c.Read(b[:]); err != nil {
					if perr := parseReadError(err); perr != nil {
						t.Error(perr)
					}
					return
				}
			}
		}()
		go func() {
			defer wg.Done()
			var b [1]byte
			for {
				if err := c.SetWriteDeadline(time.Now().Add(time.Hour)); err != nil {
					if perr := parseCommonError(err); perr != nil {
						t.Error(perr)
					}
					t.Error(err)
					return
				}
				if _, err := c.Write(b[:]); err != nil {
					if perr := parseWriteError(err); perr != nil {
						t.Error(perr)
					}
					return
				}
			}
		}()
		wg.Wait()
	}
	ls := newLocalServer(t, "tcp")
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	var b [1]byte
	for i := 0; i < 1000; i++ {
		c.Write(b[:])
		c.Read(b[:])
	}
}

// There is a very similar copy of this in os/timeout_test.go.
func TestReadWriteDeadlineRace(t *testing.T) {
	t.Parallel()

	N := 1000
	if testing.Short() {
		N = 50
	}

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	var wg sync.WaitGroup
	wg.Add(3)
	go func() {
		defer wg.Done()
		tic := time.NewTicker(2 * time.Microsecond)
		defer tic.Stop()
		for i := 0; i < N; i++ {
			if err := c.SetReadDeadline(time.Now().Add(2 * time.Microsecond)); err != nil {
				if perr := parseCommonError(err); perr != nil {
					t.Error(perr)
				}
				break
			}
			if err := c.SetWriteDeadline(time.Now().Add(2 * time.Microsecond)); err != nil {
				if perr := parseCommonError(err); perr != nil {
					t.Error(perr)
				}
				break
			}
			<-tic.C
		}
	}()
	go func() {
		defer wg.Done()
		var b [1]byte
		for i := 0; i < N; i++ {
			c.Read(b[:]) // ignore possible timeout errors
		}
	}()
	go func() {
		defer wg.Done()
		var b [1]byte
		for i := 0; i < N; i++ {
			c.Write(b[:]) // ignore possible timeout errors
		}
	}()
	wg.Wait() // wait for tester goroutine to stop
}

// Issue 35367.
func TestConcurrentSetDeadline(t *testing.T) {
	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	const goroutines = 8
	const conns = 10
	const tries = 100

	var c [conns]Conn
	for i := 0; i < conns; i++ {
		var err error
		c[i], err = Dial(ln.Addr().Network(), ln.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		defer c[i].Close()
	}

	var wg sync.WaitGroup
	wg.Add(goroutines)
	now := time.Now()
	for i := 0; i < goroutines; i++ {
		go func(i int) {
			defer wg.Done()
			// Make the deadlines steadily earlier,
			// to trigger runtime adjusttimers calls.
			for j := tries; j > 0; j-- {
				for k := 0; k < conns; k++ {
					c[k].SetReadDeadline(now.Add(2*time.Hour + time.Duration(i*j*k)*time.Second))
					c[k].SetWriteDeadline(now.Add(1*time.Hour + time.Duration(i*j*k)*time.Second))
				}
			}
		}(i)
	}
	wg.Wait()
}

// isDeadlineExceeded reports whether err is or wraps os.ErrDeadlineExceeded.
// We also check that the error implements net.Error, and that the
// Timeout method returns true.
func isDeadlineExceeded(err error) bool {
	nerr, ok := err.(Error)
	if !ok {
		return false
	}
	if !nerr.Timeout() {
		return false
	}
	if !errors.Is(err, os.ErrDeadlineExceeded) {
		return false
	}
	return true
}
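
// exampleDeadlineCheck is an illustrative sketch, not called by any test, of
// how isDeadlineExceeded is typically applied: force a read to fail by setting
// a deadline in the past, then accept only an error that both reports
// Timeout() and wraps os.ErrDeadlineExceeded. The helper name is ours.
func exampleDeadlineCheck(c Conn) error {
	// A deadline in the past makes the next Read fail immediately.
	if err := c.SetReadDeadline(time.Now().Add(-time.Second)); err != nil {
		return err
	}
	var b [1]byte
	_, err := c.Read(b[:])
	if !isDeadlineExceeded(err) {
		return fmt.Errorf("Read = %v; want deadline exceeded", err)
	}
	return nil
}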