// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix || (js && wasm) || wasip1 || windows

package runtime

import (
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue/port/AIX/Windows)
// must define the following functions:
//
//	func netpollinit()
//		Initialize the poller. Only called once.
//
//	func netpollopen(fd uintptr, pd *pollDesc) int32
//		Arm edge-triggered notifications for fd. The pd argument is to pass
//		back to netpollready when fd is ready. Return an errno value.
//
//	func netpollclose(fd uintptr) int32
//		Disable notifications for fd. Return an errno value.
//
//	func netpoll(delta int64) (gList, int32)
//		Poll the network. If delta < 0, block indefinitely. If delta == 0,
//		poll without blocking. If delta > 0, block for up to delta nanoseconds.
//		Return a list of goroutines built by calling netpollready,
//		and a delta to add to netpollWaiters when all goroutines are ready.
//		This will never return an empty list with a non-zero delta.
//
//	func netpollBreak()
//		Wake up the network poller, assumed to be blocked in netpoll.
//
//	func netpollIsPollDescriptor(fd uintptr) bool
//		Reports whether fd is a file descriptor used by the poller.

// Error codes returned by runtime_pollReset and runtime_pollWait.
// These must match the values in internal/poll/fd_poll_runtime.go.
const (
	pollNoError        = 0 // no error
	pollErrClosing     = 1 // descriptor is closed
	pollErrTimeout     = 2 // I/O timeout
	pollErrNotPollable = 3 // general error polling descriptor
)

// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
// goroutines respectively. The semaphore can be in the following states:
//
//	pdReady - io readiness notification is pending;
//	          a goroutine consumes the notification by changing the state to pdNil.
//	pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
//	         the goroutine commits to park by changing the state to G pointer,
//	         or, alternatively, concurrent io notification changes the state to pdReady,
//	         or, alternatively, concurrent timeout/close changes the state to pdNil.
//	G pointer - the goroutine is blocked on the semaphore;
//	            io notification or timeout/close changes the state to pdReady or pdNil respectively
//	            and unparks the goroutine.
//	pdNil - none of the above.
const (
	pdNil   uintptr = 0
	pdReady uintptr = 1
	pdWait  uintptr = 2
)

const pollBlockSize = 4 * 1024

// Network poller descriptor.
//
// No heap pointers.
type pollDesc struct {
	_     sys.NotInHeap
	link  *pollDesc      // in pollcache, protected by pollcache.lock
	fd    uintptr        // constant for pollDesc usage lifetime
	fdseq atomic.Uintptr // protects against stale pollDesc

	// atomicInfo holds bits from closing, rd, and wd,
	// which are only ever written while holding the lock,
	// summarized for use by netpollcheckerr,
	// which cannot acquire the lock.
	// After writing these fields under lock in a way that
	// might change the summary, code must call publishInfo
	// before releasing the lock.
	// Code that changes fields and then calls netpollunblock
	// (while still holding the lock) must call publishInfo
	// before calling netpollunblock, because publishInfo is what
	// stops netpollblock from blocking anew
	// (by changing the result of netpollcheckerr).
	// atomicInfo also holds the eventErr bit,
	// recording whether a poll event on the fd got an error;
	// atomicInfo is the only source of truth for that bit.
	atomicInfo atomic.Uint32 // atomic pollInfo

	// rg, wg are accessed atomically and hold g pointers.
	// (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
	rg atomic.Uintptr // pdReady, pdWait, G waiting for read or pdNil
	wg atomic.Uintptr // pdReady, pdWait, G waiting for write or pdNil

	lock    mutex // protects the following fields
	closing bool
	rrun    bool      // whether rt is running
	wrun    bool      // whether wt is running
	user    uint32    // user settable cookie
	rseq    uintptr   // protects from stale read timers
	rt      timer     // read deadline timer
	rd      int64     // read deadline (a nanotime in the future, -1 when expired)
	wseq    uintptr   // protects from stale write timers
	wt      timer     // write deadline timer
	wd      int64     // write deadline (a nanotime in the future, -1 when expired)
	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
}

// pollInfo is the bits needed by netpollcheckerr, stored atomically,
// mostly duplicating state that is manipulated under lock in pollDesc.
// The one exception is the pollEventErr bit, which is maintained only
// in the pollInfo.
type pollInfo uint32

const (
	pollClosing = 1 << iota
	pollEventErr
	pollExpiredReadDeadline
	pollExpiredWriteDeadline
	pollFDSeq // 20 bit field, low 20 bits of fdseq field
)

const (
	pollFDSeqBits = 20                   // number of bits in pollFDSeq
	pollFDSeqMask = 1<<pollFDSeqBits - 1 // mask for pollFDSeq
)

func (i pollInfo) closing() bool              { return i&pollClosing != 0 }
func (i pollInfo) eventErr() bool             { return i&pollEventErr != 0 }
func (i pollInfo) expiredReadDeadline() bool  { return i&pollExpiredReadDeadline != 0 }
func (i pollInfo) expiredWriteDeadline() bool { return i&pollExpiredWriteDeadline != 0 }

// info returns the pollInfo corresponding to pd.
func (pd *pollDesc) info() pollInfo {
	return pollInfo(pd.atomicInfo.Load())
}

// publishInfo updates pd.atomicInfo (returned by pd.info)
// using the other values in pd.
// It must be called while holding pd.lock,
// and it must be called after changing anything
// that might affect the info bits.
// In practice this means after changing closing
// or changing rd or wd from < 0 to >= 0.
func (pd *pollDesc) publishInfo() {
	var info uint32
	if pd.closing {
		info |= pollClosing
	}
	if pd.rd < 0 {
		info |= pollExpiredReadDeadline
	}
	if pd.wd < 0 {
		info |= pollExpiredWriteDeadline
	}
	info |= uint32(pd.fdseq.Load()&pollFDSeqMask) << pollFDSeq

	// Set all of x except the pollEventErr bit.
	x := pd.atomicInfo.Load()
	for !pd.atomicInfo.CompareAndSwap(x, (x&pollEventErr)|info) {
		x = pd.atomicInfo.Load()
	}
}

// setEventErr sets the result of pd.info().eventErr() to b.
// We only change the error bit if seq == 0 or if seq matches pollFDSeq
// (issue #59545).
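// (A seq of 0 is treated as "match any": poll_runtime_pollOpen passes 0 when it
// clears the bit for a freshly initialized descriptor, which is also why it
// replaces a zero fdseq with 1, keeping 0 free as this special value.)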
func (pd *pollDesc) setEventErr(b bool, seq uintptr) {
	mSeq := uint32(seq & pollFDSeqMask)
	x := pd.atomicInfo.Load()
	xSeq := (x >> pollFDSeq) & pollFDSeqMask
	if seq != 0 && xSeq != mSeq {
		return
	}
	for (x&pollEventErr != 0) != b && !pd.atomicInfo.CompareAndSwap(x, x^pollEventErr) {
		x = pd.atomicInfo.Load()
		xSeq := (x >> pollFDSeq) & pollFDSeqMask
		if seq != 0 && xSeq != mSeq {
			return
		}
	}
}

type pollCache struct {
	lock  mutex
	first *pollDesc
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using the seq variable;
	// seq is incremented when deadlines are changed or the descriptor is reused.
}

var (
	netpollInitLock mutex
	netpollInited   atomic.Uint32

	pollcache      pollCache
	netpollWaiters atomic.Uint32
)

// netpollWaiters is accessed in tests
//go:linkname netpollWaiters

//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
func poll_runtime_pollServerInit() {
	netpollGenericInit()
}

func netpollGenericInit() {
	if netpollInited.Load() == 0 {
		lockInit(&netpollInitLock, lockRankNetpollInit)
		lockInit(&pollcache.lock, lockRankPollCache)
		lock(&netpollInitLock)
		if netpollInited.Load() == 0 {
			netpollinit()
			netpollInited.Store(1)
		}
		unlock(&netpollInitLock)
	}
}

func netpollinited() bool {
	return netpollInited.Load() != 0
}

//go:linkname poll_runtime_isPollServerDescriptor internal/poll.runtime_isPollServerDescriptor

// poll_runtime_isPollServerDescriptor reports whether fd is a
// descriptor being used by netpoll.
func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
	return netpollIsPollDescriptor(fd)
}

//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
	pd := pollcache.alloc()
	lock(&pd.lock)
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on free polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on free polldesc")
	}
	pd.fd = fd
	if pd.fdseq.Load() == 0 {
		// The value 0 is special in setEventErr, so don't use it.
		pd.fdseq.Store(1)
	}
	pd.closing = false
	pd.setEventErr(false, 0)
	pd.rseq++
	pd.rg.Store(pdNil)
	pd.rd = 0
	pd.wseq++
	pd.wg.Store(pdNil)
	pd.wd = 0
	pd.self = pd
	pd.publishInfo()
	unlock(&pd.lock)

	errno := netpollopen(fd, pd)
	if errno != 0 {
		pollcache.free(pd)
		return nil, int(errno)
	}
	return pd, 0
}

//go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
func poll_runtime_pollClose(pd *pollDesc) {
	if !pd.closing {
		throw("runtime: close polldesc w/o unblock")
	}
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on closing polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on closing polldesc")
	}
	netpollclose(pd.fd)
	pollcache.free(pd)
}

func (c *pollCache) free(pd *pollDesc) {
	// pd can't be shared here, but lock anyhow because
	// that's what publishInfo documents.
	lock(&pd.lock)

	// Increment the fdseq field, so that any currently
	// running netpoll calls will not mark pd as ready.
	fdseq := pd.fdseq.Load()
	fdseq = (fdseq + 1) & (1<<taggedPointerBits - 1)
	pd.fdseq.Store(fdseq)

	pd.publishInfo()

	unlock(&pd.lock)

	lock(&c.lock)
	pd.link = c.first
	c.first = pd
	unlock(&c.lock)
}

// poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
// prepares a descriptor for polling in mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
func poll_runtime_pollReset(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	if mode == 'r' {
		pd.rg.Store(pdNil)
	} else if mode == 'w' {
		pd.wg.Store(pdNil)
	}
	return pollNoError
}

// poll_runtime_pollWait, which is internal/poll.runtime_pollWait,
// waits for a descriptor to be ready for reading or writing,
// according to mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
func poll_runtime_pollWait(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	// For now, only Solaris, illumos, AIX and wasip1 use level-triggered IO.
	if GOOS == "solaris" || GOOS == "illumos" || GOOS == "aix" || GOOS == "wasip1" {
		netpollarm(pd, mode)
	}
	for !netpollblock(pd, int32(mode), false) {
		errcode = netpollcheckerr(pd, int32(mode))
		if errcode != pollNoError {
			return errcode
		}
		// This can happen if the timeout fired and unblocked us,
		// but the timeout was reset before we had a chance to run.
		// Pretend it has not happened and retry.
	}
	return pollNoError
}

//go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
	// This function is used only on windows after a failed attempt to cancel
	// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
	for !netpollblock(pd, int32(mode), true) {
	}
}

//go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline
func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
	lock(&pd.lock)
	if pd.closing {
		unlock(&pd.lock)
		return
	}
	rd0, wd0 := pd.rd, pd.wd
	combo0 := rd0 > 0 && rd0 == wd0
	if d > 0 {
		d += nanotime()
		if d <= 0 {
			// If the user has a deadline in the future, but the delay calculation
			// overflows, then set the deadline to the maximum possible value.
			d = 1<<63 - 1
		}
	}
	if mode == 'r' || mode == 'r'+'w' {
		pd.rd = d
	}
	if mode == 'w' || mode == 'r'+'w' {
		pd.wd = d
	}
	pd.publishInfo()
	combo := pd.rd > 0 && pd.rd == pd.wd
	rtf := netpollReadDeadline
	if combo {
		rtf = netpollDeadline
	}
	if !pd.rrun {
		if pd.rd > 0 {
			// Copy the current seq into the timer arg.
			// The timer func will check the seq against the current descriptor seq;
			// if they differ, the descriptor was reused or the timers were reset.
			pd.rt.modify(pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
			pd.rrun = true
		}
	} else if pd.rd != rd0 || combo != combo0 {
		pd.rseq++ // invalidate current timers
		if pd.rd > 0 {
			pd.rt.modify(pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
		} else {
			pd.rt.stop()
			pd.rrun = false
		}
	}
	if !pd.wrun {
		if pd.wd > 0 && !combo {
			pd.wt.modify(pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
			pd.wrun = true
		}
	} else if pd.wd != wd0 || combo != combo0 {
		pd.wseq++ // invalidate current timers
		if pd.wd > 0 && !combo {
			pd.wt.modify(pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
		} else {
			pd.wt.stop()
			pd.wrun = false
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	// Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
	delta := int32(0)
	var rg, wg *g
	if pd.rd < 0 {
		rg = netpollunblock(pd, 'r', false, &delta)
	}
	if pd.wd < 0 {
		wg = netpollunblock(pd, 'w', false, &delta)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
	netpollAdjustWaiters(delta)
}

//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
func poll_runtime_pollUnblock(pd *pollDesc) {
	lock(&pd.lock)
	if pd.closing {
		throw("runtime: unblock on closing polldesc")
	}
	pd.closing = true
	pd.rseq++
	pd.wseq++
	var rg, wg *g
	pd.publishInfo()
	delta := int32(0)
	rg = netpollunblock(pd, 'r', false, &delta)
	wg = netpollunblock(pd, 'w', false, &delta)
	if pd.rrun {
		pd.rt.stop()
		pd.rrun = false
	}
	if pd.wrun {
		pd.wt.stop()
		pd.wrun = false
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
	netpollAdjustWaiters(delta)
}

// netpollready is called by the platform-specific netpoll function.
// It declares that the fd associated with pd is ready for I/O.
// The toRun argument is used to build a list of goroutines to return
// from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
// whether the fd is ready for reading or writing or both.
//
// This returns a delta to apply to netpollWaiters.
//
// This may run while the world is stopped, so write barriers are not allowed.
//
//go:nowritebarrier
func netpollready(toRun *gList, pd *pollDesc, mode int32) int32 {
	delta := int32(0)
	var rg, wg *g
	if mode == 'r' || mode == 'r'+'w' {
		rg = netpollunblock(pd, 'r', true, &delta)
	}
	if mode == 'w' || mode == 'r'+'w' {
		wg = netpollunblock(pd, 'w', true, &delta)
	}
	if rg != nil {
		toRun.push(rg)
	}
	if wg != nil {
		toRun.push(wg)
	}
	return delta
}

func netpollcheckerr(pd *pollDesc, mode int32) int {
	info := pd.info()
	if info.closing() {
		return pollErrClosing
	}
	if (mode == 'r' && info.expiredReadDeadline()) || (mode == 'w' && info.expiredWriteDeadline()) {
		return pollErrTimeout
	}
	// Report an event scanning error only on a read event.
	// An error on a write event will be captured in a subsequent
	// write call that is able to report a more specific error.
	if mode == 'r' && info.eventErr() {
		return pollErrNotPollable
	}
	return pollNoError
}

func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
	r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
	if r {
		// Bump the count of goroutines waiting for the poller.
		// The scheduler uses this to decide whether to block
		// waiting for the poller if there is nothing else to do.
		netpollAdjustWaiters(1)
	}
	return r
}

func netpollgoready(gp *g, traceskip int) {
	goready(gp, traceskip+1)
}

// netpollblock returns true if IO is ready, or false if it timed out or was closed.
// If waitio is true, wait only for completed IO and ignore errors.
// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
// can hold only a single waiting goroutine for each mode.
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	// Set the gpp semaphore to pdWait.
	for {
		// Consume notification if already ready.
		if gpp.CompareAndSwap(pdReady, pdNil) {
			return true
		}
		if gpp.CompareAndSwap(pdNil, pdWait) {
			break
		}

		// Double check that this isn't corrupt; otherwise we'd loop
		// forever.
		if v := gpp.Load(); v != pdReady && v != pdNil {
			throw("runtime: double wait")
		}
	}

	// We need to recheck error states after setting gpp to pdWait:
	// runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl do the opposite
	// (store to closing/rd/wd, publishInfo, load of rg/wg).
	if waitio || netpollcheckerr(pd, mode) == pollNoError {
		gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceBlockNet, 5)
	}
	// Be careful not to lose a concurrent pdReady notification.
	old := gpp.Swap(pdNil)
	if old > pdWait {
		throw("runtime: corrupted polldesc")
	}
	return old == pdReady
}

// netpollunblock moves either pd.rg (if mode == 'r') or
// pd.wg (if mode == 'w') into the pdReady state.
// This returns any goroutine blocked on pd.{rg,wg}.
// It adds any adjustment to netpollWaiters to *delta;
// this adjustment should be applied after the goroutine has
// been marked ready.
func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	for {
		old := gpp.Load()
		if old == pdReady {
			return nil
		}
		if old == pdNil && !ioready {
			// Only set pdReady for ioready. runtime_pollWait
			// will check for timeout/cancel before waiting.
			return nil
		}
		new := pdNil
		if ioready {
			new = pdReady
		}
		if gpp.CompareAndSwap(old, new) {
			if old == pdWait {
				old = pdNil
			} else if old != pdNil {
				*delta -= 1
			}
			return (*g)(unsafe.Pointer(old))
		}
	}
}

func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
	lock(&pd.lock)
	// Seq arg is seq when the timer was set.
	// If it's stale, ignore the timer event.
	currentSeq := pd.rseq
	if !read {
		currentSeq = pd.wseq
	}
	if seq != currentSeq {
		// The descriptor was reused or timers were reset.
		unlock(&pd.lock)
		return
	}
	delta := int32(0)
	var rg *g
	if read {
		if pd.rd <= 0 || !pd.rrun {
			throw("runtime: inconsistent read deadline")
		}
		pd.rd = -1
		pd.publishInfo()
		rg = netpollunblock(pd, 'r', false, &delta)
	}
	var wg *g
	if write {
		if pd.wd <= 0 || !pd.wrun && !read {
			throw("runtime: inconsistent write deadline")
		}
		pd.wd = -1
		pd.publishInfo()
		wg = netpollunblock(pd, 'w', false, &delta)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 0)
	}
	if wg != nil {
		netpollgoready(wg, 0)
	}
	netpollAdjustWaiters(delta)
}

func netpollDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
}

func netpollReadDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
}

func netpollWriteDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
}

// netpollAnyWaiters reports whether any goroutines are waiting for I/O.
func netpollAnyWaiters() bool {
	return netpollWaiters.Load() > 0
}

// netpollAdjustWaiters adds delta to netpollWaiters.
func netpollAdjustWaiters(delta int32) {
	if delta != 0 {
		netpollWaiters.Add(delta)
	}
}

func (c *pollCache) alloc() *pollDesc {
	lock(&c.lock)
	if c.first == nil {
		const pdSize = unsafe.Sizeof(pollDesc{})
		n := pollBlockSize / pdSize
		if n == 0 {
			n = 1
		}
		// Must be in non-GC memory because it can be referenced
		// only from epoll/kqueue internals.
		mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
		for i := uintptr(0); i < n; i++ {
			pd := (*pollDesc)(add(mem, i*pdSize))
			lockInit(&pd.lock, lockRankPollDesc)
			pd.rt.init(nil, nil)
			pd.wt.init(nil, nil)
			pd.link = c.first
			c.first = pd
		}
	}
	pd := c.first
	c.first = pd.link
	unlock(&c.lock)
	return pd
}

// makeArg converts pd to an interface{}.
// makeArg does not do any allocation. Normally, such
// a conversion requires an allocation because pointers to
// types which embed runtime/internal/sys.NotInHeap (which pollDesc is)
// must be stored in interfaces indirectly. See issue 42076.
func (pd *pollDesc) makeArg() (i any) {
	x := (*eface)(unsafe.Pointer(&i))
	x._type = pdType
	x.data = unsafe.Pointer(&pd.self)
	return
}

var (
	pdEface any    = (*pollDesc)(nil)
	pdType  *_type = efaceOf(&pdEface)._type
)
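// For orientation only: a rough sketch of how internal/poll is expected to
// drive the linknamed entry points above. The authoritative call sequence
// lives in internal/poll/fd_poll_runtime.go; the snippet below is
// illustrative, not exact:
//
//	pd, errno := runtime_pollOpen(fd)       // register fd with the poller
//	runtime_pollSetDeadline(pd, d, 'r'+'w') // optional; mode is 'r', 'w', or 'r'+'w'
//	for {
//		if errcode := runtime_pollWait(pd, 'r'); errcode != pollNoError {
//			break // pollErrClosing, pollErrTimeout, or pollErrNotPollable
//		}
//		// ... retry the I/O operation that returned EAGAIN ...
//	}
//	runtime_pollUnblock(pd) // must happen before runtime_pollClose
//	runtime_pollClose(pd)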