#![allow(dead_code)]
use crate::Error;
use core::{
    mem::MaybeUninit,
    num::NonZeroU32,
    ptr::NonNull,
    sync::atomic::{fence, AtomicPtr, Ordering},
};
use libc::c_void;

cfg_if! {
    if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android"))] {
        use libc::__errno as errno_location;
    } else if #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd", target_os = "redox"))] {
        use libc::__errno_location as errno_location;
    } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] {
        use libc::___errno as errno_location;
    } else if #[cfg(any(target_os = "macos", target_os = "freebsd"))] {
        use libc::__error as errno_location;
    } else if #[cfg(target_os = "haiku")] {
        use libc::_errnop as errno_location;
    } else if #[cfg(target_os = "nto")] {
        use libc::__get_errno_ptr as errno_location;
    } else if #[cfg(any(all(target_os = "horizon", target_arch = "arm"), target_os = "vita"))] {
        extern "C" {
            // Not provided by libc: https://github.com/rust-lang/libc/issues/1995
            fn __errno() -> *mut libc::c_int;
        }
        use __errno as errno_location;
    } else if #[cfg(target_os = "aix")] {
        use libc::_Errno as errno_location;
    }
}

cfg_if! {
    if #[cfg(target_os = "vxworks")] {
        use libc::errnoGet as get_errno;
    } else if #[cfg(target_os = "dragonfly")] {
        // Until rust-lang/rust#29594 is stable, we cannot get the errno value
        // on DragonFlyBSD. So we just return an out-of-range errno.
        unsafe fn get_errno() -> libc::c_int { -1 }
    } else {
        unsafe fn get_errno() -> libc::c_int { *errno_location() }
    }
}

pub fn last_os_error() -> Error {
    let errno = unsafe { get_errno() };
    if errno > 0 {
        Error::from(NonZeroU32::new(errno as u32).unwrap())
    } else {
        Error::ERRNO_NOT_POSITIVE
    }
}

// Fill a buffer by repeatedly invoking a system call. The `sys_fill` function:
//   - should return -1 and set errno on failure
//   - should return the number of bytes written on success
pub fn sys_fill_exact(
    mut buf: &mut [MaybeUninit<u8>],
    sys_fill: impl Fn(&mut [MaybeUninit<u8>]) -> libc::ssize_t,
) -> Result<(), Error> {
    while !buf.is_empty() {
        let res = sys_fill(buf);
        match res {
            res if res > 0 => buf = buf.get_mut(res as usize..).ok_or(Error::UNEXPECTED)?,
            -1 => {
                let err = last_os_error();
                // We should try again if the call was interrupted.
                if err.raw_os_error() != Some(libc::EINTR) {
                    return Err(err);
                }
            }
            // Negative return codes not equal to -1 should be impossible.
            // EOF (ret = 0) should be impossible, as the data we are reading
            // should be an infinite stream of random bytes.
            _ => return Err(Error::UNEXPECTED),
        }
    }
    Ok(())
}
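// Illustrative sketch, not part of the original module: one way a descriptor-
// based backend could call `sys_fill_exact`, assuming `fd` is an open file
// descriptor for a random device. `libc::read` satisfies the `sys_fill`
// contract above: it returns the number of bytes written on success, or -1
// with errno set on failure. The function name is hypothetical.
fn example_fill_from_fd(fd: libc::c_int, dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
    sys_fill_exact(dest, |buf| unsafe {
        // The kernel writes into the (possibly uninitialized) buffer.
        libc::read(fd, buf.as_mut_ptr() as *mut c_void, buf.len())
    })
}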

// A "weak" binding to a C function that may or may not be present at runtime.
// Used for supporting newer OS features while still building on older systems.
// Based on the DlsymWeak struct in libstd:
// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
// except that the caller must manually cast self.ptr() to a function pointer.
pub struct Weak {
    name: &'static str,
    addr: AtomicPtr<c_void>,
}

impl Weak {
    // A non-null pointer value which indicates we are uninitialized. This
    // constant should ideally not be a valid address of a function pointer.
    // However, if by chance libc::dlsym does return UNINIT, there will not
    // be undefined behavior. libc::dlsym will just be called each time ptr()
    // is called. This would be inefficient, but correct.
    // TODO: Replace with core::ptr::invalid_mut(1) when that is stable.
    const UNINIT: *mut c_void = 1 as *mut c_void;

    // Construct a binding to a C function with a given name. This function is
    // unsafe because `name` _must_ be null terminated.
    pub const unsafe fn new(name: &'static str) -> Self {
        Self {
            name,
            addr: AtomicPtr::new(Self::UNINIT),
        }
    }

    // Return the address of a function if present at runtime. Otherwise,
    // return None. Multiple callers can call ptr() concurrently. It will
    // always return _some_ value returned by libc::dlsym. However, the
    // dlsym function may be called multiple times.
    pub fn ptr(&self) -> Option<NonNull<c_void>> {
        // Despite having only a single atomic variable (self.addr), we still
        // cannot always use Ordering::Relaxed, as we need to make sure a
        // successful call to dlsym() is "ordered before" any data read through
        // the returned pointer (which occurs when the function is called).
        // Our implementation mirrors the one in libstd, meaning that the use
        // of non-Relaxed operations is probably unnecessary.
        match self.addr.load(Ordering::Relaxed) {
            Self::UNINIT => {
                let symbol = self.name.as_ptr() as *const _;
                let addr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, symbol) };
                // Synchronizes with the Acquire fence below
                self.addr.store(addr, Ordering::Release);
                NonNull::new(addr)
            }
            addr => {
                let func = NonNull::new(addr)?;
                fence(Ordering::Acquire);
                Some(func)
            }
        }
    }
}
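
// Illustrative sketch, not part of the original module: resolving an optional
// symbol through Weak and casting the result to a matching C signature. The
// "getrandom" symbol name, GetRandomFn type, and function name below are
// assumptions for the example; the symbol string must be null terminated, as
// Weak::new requires.
type GetRandomFn =
    unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;

fn example_weak_getrandom(buf: &mut [MaybeUninit<u8>]) -> Option<libc::ssize_t> {
    static GETRANDOM: Weak = unsafe { Weak::new("getrandom\0") };
    // ptr() returns None if the symbol is not present in this process.
    let fptr = GETRANDOM.ptr()?;
    let func: GetRandomFn = unsafe { core::mem::transmute(fptr.as_ptr()) };
    Some(unsafe { func(buf.as_mut_ptr() as *mut c_void, buf.len(), 0) })
}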

// SAFETY: `path` must be null terminated, and the returned FD must be manually closed.
pub unsafe fn open_readonly(path: &str) -> Result<libc::c_int, Error> {
    debug_assert_eq!(path.as_bytes().last(), Some(&0));
    loop {
        let fd = libc::open(path.as_ptr() as *const _, libc::O_RDONLY | libc::O_CLOEXEC);
        if fd >= 0 {
            return Ok(fd);
        }
        let err = last_os_error();
        // We should try again if open() was interrupted.
        if err.raw_os_error() != Some(libc::EINTR) {
            return Err(err);
        }
    }
}
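
// Illustrative sketch, not part of the original module: pairing open_readonly
// with the fill helper sketched earlier. The "/dev/urandom" path and function
// name are assumptions for the example; note the required trailing NUL and
// the manual close of the descriptor.
fn example_read_device(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
    let fd = unsafe { open_readonly("/dev/urandom\0") }?;
    let res = example_fill_from_fd(fd, dest);
    // open_readonly does not close the fd; the caller must.
    unsafe { libc::close(fd) };
    res
}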