// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/files/file_path_watcher_kqueue.h"

#include <fcntl.h>
#include <stddef.h>
#include <sys/param.h>

#include <string>
#include <vector>

#include "base/file_descriptor_posix.h"
#include "base/files/file_util.h"
#include "base/functional/bind.h"
#include "base/logging.h"
#include "base/ranges/algorithm.h"
#include "base/strings/stringprintf.h"
#include "base/task/sequenced_task_runner.h"
#include "base/threading/scoped_blocking_call.h"

// On some platforms these are not defined.
#if !defined(EV_RECEIPT)
#define EV_RECEIPT 0
#endif
#if !defined(O_EVTONLY)
#define O_EVTONLY O_RDONLY
#endif

namespace base {

FilePathWatcherKQueue::FilePathWatcherKQueue() : kqueue_(-1) {}

FilePathWatcherKQueue::~FilePathWatcherKQueue() {
  DCHECK(!task_runner() || task_runner()->RunsTasksInCurrentSequence());
}

void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) {
  CloseFileDescriptor(&event.ident);
  EventData* entry = EventDataForKevent(event);
  delete entry;
  event.udata = NULL;
}

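// Builds one kevent per component of |path|, opening a file descriptor for
// each component that currently exists, and appends them to |events|. Returns
// the number of components that could be opened; the first missing component
// and everything below it are added with kNoFileDescriptor so they can be
// re-armed later once they appear.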
size_t FilePathWatcherKQueue::EventsForPath(FilePath path,
                                            EventVector* events) {
  // Make sure that we are working with a clean slate.
  DCHECK(events->empty());

  std::vector<FilePath::StringType> components = path.GetComponents();

  if (components.empty()) {
    return 0;
  }

  size_t last_existing_entry = 0;
  FilePath built_path;
  bool path_still_exists = true;
  for (std::vector<FilePath::StringType>::iterator i = components.begin();
       i != components.end(); ++i) {
    if (i == components.begin()) {
      built_path = FilePath(*i);
    } else {
      built_path = built_path.Append(*i);
    }
    uintptr_t fd = kNoFileDescriptor;
    if (path_still_exists) {
      fd = FileDescriptorForPath(built_path);
      if (fd == kNoFileDescriptor) {
        path_still_exists = false;
      } else {
        ++last_existing_entry;
      }
    }
    FilePath::StringType subdir = (i != (components.end() - 1)) ? *(i + 1) : "";
    EventData* data = new EventData(built_path, subdir);
    struct kevent event;
    EV_SET(&event, fd, EVFILT_VNODE, (EV_ADD | EV_CLEAR | EV_RECEIPT),
           (NOTE_DELETE | NOTE_WRITE | NOTE_ATTRIB |
            NOTE_RENAME | NOTE_REVOKE | NOTE_EXTEND), 0, data);
    events->push_back(event);
  }
  return last_existing_entry;
}

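// Builds a single kevent that watches |path| itself rather than every
// component along the path. Returns 1 if a descriptor for |path| could be
// opened, 0 otherwise.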
// static
size_t FilePathWatcherKQueue::EventForItem(const FilePath& path,
                                           EventVector* events) {
  // Make sure that we are working with a clean slate.
  DCHECK(events->empty());

  events->resize(1);
  auto& event = events->front();
  EV_SET(&event, FileDescriptorForPath(path), EVFILT_VNODE,
         (EV_ADD | EV_CLEAR | EV_RECEIPT),
         (NOTE_DELETE | NOTE_WRITE | NOTE_ATTRIB | NOTE_RENAME | NOTE_REVOKE |
          NOTE_EXTEND),
         0, new EventData(path, /*subdir=*/FilePath::StringType()));

  return event.ident != kNoFileDescriptor ? 1 : 0;
}

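// Opens |path| for event monitoring only (O_EVTONLY where available, otherwise
// O_RDONLY per the fallback above) and returns the descriptor, or
// kNoFileDescriptor on failure.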
uintptr_t FilePathWatcherKQueue::FileDescriptorForPath(const FilePath& path) {
  ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
  int fd = HANDLE_EINTR(open(path.value().c_str(), O_EVTONLY));
  if (fd < 0)
    return kNoFileDescriptor;
  return static_cast<uintptr_t>(fd);
}

void FilePathWatcherKQueue::CloseFileDescriptor(uintptr_t* fd) {
  if (*fd == kNoFileDescriptor) {
    return;
  }

  if (IGNORE_EINTR(close(checked_cast<int>(*fd))) != 0) {
    DPLOG(ERROR) << "close";
  }
  *fd = kNoFileDescriptor;
}

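// Scans |count| kevents returned by kevent() for EV_ERROR results, logging the
// affected path (or descriptor) for each. Returns false if kevent() itself
// failed (|count| < 0) or any event carries an error.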
bool FilePathWatcherKQueue::AreKeventValuesValid(struct kevent* kevents,
                                                 int count) {
  if (count < 0) {
    DPLOG(ERROR) << "kevent";
    return false;
  }
  bool valid = true;
  for (int i = 0; i < count; ++i) {
    if (kevents[i].flags & EV_ERROR && kevents[i].data) {
      // Find the kevent in |events_| that matches the kevent with the error.
      EventVector::iterator event = events_.begin();
      for (; event != events_.end(); ++event) {
        if (event->ident == kevents[i].ident) {
          break;
        }
      }
      std::string path_name;
      if (event != events_.end()) {
        EventData* event_data = EventDataForKevent(*event);
        if (event_data != NULL) {
          path_name = event_data->path_.value();
        }
      }
      if (path_name.empty()) {
        // Fall back to reporting the raw descriptor value when no path is
        // known.
        path_name = base::StringPrintf(
            "fd %lu", static_cast<unsigned long>(kevents[i].ident));
      }
      DLOG(ERROR) << "Error: " << kevents[i].data << " for " << path_name;
      valid = false;
    }
  }
  return valid;
}

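// Handles a NOTE_ATTRIB update on a directory along the watched path. An
// attribute change may mean access to the next component was lost (e.g. a
// permissions change), so probe that component; if it is no longer
// accessible, close every descriptor from |event| down so the watches get
// rebuilt.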
void FilePathWatcherKQueue::HandleAttributesChange(
    const EventVector::iterator& event,
    bool* target_file_affected,
    bool* update_watches) {
  EventVector::iterator next_event = event + 1;
  EventData* next_event_data = EventDataForKevent(*next_event);
  // Check to see if the next item in path is still accessible.
  uintptr_t have_access = FileDescriptorForPath(next_event_data->path_);
  if (have_access == kNoFileDescriptor) {
    *target_file_affected = true;
    *update_watches = true;
    EventVector::iterator local_event(event);
    for (; local_event != events_.end(); ++local_event) {
      // Close all nodes from the event down. This has the side effect of
      // potentially rendering other events in |updates| invalid.
      // There is no need to remove the events from |kqueue_| because this
      // happens as a side effect of closing the file descriptor.
      CloseFileDescriptor(&local_event->ident);
    }
  } else {
    CloseFileDescriptor(&have_access);
  }
}

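// Handles NOTE_DELETE, NOTE_REVOKE and NOTE_RENAME updates. The component (and
// everything below it) is gone from its watched location, so close every
// descriptor from |event| down and request a watch rebuild and a notification.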
void FilePathWatcherKQueue::HandleDeleteOrMoveChange(
    const EventVector::iterator& event,
    bool* target_file_affected,
    bool* update_watches) {
  *target_file_affected = true;
  *update_watches = true;
  EventVector::iterator local_event(event);
  for (; local_event != events_.end(); ++local_event) {
    // Close all nodes from the event down. This has the side effect of
    // potentially rendering other events in |updates| invalid.
    // There is no need to remove the events from |kqueue_| because this
    // happens as a side effect of closing the file descriptor.
    CloseFileDescriptor(&local_event->ident);
  }
}

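// Handles a NOTE_WRITE update on a directory along the watched path, which may
// indicate that the next component was (re)created. If that component is not
// already being watched, try to open it and, on success, request a watch
// rebuild (and a notification if it is the target itself).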
void FilePathWatcherKQueue::HandleCreateItemChange(
    const EventVector::iterator& event,
    bool* target_file_affected,
    bool* update_watches) {
  // Get the next item in the path.
  EventVector::iterator next_event = event + 1;
  // Check to see if it already has a valid file descriptor.
  if (!IsKeventFileDescriptorOpen(*next_event)) {
    EventData* next_event_data = EventDataForKevent(*next_event);
    // If not, attempt to open a file descriptor for it.
    next_event->ident = FileDescriptorForPath(next_event_data->path_);
    if (IsKeventFileDescriptorOpen(*next_event)) {
      *update_watches = true;
      if (next_event_data->subdir_.empty()) {
        *target_file_affected = true;
      }
    }
  }
}

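// Re-registers kevents for every component that currently has an open
// descriptor, then tries to open descriptors for the remaining components,
// looping until no further components appear. Returns false if the kqueue
// reports errors or the root of the path becomes inaccessible.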
bool FilePathWatcherKQueue::UpdateWatches(bool* target_file_affected) {
  // Iterate over events adding kevents for items that exist to the kqueue.
  // Then check to see if new components in the path have been created.
  // Repeat until no new components in the path are detected.
  // This is to get around races in directory creation in a watched path.
  bool update_watches = true;
  while (update_watches) {
    size_t valid;
    for (valid = 0; valid < events_.size(); ++valid) {
      if (!IsKeventFileDescriptorOpen(events_[valid])) {
        break;
      }
    }
    if (valid == 0) {
      // The root of the file path is inaccessible?
      return false;
    }

    EventVector updates(valid);
    ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
    const int valid_int = checked_cast<int>(valid);
    int count = HANDLE_EINTR(
        kevent(kqueue_, &events_[0], valid_int, &updates[0], valid_int, NULL));
    if (!AreKeventValuesValid(&updates[0], count)) {
      return false;
    }
    update_watches = false;
    for (; valid < events_.size(); ++valid) {
      EventData* event_data = EventDataForKevent(events_[valid]);
      events_[valid].ident = FileDescriptorForPath(event_data->path_);
      if (IsKeventFileDescriptorOpen(events_[valid])) {
        update_watches = true;
        if (event_data->subdir_.empty()) {
          *target_file_affected = true;
        }
      } else {
        break;
      }
    }
  }
  return true;
}

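// Watch() is normally reached through the cross-platform FilePathWatcher
// front end rather than called directly. A minimal usage sketch (illustrative
// only; it assumes a sequence that supports FileDescriptorWatcher, e.g. a
// MessagePumpType::IO thread, and a hypothetical path):
//
//   FilePathWatcher watcher;
//   bool ok = watcher.Watch(
//       FilePath("/tmp/some/file"), FilePathWatcher::Type::kNonRecursive,
//       BindRepeating([](const FilePath& path, bool error) {
//         LOG(INFO) << "Changed: " << path.value() << " error=" << error;
//       }));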
bool FilePathWatcherKQueue::Watch(const FilePath& path,
                                  Type type,
                                  const FilePathWatcher::Callback& callback) {
  DCHECK(target_.value().empty());  // Can only watch one path.
  DCHECK(!callback.is_null());
  DCHECK_EQ(kqueue_, -1);
  // Recursive watch is not supported using kqueue.
  DCHECK_NE(type, Type::kRecursive);

  callback_ = callback;
  target_ = path;

  set_task_runner(SequencedTaskRunner::GetCurrentDefault());

  kqueue_ = kqueue();
  if (kqueue_ == -1) {
    DPLOG(ERROR) << "kqueue";
    return false;
  }

  size_t last_entry = type == Type::kNonRecursive
                          ? EventsForPath(target_, &events_)
                          : EventForItem(target_, &events_);
  if (!last_entry) {
    // No notifications can possibly come in, so fail fast.
    Cancel();
    return false;
  }

  EventVector responses(last_entry);

  ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
  const int last_entry_int = checked_cast<int>(last_entry);
  int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], last_entry_int,
                                  &responses[0], last_entry_int, NULL));
  if (!AreKeventValuesValid(&responses[0], count)) {
    // Calling Cancel() here to close any file descriptors that were opened.
    // This would happen in the destructor anyways, but FilePathWatchers tend to
    // be long lived, and if an error has occurred, there is no reason to waste
    // the file descriptors.
    Cancel();
    return false;
  }

  // It's safe to use Unretained() because the watch is cancelled and the
  // callback cannot be invoked after |kqueue_watch_controller_| (which is a
  // member of |this|) has been deleted.
  kqueue_watch_controller_ = FileDescriptorWatcher::WatchReadable(
      kqueue_, BindRepeating(&FilePathWatcherKQueue::OnKQueueReadable,
                             Unretained(this)));

  return true;
}

void FilePathWatcherKQueue::Cancel() {
  if (!task_runner()) {
    set_cancelled();
    return;
  }

  DCHECK(task_runner()->RunsTasksInCurrentSequence());
  if (!is_cancelled()) {
    set_cancelled();
    kqueue_watch_controller_.reset();
    if (IGNORE_EINTR(close(kqueue_)) != 0) {
      DPLOG(ERROR) << "close kqueue";
    }
    kqueue_ = -1;
    base::ranges::for_each(events_, ReleaseEvent);
    events_.clear();
    callback_.Reset();
  }
}

void FilePathWatcherKQueue::OnKQueueReadable() {
  DCHECK(task_runner()->RunsTasksInCurrentSequence());
  DCHECK(events_.size());

  // Request the file system update notifications that have occurred and return
  // them in |updates|. |count| will contain the number of updates that have
  // occurred.
  EventVector updates(events_.size());
  struct timespec timeout = {0, 0};
  int count = HANDLE_EINTR(kevent(kqueue_, NULL, 0, &updates[0],
                                  checked_cast<int>(updates.size()), &timeout));

  // Error values are stored within updates, so check to make sure that no
  // errors occurred.
  if (!AreKeventValuesValid(&updates[0], count)) {
    callback_.Run(target_, true /* error */);
    Cancel();
    return;
  }

  bool update_watches = false;
  bool send_notification = false;

  // Iterate through each of the updates and react to them.
  // AreKeventValuesValid() guarantees `count` is non-negative.
  for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
    // Find our kevent record that matches the update notification.
    EventVector::iterator event = events_.begin();
    for (; event != events_.end(); ++event) {
      if (!IsKeventFileDescriptorOpen(*event) ||
          event->ident == updates[i].ident) {
        break;
      }
    }
    if (event == events_.end() || !IsKeventFileDescriptorOpen(*event)) {
      // The event may no longer exist in |events_| because another event
      // modified |events_| in such a way to make it invalid. For example if
      // the path is /foo/bar/bam and foo is deleted, NOTE_DELETE events for
      // foo, bar and bam will be sent. If foo is processed first, then
      // the file descriptors for bar and bam will already be closed and set
      // to -1 before they get a chance to be processed.
      continue;
    }

    EventData* event_data = EventDataForKevent(*event);

    // If the subdir is empty, this is the last item on the path and is the
    // target file.
    bool target_file_affected = event_data->subdir_.empty();
    if ((updates[i].fflags & NOTE_ATTRIB) && !target_file_affected) {
      HandleAttributesChange(event, &target_file_affected, &update_watches);
    }
    if (updates[i].fflags & (NOTE_DELETE | NOTE_REVOKE | NOTE_RENAME)) {
      HandleDeleteOrMoveChange(event, &target_file_affected, &update_watches);
    }
    if ((updates[i].fflags & NOTE_WRITE) && !target_file_affected) {
      HandleCreateItemChange(event, &target_file_affected, &update_watches);
    }
    send_notification |= target_file_affected;
  }

  if (update_watches) {
    if (!UpdateWatches(&send_notification)) {
      callback_.Run(target_, true /* error */);
      Cancel();
      return;
    }
  }

  if (send_notification) {
    callback_.Run(target_, false);
  }
}

}  // namespace base