xref: /aosp_15_r20/external/minijail/libminijail.c (revision 4b9c6d91573e8b3a96609339b46361b5476dd0f9)
1 /* Copyright 2012 The ChromiumOS Authors
2  * Use of this source code is governed by a BSD-style license that can be
3  * found in the LICENSE file.
4  */
5 
6 #define _BSD_SOURCE
7 #define _DEFAULT_SOURCE
8 #define _GNU_SOURCE
9 
10 #include <asm/unistd.h>
11 #include <assert.h>
12 #include <dirent.h>
13 #include <errno.h>
14 #include <fcntl.h>
15 #include <grp.h>
16 #include <linux/capability.h>
17 #include <linux/filter.h>
18 #include <sched.h>
19 #include <signal.h>
20 #include <stdbool.h>
21 #include <stddef.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/capability.h>
26 #include <sys/mount.h>
27 #include <sys/param.h>
28 #include <sys/prctl.h>
29 #include <sys/resource.h>
30 #include <sys/select.h>
31 #include <sys/stat.h>
32 #include <sys/sysmacros.h>
33 #include <sys/types.h>
34 #include <sys/user.h>
35 #include <sys/wait.h>
36 #include <syscall.h>
37 #include <unistd.h>
38 
39 #include "landlock_util.h"
40 #include "libminijail-private.h"
41 #include "libminijail.h"
42 
43 #include "signal_handler.h"
44 #include "syscall_filter.h"
45 #include "syscall_wrapper.h"
46 #include "system.h"
47 #include "util.h"
48 
49 /* Until these are reliably available in linux/prctl.h. */
50 #ifndef PR_ALT_SYSCALL
51 #define PR_ALT_SYSCALL 0x43724f53
52 #endif
53 
54 /* New cgroup namespace might not be in linux-headers yet. */
55 #ifndef CLONE_NEWCGROUP
56 #define CLONE_NEWCGROUP 0x02000000
57 #endif
58 
59 #define MAX_CGROUPS 10 /* 10 different controllers supported by Linux. */
60 
61 #define MAX_RLIMITS 32 /* Currently there are 15 supported by Linux. */
62 
63 #define MAX_PRESERVED_FDS 128U
64 
65 /* Keyctl commands. */
66 #define KEYCTL_JOIN_SESSION_KEYRING 1
67 
68 /*
69  * The userspace equivalent of MNT_USER_SETTABLE_MASK, which is the mask of all
70  * flags that can be modified by MS_REMOUNT.
71  */
72 #define MS_USER_SETTABLE_MASK                                                  \
73 	(MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_NOATIME | MS_NODIRATIME |       \
74 	 MS_RELATIME | MS_RDONLY)
75 
76 /*
77  * TODO(b/235960683): Drop this after CrOS upgrades to glibc >= 2.34
78  * because MS_NOSYMFOLLOW will be defined in sys/mount.h.
79  */
80 #ifndef MS_NOSYMFOLLOW
81 /* Added locally in kernels 4.x+. */
82 #define MS_NOSYMFOLLOW 256
83 #endif
84 
85 struct minijail_rlimit {
86 	int type;
87 	rlim_t cur;
88 	rlim_t max;
89 };
90 
91 struct mountpoint {
92 	char *src;
93 	char *dest;
94 	char *type;
95 	char *data;
96 	int has_data;
97 	unsigned long flags;
98 	struct mountpoint *next;
99 };
100 
101 struct minijail_remount {
102 	unsigned long remount_mode;
103 	char *mount_name;
104 	struct minijail_remount *next;
105 };
106 
107 struct hook {
108 	minijail_hook_t hook;
109 	void *payload;
110 	minijail_hook_event_t event;
111 	struct hook *next;
112 };
113 
114 struct fs_rule {
115 	char *path;
116 	uint64_t landlock_flags;
117 	struct fs_rule *next;
118 };
119 
120 struct preserved_fd {
121 	int parent_fd;
122 	int child_fd;
123 };
124 
125 struct minijail {
126 	/*
127 	 * WARNING: if you add a flag here you need to make sure it's
128 	 * accounted for in minijail_pre{enter|exec}() below.
129 	 */
130 	struct {
131 		bool uid : 1;
132 		bool gid : 1;
133 		bool inherit_suppl_gids : 1;
134 		bool set_suppl_gids : 1;
135 		bool keep_suppl_gids : 1;
136 		bool use_caps : 1;
137 		bool capbset_drop : 1;
138 		bool set_ambient_caps : 1;
139 		bool vfs : 1;
140 		bool enter_vfs : 1;
141 		bool pids : 1;
142 		bool ipc : 1;
143 		bool uts : 1;
144 		bool net : 1;
145 		bool enter_net : 1;
146 		bool ns_cgroups : 1;
147 		bool userns : 1;
148 		bool disable_setgroups : 1;
149 		bool seccomp : 1;
150 		bool remount_proc_ro : 1;
151 		bool no_new_privs : 1;
152 		bool seccomp_filter : 1;
153 		bool seccomp_filter_tsync : 1;
154 		bool seccomp_filter_logging : 1;
155 		bool seccomp_filter_allow_speculation : 1;
156 		bool chroot : 1;
157 		bool pivot_root : 1;
158 		bool mount_dev : 1;
159 		bool mount_tmp : 1;
160 		bool do_init : 1;
161 		bool run_as_init : 1;
162 		bool pid_file : 1;
163 		bool cgroups : 1;
164 		bool alt_syscall : 1;
165 		bool reset_signal_mask : 1;
166 		bool reset_signal_handlers : 1;
167 		bool close_open_fds : 1;
168 		bool new_session_keyring : 1;
169 		bool forward_signals : 1;
170 		bool setsid : 1;
171 	} flags;
172 	uid_t uid;
173 	gid_t gid;
174 	gid_t usergid;
175 	char *user;
176 	size_t suppl_gid_count;
177 	gid_t *suppl_gid_list;
178 	uint64_t caps;
179 	uint64_t cap_bset;
180 	pid_t initpid;
181 	int mountns_fd;
182 	int netns_fd;
183 	char *chrootdir;
184 	char *pid_file_path;
185 	char *uidmap;
186 	char *gidmap;
187 	char *hostname;
188 	char *preload_path;
189 	size_t filter_len;
190 	struct sock_fprog *filter_prog;
191 	char *alt_syscall_table;
192 	struct mountpoint *mounts_head;
193 	struct mountpoint *mounts_tail;
194 	size_t mounts_count;
195 	unsigned long remount_mode;
196 	struct minijail_remount *remounts_head;
197 	struct minijail_remount *remounts_tail;
198 	size_t tmpfs_size;
199 	bool using_minimalistic_mountns;
200 	struct fs_rule *fs_rules_head;
201 	struct fs_rule *fs_rules_tail;
202 	char *cgroups[MAX_CGROUPS];
203 	size_t cgroup_count;
204 	struct minijail_rlimit rlimits[MAX_RLIMITS];
205 	size_t rlimit_count;
206 	uint64_t securebits_skip_mask;
207 	struct hook *hooks_head;
208 	struct hook *hooks_tail;
209 	struct preserved_fd preserved_fds[MAX_PRESERVED_FDS];
210 	size_t preserved_fd_count;
211 	char *seccomp_policy_path;
212 };
213 
214 static void run_hooks_or_die(const struct minijail *j,
215 			     minijail_hook_event_t event);
216 
217 static bool seccomp_is_logging_allowed(const struct minijail *j)
218 {
219 	return seccomp_default_ret_log() || j->flags.seccomp_filter_logging;
220 }
221 
222 static void free_mounts_list(struct minijail *j)
223 {
224 	while (j->mounts_head) {
225 		struct mountpoint *m = j->mounts_head;
226 		j->mounts_head = j->mounts_head->next;
227 		free(m->data);
228 		free(m->type);
229 		free(m->dest);
230 		free(m->src);
231 		free(m);
232 	}
233 	// No need to clear mounts_head as we know it's NULL after the loop.
234 	j->mounts_tail = NULL;
235 }
236 
237 static void free_remounts_list(struct minijail *j)
238 {
239 	while (j->remounts_head) {
240 		struct minijail_remount *m = j->remounts_head;
241 		j->remounts_head = j->remounts_head->next;
242 		free(m->mount_name);
243 		free(m);
244 	}
245 	// No need to clear remounts_head as we know it's NULL after the loop.
246 	j->remounts_tail = NULL;
247 }
248 
249 /*
250  * Writes exactly n bytes from buf to file descriptor fd.
251  * Returns 0 on success or a negative error code on error.
252  */
253 static int write_exactly(int fd, const void *buf, size_t n)
254 {
255 	const char *p = buf;
256 	while (n > 0) {
257 		const ssize_t written = write(fd, p, n);
258 		if (written < 0) {
259 			if (errno == EINTR)
260 				continue;
261 
262 			return -errno;
263 		}
264 
265 		p += written;
266 		n -= written;
267 	}
268 
269 	return 0;
270 }
271 
272 /* Closes *pfd and sets it to -1. */
273 static void close_and_reset(int *pfd)
274 {
275 	if (*pfd != -1)
276 		close(*pfd);
277 	*pfd = -1;
278 }
279 
280 /*
281  * Strip out flags meant for the parent.
282  * We keep things that are not inherited across execve(2) (e.g. capabilities),
283  * or are easier to set after execve(2) (e.g. seccomp filters).
284  */
285 void minijail_preenter(struct minijail *j)
286 {
287 	j->flags.vfs = 0;
288 	j->flags.enter_vfs = 0;
289 	j->flags.ns_cgroups = 0;
290 	j->flags.net = 0;
291 	j->flags.uts = 0;
292 	j->flags.remount_proc_ro = 0;
293 	j->flags.pids = 0;
294 	j->flags.do_init = 0;
295 	j->flags.run_as_init = 0;
296 	j->flags.pid_file = 0;
297 	j->flags.cgroups = 0;
298 	j->flags.forward_signals = 0;
299 	j->flags.setsid = 0;
300 	j->remount_mode = 0;
301 	free_remounts_list(j);
302 }
303 
304 /* Adds a rule for a given path to apply once minijail is entered. */
305 int add_fs_restriction_path(struct minijail *j,
306 		const char *path,
307 		uint64_t landlock_flags)
308 {
309 	struct fs_rule *r = calloc(1, sizeof(*r));
310 	if (!r)
311 		return -ENOMEM;
312 	r->path = strdup(path);
313 	r->landlock_flags = landlock_flags;
314 
315 	if (j->fs_rules_tail) {
316 		j->fs_rules_tail->next = r;
317 		j->fs_rules_tail = r;
318 	} else {
319 		j->fs_rules_head = r;
320 		j->fs_rules_tail = r;
321 	}
322 
323 	return 0;
324 }
325 
326 bool mount_has_bind_flag(struct mountpoint *m) {
327 	return !!(m->flags & MS_BIND);
328 }
329 
330 bool mount_has_readonly_flag(struct mountpoint *m) {
331 	return !!(m->flags & MS_RDONLY);
332 }
333 
334 bool mount_events_allowed(struct mountpoint *m) {
335 	return !!(m->flags & MS_SHARED) || !!(m->flags & MS_SLAVE);
336 }
337 
338 /*
339  * Strip out flags meant for the child.
340  * We keep things that are inherited across execve(2).
341  */
342 void minijail_preexec(struct minijail *j)
343 {
344 	int vfs = j->flags.vfs;
345 	int enter_vfs = j->flags.enter_vfs;
346 	int ns_cgroups = j->flags.ns_cgroups;
347 	int net = j->flags.net;
348 	int uts = j->flags.uts;
349 	int remount_proc_ro = j->flags.remount_proc_ro;
350 	int userns = j->flags.userns;
351 	if (j->user)
352 		free(j->user);
353 	j->user = NULL;
354 	if (j->suppl_gid_list)
355 		free(j->suppl_gid_list);
356 	j->suppl_gid_list = NULL;
357 	if (j->preload_path)
358 		free(j->preload_path);
359 	j->preload_path = NULL;
360 	free_mounts_list(j);
361 	memset(&j->flags, 0, sizeof(j->flags));
362 	/* Now restore anything we meant to keep. */
363 	j->flags.vfs = vfs;
364 	j->flags.enter_vfs = enter_vfs;
365 	j->flags.ns_cgroups = ns_cgroups;
366 	j->flags.net = net;
367 	j->flags.uts = uts;
368 	j->flags.remount_proc_ro = remount_proc_ro;
369 	j->flags.userns = userns;
370 	/* Note, |pids| will already have been used before this call. */
371 }
372 
373 /* Minijail API. */
374 
375 struct minijail API *minijail_new(void)
376 {
377 	struct minijail *j = calloc(1, sizeof(struct minijail));
378 	if (j) {
379 		j->remount_mode = MS_PRIVATE;
380 		j->using_minimalistic_mountns = false;
381 	}
382 	return j;
383 }
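/*
 * Illustrative usage sketch (not part of upstream libminijail.c; the exact
 * launch/teardown entry points such as minijail_run() and minijail_destroy()
 * are declared in libminijail.h and are assumed here):
 *
 *   struct minijail *j = minijail_new();
 *   if (!j)
 *           return -ENOMEM;
 *   minijail_no_new_privs(j);
 *   minijail_namespace_pids(j);
 *   minijail_use_seccomp_filter(j);
 *   minijail_parse_seccomp_filters(j, "/path/to/example.policy");
 *   ... launch the target via the run APIs in libminijail.h ...
 *   minijail_destroy(j);
 */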
384 
385 void API minijail_change_uid(struct minijail *j, uid_t uid)
386 {
387 	if (uid == 0)
388 		die("useless change to uid 0");
389 	j->uid = uid;
390 	j->flags.uid = 1;
391 }
392 
393 void API minijail_change_gid(struct minijail *j, gid_t gid)
394 {
395 	if (gid == 0)
396 		die("useless change to gid 0");
397 	j->gid = gid;
398 	j->flags.gid = 1;
399 }
400 
401 void API minijail_set_supplementary_gids(struct minijail *j, size_t size,
402 					 const gid_t *list)
403 {
404 	size_t i;
405 
406 	if (j->flags.inherit_suppl_gids)
407 		die("cannot inherit *and* set supplementary groups");
408 	if (j->flags.keep_suppl_gids)
409 		die("cannot keep *and* set supplementary groups");
410 
411 	if (size == 0) {
412 		/* Clear supplementary groups. */
413 		j->suppl_gid_list = NULL;
414 		j->suppl_gid_count = 0;
415 		j->flags.set_suppl_gids = 1;
416 		return;
417 	}
418 
419 	/* Copy the gid_t array. */
420 	j->suppl_gid_list = calloc(size, sizeof(gid_t));
421 	if (!j->suppl_gid_list) {
422 		die("failed to allocate internal supplementary group array");
423 	}
424 	for (i = 0; i < size; i++) {
425 		j->suppl_gid_list[i] = list[i];
426 	}
427 	j->suppl_gid_count = size;
428 	j->flags.set_suppl_gids = 1;
429 }
430 
431 void API minijail_keep_supplementary_gids(struct minijail *j)
432 {
433 	j->flags.keep_suppl_gids = 1;
434 }
435 
436 int API minijail_change_user(struct minijail *j, const char *user)
437 {
438 	uid_t uid;
439 	gid_t gid;
440 	int rc = lookup_user(user, &uid, &gid);
441 	if (rc)
442 		return rc;
443 	minijail_change_uid(j, uid);
444 	j->user = strdup(user);
445 	if (!j->user)
446 		return -ENOMEM;
447 	j->usergid = gid;
448 	return 0;
449 }
450 
451 int API minijail_change_group(struct minijail *j, const char *group)
452 {
453 	gid_t gid;
454 	int rc = lookup_group(group, &gid);
455 	if (rc)
456 		return rc;
457 	minijail_change_gid(j, gid);
458 	return 0;
459 }
460 
461 void API minijail_use_seccomp(struct minijail *j)
462 {
463 	j->flags.seccomp = 1;
464 }
465 
466 void API minijail_no_new_privs(struct minijail *j)
467 {
468 	j->flags.no_new_privs = 1;
469 }
470 
471 void API minijail_use_seccomp_filter(struct minijail *j)
472 {
473 	j->flags.seccomp_filter = 1;
474 }
475 
476 void API minijail_set_seccomp_filter_tsync(struct minijail *j)
477 {
478 	if (j->filter_len > 0 && j->filter_prog != NULL) {
479 		die("minijail_set_seccomp_filter_tsync() must be called "
480 		    "before minijail_parse_seccomp_filters()");
481 	}
482 
483 	if (seccomp_is_logging_allowed(j) && !seccomp_ret_log_available()) {
484 		/*
485 		 * If SECCOMP_RET_LOG is not available, we don't want to use
486 		 * SECCOMP_RET_TRAP to both kill the entire process and report
487 		 * failing syscalls, since it will be brittle. Just bail.
488 		 */
489 		die("SECCOMP_RET_LOG not available, cannot use logging with "
490 		    "thread sync at the same time");
491 	}
492 
493 	j->flags.seccomp_filter_tsync = 1;
494 }
495 
496 void API minijail_set_seccomp_filter_allow_speculation(struct minijail *j)
497 {
498 	if (j->filter_len > 0 && j->filter_prog != NULL) {
499 		die("minijail_set_seccomp_filter_allow_speculation() must be "
500 		    "called before minijail_parse_seccomp_filters()");
501 	}
502 
503 	j->flags.seccomp_filter_allow_speculation = 1;
504 }
505 
506 void API minijail_log_seccomp_filter_failures(struct minijail *j)
507 {
508 	if (j->filter_len > 0 && j->filter_prog != NULL) {
509 		die("minijail_log_seccomp_filter_failures() must be called "
510 		    "before minijail_parse_seccomp_filters()");
511 	}
512 
513 	if (j->flags.seccomp_filter_tsync && !seccomp_ret_log_available()) {
514 		/*
515 		 * If SECCOMP_RET_LOG is not available, we don't want to use
516 		 * SECCOMP_RET_TRAP to both kill the entire process and report
517 		 * failing syscalls, since it will be brittle. Just bail.
518 		 */
519 		die("SECCOMP_RET_LOG not available, cannot use thread sync "
520 		    "with logging at the same time");
521 	}
522 
523 	if (debug_logging_allowed()) {
524 		j->flags.seccomp_filter_logging = 1;
525 	} else {
526 		warn("non-debug build: ignoring request to enable seccomp "
527 		     "logging");
528 	}
529 }
530 
531 void API minijail_set_using_minimalistic_mountns(struct minijail *j)
532 {
533 	j->using_minimalistic_mountns = true;
534 }
535 
536 void API minijail_add_minimalistic_mountns_fs_rules(struct minijail *j)
537 {
538 	struct mountpoint *m = j->mounts_head;
539 	bool landlock_enabled_by_profile = false;
540 	if (!j->using_minimalistic_mountns)
541 		return;
542 
543 	/* Apply Landlock rules. */
544 	while (m) {
545 		landlock_enabled_by_profile = true;
546 		minijail_add_fs_restriction_rx(j, m->dest);
547 		/* Allow rw if mounted as writable, or mount flags allow mount events. */
548 		if (!mount_has_readonly_flag(m) || mount_events_allowed(m))
549 			minijail_add_fs_restriction_rw(j, m->dest);
550 		m = m->next;
551 	}
552 	if (landlock_enabled_by_profile) {
553 		minijail_enable_default_fs_restrictions(j);
554 		minijail_add_fs_restriction_edit(j, "/dev");
555 		minijail_add_fs_restriction_ro(j, "/proc");
556 		if (j->flags.vfs)
557 			minijail_add_fs_restriction_rw(j, "/tmp");
558 	}
559 }
560 
561 void API minijail_enable_default_fs_restrictions(struct minijail *j)
562 {
563 	// Common library locations.
564 	minijail_add_fs_restriction_rx(j, "/lib");
565 	minijail_add_fs_restriction_rx(j, "/lib64");
566 	minijail_add_fs_restriction_rx(j, "/usr/lib");
567 	minijail_add_fs_restriction_rx(j, "/usr/lib64");
568 	// Common locations for services invoking Minijail.
569 	minijail_add_fs_restriction_rx(j, "/bin");
570 	minijail_add_fs_restriction_rx(j, "/sbin");
571 	minijail_add_fs_restriction_rx(j, "/usr/sbin");
572 	minijail_add_fs_restriction_rx(j, "/usr/bin");
573 }
574 
575 void API minijail_use_caps(struct minijail *j, uint64_t capmask)
576 {
577 	/*
578 	 * 'minijail_use_caps' configures a runtime-capabilities-only
579 	 * environment, including a bounding set matching the thread's runtime
580 	 * (permitted|inheritable|effective) sets.
581 	 * Therefore, it will override any existing bounding set configurations
582 	 * since the latter would allow gaining extra runtime capabilities from
583 	 * file capabilities.
584 	 */
585 	if (j->flags.capbset_drop) {
586 		warn("overriding bounding set configuration");
587 		j->cap_bset = 0;
588 		j->flags.capbset_drop = 0;
589 	}
590 	j->caps = capmask;
591 	j->flags.use_caps = 1;
592 }
593 
594 void API minijail_capbset_drop(struct minijail *j, uint64_t capmask)
595 {
596 	if (j->flags.use_caps) {
597 		/*
598 		 * 'minijail_use_caps' will have already configured a capability
599 		 * bounding set matching the (permitted|inheritable|effective)
600 		 * sets. Abort if the user tries to configure a separate
601 		 * bounding set. 'minijail_capbset_drop' and 'minijail_use_caps'
602 		 * are mutually exclusive.
603 		 */
604 		die("runtime capabilities already configured, can't drop "
605 		    "bounding set separately");
606 	}
607 	j->cap_bset = capmask;
608 	j->flags.capbset_drop = 1;
609 }
610 
611 void API minijail_set_ambient_caps(struct minijail *j)
612 {
613 	j->flags.set_ambient_caps = 1;
614 }
615 
616 void API minijail_reset_signal_mask(struct minijail *j)
617 {
618 	j->flags.reset_signal_mask = 1;
619 }
620 
621 void API minijail_reset_signal_handlers(struct minijail *j)
622 {
623 	j->flags.reset_signal_handlers = 1;
624 }
625 
626 void API minijail_namespace_vfs(struct minijail *j)
627 {
628 	j->flags.vfs = 1;
629 }
630 
631 void API minijail_namespace_enter_vfs(struct minijail *j, const char *ns_path)
632 {
633 	/* Note: Do not use O_CLOEXEC here.  We'll close it after we use it. */
634 	int ns_fd = open(ns_path, O_RDONLY);
635 	if (ns_fd < 0) {
636 		pdie("failed to open namespace '%s'", ns_path);
637 	}
638 	j->mountns_fd = ns_fd;
639 	j->flags.enter_vfs = 1;
640 }
641 
642 void API minijail_new_session_keyring(struct minijail *j)
643 {
644 	j->flags.new_session_keyring = 1;
645 }
646 
647 void API minijail_skip_setting_securebits(struct minijail *j,
648 					  uint64_t securebits_skip_mask)
649 {
650 	j->securebits_skip_mask = securebits_skip_mask;
651 }
652 
653 void API minijail_remount_mode(struct minijail *j, unsigned long mode)
654 {
655 	j->remount_mode = mode;
656 }
657 
658 void API minijail_skip_remount_private(struct minijail *j)
659 {
660 	j->remount_mode = 0;
661 }
662 
663 void API minijail_namespace_pids(struct minijail *j)
664 {
665 	j->flags.vfs = 1;
666 	j->flags.remount_proc_ro = 1;
667 	j->flags.pids = 1;
668 	j->flags.do_init = 1;
669 }
670 
671 void API minijail_namespace_pids_rw_proc(struct minijail *j)
672 {
673 	j->flags.vfs = 1;
674 	j->flags.pids = 1;
675 	j->flags.do_init = 1;
676 }
677 
678 void API minijail_namespace_ipc(struct minijail *j)
679 {
680 	j->flags.ipc = 1;
681 }
682 
683 void API minijail_namespace_uts(struct minijail *j)
684 {
685 	j->flags.uts = 1;
686 }
687 
688 int API minijail_namespace_set_hostname(struct minijail *j, const char *name)
689 {
690 	if (j->hostname)
691 		return -EINVAL;
692 	minijail_namespace_uts(j);
693 	j->hostname = strdup(name);
694 	if (!j->hostname)
695 		return -ENOMEM;
696 	return 0;
697 }
698 
699 void API minijail_namespace_net(struct minijail *j)
700 {
701 	j->flags.net = 1;
702 }
703 
704 void API minijail_namespace_enter_net(struct minijail *j, const char *ns_path)
705 {
706 	/* Note: Do not use O_CLOEXEC here.  We'll close it after we use it. */
707 	int ns_fd = open(ns_path, O_RDONLY);
708 	if (ns_fd < 0) {
709 		pdie("failed to open namespace '%s'", ns_path);
710 	}
711 	j->netns_fd = ns_fd;
712 	j->flags.enter_net = 1;
713 }
714 
715 void API minijail_namespace_cgroups(struct minijail *j)
716 {
717 	j->flags.ns_cgroups = 1;
718 }
719 
720 void API minijail_close_open_fds(struct minijail *j)
721 {
722 	j->flags.close_open_fds = 1;
723 }
724 
725 void API minijail_remount_proc_readonly(struct minijail *j)
726 {
727 	j->flags.vfs = 1;
728 	j->flags.remount_proc_ro = 1;
729 }
730 
731 void API minijail_namespace_user(struct minijail *j)
732 {
733 	j->flags.userns = 1;
734 }
735 
736 void API minijail_namespace_user_disable_setgroups(struct minijail *j)
737 {
738 	j->flags.disable_setgroups = 1;
739 }
740 
741 int API minijail_uidmap(struct minijail *j, const char *uidmap)
742 {
743 	j->uidmap = strdup(uidmap);
744 	if (!j->uidmap)
745 		return -ENOMEM;
746 	char *ch;
747 	for (ch = j->uidmap; *ch; ch++) {
748 		if (*ch == ',')
749 			*ch = '\n';
750 	}
751 	return 0;
752 }
753 
754 int API minijail_gidmap(struct minijail *j, const char *gidmap)
755 {
756 	j->gidmap = strdup(gidmap);
757 	if (!j->gidmap)
758 		return -ENOMEM;
759 	char *ch;
760 	for (ch = j->gidmap; *ch; ch++) {
761 		if (*ch == ',')
762 			*ch = '\n';
763 	}
764 	return 0;
765 }
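/*
 * Usage sketch (assumed example): the map strings use the kernel's
 * uid_map/gid_map format of "<id-inside-ns> <id-outside-ns> <count>", and the
 * two setters above turn commas into newlines so several entries can be
 * passed in one string.
 *
 *   minijail_namespace_user(j);
 *   minijail_uidmap(j, "0 1000 1");
 *   minijail_gidmap(j, "0 1000 1,100 2000 1");
 */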
766 
767 void API minijail_inherit_usergroups(struct minijail *j)
768 {
769 	j->flags.inherit_suppl_gids = 1;
770 }
771 
772 void API minijail_run_as_init(struct minijail *j)
773 {
774 	/*
775 	 * Since the jailed program will become 'init' in the new PID namespace,
776 	 * Minijail does not need to fork an 'init' process.
777 	 */
778 	j->flags.run_as_init = 1;
779 }
780 
781 int API minijail_enter_chroot(struct minijail *j, const char *dir)
782 {
783 	if (j->chrootdir)
784 		return -EINVAL;
785 	j->chrootdir = strdup(dir);
786 	if (!j->chrootdir)
787 		return -ENOMEM;
788 	j->flags.chroot = 1;
789 	return 0;
790 }
791 
792 int API minijail_enter_pivot_root(struct minijail *j, const char *dir)
793 {
794 	if (j->chrootdir)
795 		return -EINVAL;
796 	j->chrootdir = strdup(dir);
797 	if (!j->chrootdir)
798 		return -ENOMEM;
799 	j->flags.pivot_root = 1;
800 	return 0;
801 }
802 
803 char API *minijail_get_original_path(struct minijail *j,
804 				     const char *path_inside_chroot)
805 {
806 	struct mountpoint *b;
807 
808 	b = j->mounts_head;
809 	while (b) {
810 		/*
811 		 * If |path_inside_chroot| is the exact destination of a
812 		 * mount, then the original path is exactly the source of
813 		 * the mount.
814 		 *  for example: "-b /some/path/exe,/chroot/path/exe"
815 		 *    mount source = /some/path/exe, mount dest =
816 		 *    /chroot/path/exe Then when getting the original path of
817 		 *    "/chroot/path/exe", the source of that mount,
818 		 *    "/some/path/exe" is what should be returned.
819 		 */
820 		if (streq(b->dest, path_inside_chroot))
821 			return strdup(b->src);
822 
823 		/*
824 		 * If |path_inside_chroot| is within the destination path of a
825 		 * mount, take the suffix of the chroot path relative to the
826 		 * mount destination path, and append it to the mount source
827 		 * path.
828 		 */
829 		if (!strncmp(b->dest, path_inside_chroot, strlen(b->dest))) {
830 			const char *relative_path =
831 			    path_inside_chroot + strlen(b->dest);
832 			return path_join(b->src, relative_path);
833 		}
834 		b = b->next;
835 	}
836 
837 	/* If there is a chroot path, append |path_inside_chroot| to that. */
838 	if (j->chrootdir)
839 		return path_join(j->chrootdir, path_inside_chroot);
840 
841 	/* No chroot, so the path outside is the same as it is inside. */
842 	return strdup(path_inside_chroot);
843 }
844 
845 void API minijail_mount_dev(struct minijail *j)
846 {
847 	j->flags.mount_dev = 1;
848 }
849 
850 void API minijail_mount_tmp(struct minijail *j)
851 {
852 	minijail_mount_tmp_size(j, 64 * 1024 * 1024);
853 }
854 
855 void API minijail_mount_tmp_size(struct minijail *j, size_t size)
856 {
857 	j->tmpfs_size = size;
858 	j->flags.mount_tmp = 1;
859 }
860 
861 int API minijail_write_pid_file(struct minijail *j, const char *path)
862 {
863 	j->pid_file_path = strdup(path);
864 	if (!j->pid_file_path)
865 		return -ENOMEM;
866 	j->flags.pid_file = 1;
867 	return 0;
868 }
869 
870 int API minijail_add_to_cgroup(struct minijail *j, const char *path)
871 {
872 	if (j->cgroup_count >= MAX_CGROUPS)
873 		return -ENOMEM;
874 	j->cgroups[j->cgroup_count] = strdup(path);
875 	if (!j->cgroups[j->cgroup_count])
876 		return -ENOMEM;
877 	j->cgroup_count++;
878 	j->flags.cgroups = 1;
879 	return 0;
880 }
881 
882 int API minijail_rlimit(struct minijail *j, int type, rlim_t cur, rlim_t max)
883 {
884 	size_t i;
885 
886 	if (j->rlimit_count >= MAX_RLIMITS)
887 		return -ENOMEM;
888 	/* It's an error if the caller sets the same rlimit multiple times. */
889 	for (i = 0; i < j->rlimit_count; i++) {
890 		if (j->rlimits[i].type == type)
891 			return -EEXIST;
892 	}
893 
894 	j->rlimits[j->rlimit_count].type = type;
895 	j->rlimits[j->rlimit_count].cur = cur;
896 	j->rlimits[j->rlimit_count].max = max;
897 	j->rlimit_count++;
898 	return 0;
899 }
900 
901 int API minijail_forward_signals(struct minijail *j)
902 {
903 	j->flags.forward_signals = 1;
904 	return 0;
905 }
906 
907 int API minijail_create_session(struct minijail *j)
908 {
909 	j->flags.setsid = 1;
910 	return 0;
911 }
912 
913 int API minijail_add_fs_restriction_rx(struct minijail *j, const char *path)
914 {
915 	return !add_fs_restriction_path(j, path,
916 		ACCESS_FS_ROUGHLY_READ_EXECUTE);
917 }
918 
919 int API minijail_add_fs_restriction_ro(struct minijail *j, const char *path)
920 {
921 	return !add_fs_restriction_path(j, path, ACCESS_FS_ROUGHLY_READ);
922 }
923 
924 int API minijail_add_fs_restriction_rw(struct minijail *j, const char *path)
925 {
926 	return !add_fs_restriction_path(j, path,
927 		ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_BASIC_WRITE);
928 }
929 
930 int API minijail_add_fs_restriction_advanced_rw(struct minijail *j,
931 						const char *path)
932 {
933 	return !add_fs_restriction_path(j, path,
934 		ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_FULL_WRITE);
935 }
936 
937 int API minijail_add_fs_restriction_edit(struct minijail *j,
938 						const char *path)
939 {
940 	return !add_fs_restriction_path(j, path,
941 		ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_EDIT);
942 }
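/*
 * Sketch of how the Landlock helpers above compose (assumed example with
 * hypothetical paths): grant read/execute on code, read-only access to
 * config, and write access only where the process needs it.
 *
 *   minijail_enable_default_fs_restrictions(j);
 *   minijail_add_fs_restriction_ro(j, "/etc/example.conf");
 *   minijail_add_fs_restriction_rw(j, "/var/lib/example");
 */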
943 
944 static bool is_valid_bind_path(const char *path)
945 {
946 	if (!block_symlinks_in_bindmount_paths()) {
947 		return true;
948 	}
949 
950 	/*
951 	 * tokenize() will modify both the |prefixes| pointer and the contents
952 	 * of the string, so:
953 	 * -Copy |BINDMOUNT_ALLOWED_PREFIXES| since it lives in .rodata.
954 	 * -Save the original pointer for free()ing.
955 	 */
956 	char *prefixes = strdup(BINDMOUNT_ALLOWED_PREFIXES);
957 	attribute_cleanup_str char *orig_prefixes = prefixes;
958 	(void)orig_prefixes;
959 
960 	char *prefix = NULL;
961 	bool found_prefix = false;
962 	if (!is_canonical_path(path)) {
963 		while ((prefix = tokenize(&prefixes, ",")) != NULL) {
964 			if (path_is_parent(prefix, path)) {
965 				found_prefix = true;
966 				break;
967 			}
968 		}
969 		if (!found_prefix) {
970 			/*
971 			 * If the path does not include one of the allowed
972 			 * prefixes, fail.
973 			 */
974 			warn("path '%s' is not a canonical path", path);
975 			return false;
976 		}
977 	}
978 	return true;
979 }
980 
981 int API minijail_mount_with_data(struct minijail *j, const char *src,
982 				 const char *dest, const char *type,
983 				 unsigned long flags, const char *data)
984 {
985 	struct mountpoint *m;
986 
987 	if (*dest != '/')
988 		return -EINVAL;
989 	m = calloc(1, sizeof(*m));
990 	if (!m)
991 		return -ENOMEM;
992 	m->dest = strdup(dest);
993 	if (!m->dest)
994 		goto error;
995 	m->src = strdup(src);
996 	if (!m->src)
997 		goto error;
998 	m->type = strdup(type);
999 	if (!m->type)
1000 		goto error;
1001 
1002 	if (!data || !data[0]) {
1003 		/*
1004 		 * Set up secure defaults for certain filesystems.  Adding this
1005 		 * fs-specific logic here kind of sucks, but considering how
1006 		 * people use these in practice, it's probably OK.  If they want
1007 		 * the kernel defaults, they can pass data="" instead of NULL.
1008 		 */
1009 		if (streq(type, "tmpfs")) {
1010 			/* tmpfs defaults to mode=1777 and size=50%. */
1011 			data = "mode=0755,size=10M";
1012 		}
1013 	}
1014 	if (data) {
1015 		m->data = strdup(data);
1016 		if (!m->data)
1017 			goto error;
1018 		m->has_data = 1;
1019 	}
1020 
1021 	/* If they don't specify any flags, default to secure ones. */
1022 	if (flags == 0)
1023 		flags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
1024 	m->flags = flags;
1025 
1026 	/*
1027 	 * Unless asked to enter an existing namespace, force vfs namespacing
1028 	 * so the mounts don't leak out into the containing vfs namespace.
1029 	 * If Minijail is being asked to enter the root vfs namespace this will
1030 	 * leak mounts, but it's unlikely that the user would ask to do that by
1031 	 * mistake.
1032 	 */
1033 	if (!j->flags.enter_vfs)
1034 		minijail_namespace_vfs(j);
1035 
1036 	if (j->mounts_tail)
1037 		j->mounts_tail->next = m;
1038 	else
1039 		j->mounts_head = m;
1040 	j->mounts_tail = m;
1041 	j->mounts_count++;
1042 
1043 	return 0;
1044 
1045 error:
1046 	free(m->type);
1047 	free(m->src);
1048 	free(m->dest);
1049 	free(m);
1050 	return -ENOMEM;
1051 }
1052 
1053 int API minijail_mount(struct minijail *j, const char *src, const char *dest,
1054 		       const char *type, unsigned long flags)
1055 {
1056 	return minijail_mount_with_data(j, src, dest, type, flags, NULL);
1057 }
1058 
1059 int API minijail_bind(struct minijail *j, const char *src, const char *dest,
1060 		      int writeable)
1061 {
1062 	unsigned long flags = MS_BIND;
1063 
1064 	/*
1065 	 * Check for symlinks in bind-mount source paths to warn the user early.
1066 	 * Minijail will perform one final check immediately before the mount()
1067 	 * call.
1068 	 */
1069 	if (!is_valid_bind_path(src)) {
1070 		warn("src '%s' is not a valid bind mount path", src);
1071 		return -ELOOP;
1072 	}
1073 
1074 	/*
1075 	 * Symlinks in |dest| are blocked by the ChromiumOS LSM:
1076 	 * <kernel>/security/chromiumos/lsm.c#77
1077 	 */
1078 
1079 	if (!writeable)
1080 		flags |= MS_RDONLY;
1081 
1082 	/*
1083 	 * |type| is ignored for bind mounts, use it to signal that this mount
1084 	 * came from minijail_bind().
1085 	 * TODO(b/238362528): Implement a better way to signal this.
1086 	 */
1087 	return minijail_mount(j, src, dest, "minijail_bind", flags);
1088 }
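/*
 * Mount usage sketch (assumed example, hypothetical paths): a read-only bind
 * mount plus a tmpfs that picks up the secure defaults chosen in
 * minijail_mount_with_data() above (MS_NODEV|MS_NOEXEC|MS_NOSUID and
 * "mode=0755,size=10M" when flags and data are left unset).
 *
 *   minijail_bind(j, "/run/example", "/run/example", 0);
 *   minijail_mount_with_data(j, "tmpfs", "/tmp", "tmpfs", 0, NULL);
 */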
1089 
1090 int API minijail_add_remount(struct minijail *j, const char *mount_name,
1091 			     unsigned long remount_mode)
1092 {
1093 	struct minijail_remount *m;
1094 
1095 	if (*mount_name != '/')
1096 		return -EINVAL;
1097 	m = calloc(1, sizeof(*m));
1098 	if (!m)
1099 		return -ENOMEM;
1100 	m->mount_name = strdup(mount_name);
1101 	if (!m->mount_name) {
1102 		free(m);
1103 		return -ENOMEM;
1104 	}
1105 
1106 	m->remount_mode = remount_mode;
1107 
1108 	if (j->remounts_tail)
1109 		j->remounts_tail->next = m;
1110 	else
1111 		j->remounts_head = m;
1112 	j->remounts_tail = m;
1113 
1114 	return 0;
1115 }
1116 
1117 int API minijail_add_hook(struct minijail *j, minijail_hook_t hook,
1118 			  void *payload, minijail_hook_event_t event)
1119 {
1120 	struct hook *c;
1121 
1122 	if (hook == NULL)
1123 		return -EINVAL;
1124 	if (event >= MINIJAIL_HOOK_EVENT_MAX)
1125 		return -EINVAL;
1126 	c = calloc(1, sizeof(*c));
1127 	if (!c)
1128 		return -ENOMEM;
1129 
1130 	c->hook = hook;
1131 	c->payload = payload;
1132 	c->event = event;
1133 
1134 	if (j->hooks_tail)
1135 		j->hooks_tail->next = c;
1136 	else
1137 		j->hooks_head = c;
1138 	j->hooks_tail = c;
1139 
1140 	return 0;
1141 }
1142 
1143 int API minijail_preserve_fd(struct minijail *j, int parent_fd, int child_fd)
1144 {
1145 	if (parent_fd < 0 || child_fd < 0)
1146 		return -EINVAL;
1147 	if (j->preserved_fd_count >= MAX_PRESERVED_FDS)
1148 		return -ENOMEM;
1149 	j->preserved_fds[j->preserved_fd_count].parent_fd = parent_fd;
1150 	j->preserved_fds[j->preserved_fd_count].child_fd = child_fd;
1151 	j->preserved_fd_count++;
1152 	return 0;
1153 }
1154 
1155 int API minijail_set_preload_path(struct minijail *j, const char *preload_path)
1156 {
1157 	if (j->preload_path)
1158 		return -EINVAL;
1159 	j->preload_path = strdup(preload_path);
1160 	if (!j->preload_path)
1161 		return -ENOMEM;
1162 	return 0;
1163 }
1164 
1165 static void clear_seccomp_options(struct minijail *j)
1166 {
1167 	j->flags.seccomp_filter = 0;
1168 	j->flags.seccomp_filter_tsync = 0;
1169 	j->flags.seccomp_filter_logging = 0;
1170 	j->flags.seccomp_filter_allow_speculation = 0;
1171 	j->filter_len = 0;
1172 	j->filter_prog = NULL;
1173 	j->flags.no_new_privs = 0;
1174 	if (j->seccomp_policy_path) {
1175 		free(j->seccomp_policy_path);
1176 	}
1177 	j->seccomp_policy_path = NULL;
1178 }
1179 
1180 static int seccomp_should_use_filters(struct minijail *j)
1181 {
1182 	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL) == -1) {
1183 		/*
1184 		 * |errno| will be set to EINVAL when seccomp has not been
1185 		 * compiled into the kernel. On certain platforms and kernel
1186 		 * versions this is not a fatal failure. In that case, and only
1187 		 * in that case, disable seccomp and skip loading the filters.
1188 		 */
1189 		if ((errno == EINVAL) && seccomp_can_softfail()) {
1190 			warn("not loading seccomp filters, seccomp filter not "
1191 			     "supported");
1192 			clear_seccomp_options(j);
1193 			return 0;
1194 		}
1195 		/*
1196 		 * If |errno| != EINVAL or seccomp_can_softfail() is false,
1197 		 * we can proceed. Worst case scenario minijail_enter() will
1198 		 * abort() if seccomp fails.
1199 		 */
1200 	}
1201 	if (j->flags.seccomp_filter_tsync) {
1202 		/* Are the seccomp(2) syscall and the TSYNC option supported? */
1203 		if (sys_seccomp(SECCOMP_SET_MODE_FILTER,
1204 				SECCOMP_FILTER_FLAG_TSYNC, NULL) == -1) {
1205 			int saved_errno = errno;
1206 			if (saved_errno == ENOSYS && seccomp_can_softfail()) {
1207 				warn("seccomp(2) syscall not supported");
1208 				clear_seccomp_options(j);
1209 				return 0;
1210 			} else if (saved_errno == EINVAL &&
1211 				   seccomp_can_softfail()) {
1212 				warn(
1213 				    "seccomp filter thread sync not supported");
1214 				clear_seccomp_options(j);
1215 				return 0;
1216 			}
1217 			/*
1218 			 * Similar logic here. If seccomp_can_softfail() is
1219 			 * false, or |errno| != ENOSYS, or |errno| != EINVAL,
1220 			 * we can proceed. Worst case scenario minijail_enter()
1221 			 * will abort() if seccomp or TSYNC fail.
1222 			 */
1223 		}
1224 	}
1225 	if (j->flags.seccomp_filter_allow_speculation) {
1226 		/* Is the SPEC_ALLOW flag supported? */
1227 		if (!seccomp_filter_flags_available(
1228 			SECCOMP_FILTER_FLAG_SPEC_ALLOW)) {
1229 			warn("allowing speculative execution on seccomp "
1230 			     "processes not supported");
1231 			j->flags.seccomp_filter_allow_speculation = 0;
1232 		}
1233 	}
1234 	return 1;
1235 }
1236 
1237 static int set_seccomp_filters_internal(struct minijail *j,
1238 					const struct sock_fprog *filter,
1239 					bool owned)
1240 {
1241 	struct sock_fprog *fprog;
1242 
1243 	if (owned) {
1244 		/*
1245 		 * If |owned| is true, it's OK to cast away the const-ness since
1246 		 * we'll own the pointer going forward.
1247 		 */
1248 		fprog = (struct sock_fprog *)filter;
1249 	} else {
1250 		fprog = malloc(sizeof(struct sock_fprog));
1251 		if (!fprog)
1252 			return -ENOMEM;
1253 		fprog->len = filter->len;
1254 		fprog->filter = malloc(sizeof(struct sock_filter) * fprog->len);
1255 		if (!fprog->filter) {
1256 			free(fprog);
1257 			return -ENOMEM;
1258 		}
1259 		memcpy(fprog->filter, filter->filter,
1260 		       sizeof(struct sock_filter) * fprog->len);
1261 	}
1262 
1263 	if (j->filter_prog) {
1264 		free(j->filter_prog->filter);
1265 		free(j->filter_prog);
1266 	}
1267 
1268 	j->filter_len = fprog->len;
1269 	j->filter_prog = fprog;
1270 	return 0;
1271 }
1272 
1273 static int parse_seccomp_filters(struct minijail *j, const char *filename,
1274 				 FILE *policy_file)
1275 {
1276 	struct sock_fprog *fprog = malloc(sizeof(struct sock_fprog));
1277 	if (!fprog)
1278 		return -ENOMEM;
1279 
1280 	struct filter_options filteropts;
1281 
1282 	/*
1283 	 * Figure out filter options.
1284 	 * Allow logging?
1285 	 */
1286 	filteropts.allow_logging =
1287 	    debug_logging_allowed() && seccomp_is_logging_allowed(j);
1288 
1289 	/* What to do on a blocked system call? */
1290 	if (filteropts.allow_logging) {
1291 		if (seccomp_ret_log_available())
1292 			filteropts.action = ACTION_RET_LOG;
1293 		else
1294 			filteropts.action = ACTION_RET_TRAP;
1295 	} else {
1296 		if (j->flags.seccomp_filter_tsync) {
1297 			if (seccomp_ret_kill_process_available()) {
1298 				filteropts.action = ACTION_RET_KILL_PROCESS;
1299 			} else {
1300 				filteropts.action = ACTION_RET_TRAP;
1301 			}
1302 		} else {
1303 			filteropts.action = ACTION_RET_KILL;
1304 		}
1305 	}
1306 
1307 	/*
1308 	 * If SECCOMP_RET_LOG is not available, need to allow extra syscalls
1309 	 * for logging.
1310 	 */
1311 	filteropts.allow_syscalls_for_logging =
1312 	    filteropts.allow_logging && !seccomp_ret_log_available();
1313 
1314 	/* Whether to fail on duplicate syscalls. */
1315 	filteropts.allow_duplicate_syscalls = allow_duplicate_syscalls();
1316 
1317 	if (compile_filter(filename, policy_file, fprog, &filteropts)) {
1318 		free(fprog);
1319 		return -1;
1320 	}
1321 
1322 	return set_seccomp_filters_internal(j, fprog, true /* owned */);
1323 }
1324 
1325 void API minijail_parse_seccomp_filters(struct minijail *j, const char *path)
1326 {
1327 	if (!seccomp_should_use_filters(j))
1328 		return;
1329 
1330 	attribute_cleanup_fp FILE *file = fopen(path, "re");
1331 	if (!file) {
1332 		pdie("failed to open seccomp filter file '%s'", path);
1333 	}
1334 
1335 	if (parse_seccomp_filters(j, path, file) != 0) {
1336 		die("failed to compile seccomp filter BPF program in '%s'",
1337 		    path);
1338 	}
1339 	if (j->seccomp_policy_path) {
1340 		free(j->seccomp_policy_path);
1341 	}
1342 	j->seccomp_policy_path = strdup(path);
1343 }
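/*
 * Ordering note with an assumed usage sketch: the tsync and logging setters
 * above die() when called after a filter has already been compiled, so option
 * setup comes first and the parse call last. The policy path is hypothetical.
 *
 *   minijail_use_seccomp_filter(j);
 *   minijail_log_seccomp_filter_failures(j);
 *   minijail_parse_seccomp_filters(j, "/path/to/example.policy");
 */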
1344 
1345 void API minijail_parse_seccomp_filters_from_fd(struct minijail *j, int fd)
1346 {
1347 	char *fd_path, *path;
1348 	attribute_cleanup_fp FILE *file = NULL;
1349 
1350 	if (!seccomp_should_use_filters(j))
1351 		return;
1352 
1353 	file = fdopen(fd, "r");
1354 	if (!file) {
1355 		pdie("failed to associate stream with fd %d", fd);
1356 	}
1357 
1358 	if (asprintf(&fd_path, "/proc/self/fd/%d", fd) == -1)
1359 		pdie("failed to create path for fd %d", fd);
1360 	path = realpath(fd_path, NULL);
1361 	if (path == NULL)
1362 		pwarn("failed to get path of fd %d", fd);
1363 	free(fd_path);
1364 
1365 	if (parse_seccomp_filters(j, path ? path : "<fd>", file) != 0) {
1366 		die("failed to compile seccomp filter BPF program from fd %d",
1367 		    fd);
1368 	}
1369 	if (j->seccomp_policy_path) {
1370 		free(j->seccomp_policy_path);
1371 	}
1372 	j->seccomp_policy_path = path;
1373 }
1374 
1375 void API minijail_set_seccomp_filters(struct minijail *j,
1376 				      const struct sock_fprog *filter)
1377 {
1378 	if (!seccomp_should_use_filters(j))
1379 		return;
1380 
1381 	if (seccomp_is_logging_allowed(j)) {
1382 		die("minijail_log_seccomp_filter_failures() is incompatible "
1383 		    "with minijail_set_seccomp_filters()");
1384 	}
1385 
1386 	/*
1387 	 * set_seccomp_filters_internal() can only fail with ENOMEM.
1388 	 * Furthermore, since we won't own the incoming filter, it will not be
1389 	 * modified.
1390 	 */
1391 	if (set_seccomp_filters_internal(j, filter, false /* owned */) < 0) {
1392 		die("failed to set seccomp filter");
1393 	}
1394 }
1395 
1396 int API minijail_use_alt_syscall(struct minijail *j, const char *table)
1397 {
1398 	j->alt_syscall_table = strdup(table);
1399 	if (!j->alt_syscall_table)
1400 		return -ENOMEM;
1401 	j->flags.alt_syscall = 1;
1402 	return 0;
1403 }
1404 
1405 struct marshal_state {
1406 	size_t available;
1407 	size_t total;
1408 	char *buf;
1409 };
1410 
1411 static void marshal_state_init(struct marshal_state *state, char *buf,
1412 			       size_t available)
1413 {
1414 	state->available = available;
1415 	state->buf = buf;
1416 	state->total = 0;
1417 }
1418 
1419 static void marshal_append(struct marshal_state *state, const void *src,
1420 			   size_t length)
1421 {
1422 	size_t copy_len = MIN(state->available, length);
1423 
1424 	/* Up to |available| will be written. */
1425 	if (copy_len) {
1426 		memcpy(state->buf, src, copy_len);
1427 		state->buf += copy_len;
1428 		state->available -= copy_len;
1429 	}
1430 	/* |total| will contain the expected length. */
1431 	state->total += length;
1432 }
1433 
1434 static void marshal_append_string(struct marshal_state *state, const char *src)
1435 {
1436 	marshal_append(state, src, strlen(src) + 1);
1437 }
1438 
1439 static void marshal_mount(struct marshal_state *state,
1440 			  const struct mountpoint *m)
1441 {
1442 	marshal_append(state, m->src, strlen(m->src) + 1);
1443 	marshal_append(state, m->dest, strlen(m->dest) + 1);
1444 	marshal_append(state, m->type, strlen(m->type) + 1);
1445 	marshal_append(state, (char *)&m->has_data, sizeof(m->has_data));
1446 	if (m->has_data)
1447 		marshal_append(state, m->data, strlen(m->data) + 1);
1448 	marshal_append(state, (char *)&m->flags, sizeof(m->flags));
1449 }
1450 
1451 static void minijail_marshal_helper(struct marshal_state *state,
1452 				    const struct minijail *j)
1453 {
1454 	struct mountpoint *m = NULL;
1455 	size_t i;
1456 
1457 	marshal_append(state, (char *)j, sizeof(*j));
1458 	if (j->user)
1459 		marshal_append_string(state, j->user);
1460 	if (j->suppl_gid_list) {
1461 		marshal_append(state, j->suppl_gid_list,
1462 			       j->suppl_gid_count * sizeof(gid_t));
1463 	}
1464 	if (j->chrootdir)
1465 		marshal_append_string(state, j->chrootdir);
1466 	if (j->hostname)
1467 		marshal_append_string(state, j->hostname);
1468 	if (j->alt_syscall_table) {
1469 		marshal_append(state, j->alt_syscall_table,
1470 			       strlen(j->alt_syscall_table) + 1);
1471 	}
1472 	if (j->flags.seccomp_filter && j->filter_prog) {
1473 		struct sock_fprog *fp = j->filter_prog;
1474 		marshal_append(state, (char *)fp->filter,
1475 			       fp->len * sizeof(struct sock_filter));
1476 	}
1477 	for (m = j->mounts_head; m; m = m->next) {
1478 		marshal_mount(state, m);
1479 	}
1480 	for (i = 0; i < j->cgroup_count; ++i)
1481 		marshal_append_string(state, j->cgroups[i]);
1482 	if (j->seccomp_policy_path)
1483 		marshal_append_string(state, j->seccomp_policy_path);
1484 }
1485 
1486 size_t API minijail_size(const struct minijail *j)
1487 {
1488 	struct marshal_state state;
1489 	marshal_state_init(&state, NULL, 0);
1490 	minijail_marshal_helper(&state, j);
1491 	return state.total;
1492 }
1493 
1494 int minijail_marshal(const struct minijail *j, char *buf, size_t available)
1495 {
1496 	struct marshal_state state;
1497 	marshal_state_init(&state, buf, available);
1498 	minijail_marshal_helper(&state, j);
1499 	return (state.total > available);
1500 }
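/*
 * Marshal round-trip sketch (assumed example): size the buffer with
 * minijail_size(), marshal into it, then rebuild the jail elsewhere with
 * minijail_unmarshal() below.
 *
 *   size_t sz = minijail_size(j);
 *   char *buf = malloc(sz);
 *   if (!buf || minijail_marshal(j, buf, sz))
 *           abort();
 *   ... transfer |buf| (e.g. over a pipe) ...
 *   struct minijail *copy = minijail_new();
 *   if (minijail_unmarshal(copy, buf, sz))
 *           abort();
 */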
1501 
1502 int minijail_unmarshal(struct minijail *j, char *serialized, size_t length)
1503 {
1504 	size_t i;
1505 	size_t count;
1506 	int ret = -EINVAL;
1507 
1508 	if (length < sizeof(*j))
1509 		goto out;
1510 	memcpy((void *)j, serialized, sizeof(*j));
1511 	serialized += sizeof(*j);
1512 	length -= sizeof(*j);
1513 
1514 	/* Potentially stale pointers not used as signals. */
1515 	j->preload_path = NULL;
1516 	j->pid_file_path = NULL;
1517 	j->uidmap = NULL;
1518 	j->gidmap = NULL;
1519 	j->mounts_head = NULL;
1520 	j->mounts_tail = NULL;
1521 	j->remounts_head = NULL;
1522 	j->remounts_tail = NULL;
1523 	j->filter_prog = NULL;
1524 	j->hooks_head = NULL;
1525 	j->hooks_tail = NULL;
1526 	j->fs_rules_head = NULL;
1527 	j->fs_rules_tail = NULL;
1528 
1529 	if (j->user) { /* stale pointer */
1530 		char *user = consumestr(&serialized, &length);
1531 		if (!user)
1532 			goto clear_pointers;
1533 		j->user = strdup(user);
1534 		if (!j->user)
1535 			goto clear_pointers;
1536 	}
1537 
1538 	if (j->suppl_gid_list) { /* stale pointer */
1539 		if (j->suppl_gid_count > NGROUPS_MAX) {
1540 			goto bad_gid_list;
1541 		}
1542 		size_t gid_list_size = j->suppl_gid_count * sizeof(gid_t);
1543 		void *gid_list_bytes =
1544 		    consumebytes(gid_list_size, &serialized, &length);
1545 		if (!gid_list_bytes)
1546 			goto bad_gid_list;
1547 
1548 		j->suppl_gid_list = calloc(j->suppl_gid_count, sizeof(gid_t));
1549 		if (!j->suppl_gid_list)
1550 			goto bad_gid_list;
1551 
1552 		memcpy(j->suppl_gid_list, gid_list_bytes, gid_list_size);
1553 	}
1554 
1555 	if (j->chrootdir) { /* stale pointer */
1556 		char *chrootdir = consumestr(&serialized, &length);
1557 		if (!chrootdir)
1558 			goto bad_chrootdir;
1559 		j->chrootdir = strdup(chrootdir);
1560 		if (!j->chrootdir)
1561 			goto bad_chrootdir;
1562 	}
1563 
1564 	if (j->hostname) { /* stale pointer */
1565 		char *hostname = consumestr(&serialized, &length);
1566 		if (!hostname)
1567 			goto bad_hostname;
1568 		j->hostname = strdup(hostname);
1569 		if (!j->hostname)
1570 			goto bad_hostname;
1571 	}
1572 
1573 	if (j->alt_syscall_table) { /* stale pointer */
1574 		char *alt_syscall_table = consumestr(&serialized, &length);
1575 		if (!alt_syscall_table)
1576 			goto bad_syscall_table;
1577 		j->alt_syscall_table = strdup(alt_syscall_table);
1578 		if (!j->alt_syscall_table)
1579 			goto bad_syscall_table;
1580 	}
1581 
1582 	if (j->flags.seccomp_filter && j->filter_len > 0) {
1583 		size_t ninstrs = j->filter_len;
1584 		if (ninstrs > (SIZE_MAX / sizeof(struct sock_filter)) ||
1585 		    ninstrs > USHRT_MAX)
1586 			goto bad_filters;
1587 
1588 		size_t program_len = ninstrs * sizeof(struct sock_filter);
1589 		void *program = consumebytes(program_len, &serialized, &length);
1590 		if (!program)
1591 			goto bad_filters;
1592 
1593 		j->filter_prog = malloc(sizeof(struct sock_fprog));
1594 		if (!j->filter_prog)
1595 			goto bad_filters;
1596 
1597 		j->filter_prog->len = ninstrs;
1598 		j->filter_prog->filter = malloc(program_len);
1599 		if (!j->filter_prog->filter)
1600 			goto bad_filter_prog_instrs;
1601 
1602 		memcpy(j->filter_prog->filter, program, program_len);
1603 	}
1604 
1605 	count = j->mounts_count;
1606 	j->mounts_count = 0;
1607 	for (i = 0; i < count; ++i) {
1608 		unsigned long *flags;
1609 		int *has_data;
1610 		const char *dest;
1611 		const char *type;
1612 		const char *data = NULL;
1613 		const char *src = consumestr(&serialized, &length);
1614 		if (!src)
1615 			goto bad_mounts;
1616 		dest = consumestr(&serialized, &length);
1617 		if (!dest)
1618 			goto bad_mounts;
1619 		type = consumestr(&serialized, &length);
1620 		if (!type)
1621 			goto bad_mounts;
1622 		has_data =
1623 		    consumebytes(sizeof(*has_data), &serialized, &length);
1624 		if (!has_data)
1625 			goto bad_mounts;
1626 		if (*has_data) {
1627 			data = consumestr(&serialized, &length);
1628 			if (!data)
1629 				goto bad_mounts;
1630 		}
1631 		flags = consumebytes(sizeof(*flags), &serialized, &length);
1632 		if (!flags)
1633 			goto bad_mounts;
1634 		if (minijail_mount_with_data(j, src, dest, type, *flags, data))
1635 			goto bad_mounts;
1636 	}
1637 
1638 	count = j->cgroup_count;
1639 	j->cgroup_count = 0;
1640 	for (i = 0; i < count; ++i) {
1641 		char *cgroup = consumestr(&serialized, &length);
1642 		if (!cgroup)
1643 			goto bad_cgroups;
1644 		j->cgroups[i] = strdup(cgroup);
1645 		if (!j->cgroups[i])
1646 			goto bad_cgroups;
1647 		++j->cgroup_count;
1648 	}
1649 
1650 	if (j->seccomp_policy_path) { /* stale pointer */
1651 		char *seccomp_policy_path = consumestr(&serialized, &length);
1652 		if (!seccomp_policy_path)
1653 			goto bad_cgroups;
1654 		j->seccomp_policy_path = strdup(seccomp_policy_path);
1655 		if (!j->seccomp_policy_path)
1656 			goto bad_cgroups;
1657 	}
1658 
1659 	return 0;
1660 
1661 	/*
1662 	 * If more is added after j->seccomp_policy_path, then this is needed:
1663 	 * if (j->seccomp_policy_path)
1664 	 * 	free(j->seccomp_policy_path);
1665 	 */
1666 
1667 bad_cgroups:
1668 	free_mounts_list(j);
1669 	free_remounts_list(j);
1670 	for (i = 0; i < j->cgroup_count; ++i)
1671 		free(j->cgroups[i]);
1672 bad_mounts:
1673 	if (j->filter_prog && j->filter_prog->filter)
1674 		free(j->filter_prog->filter);
1675 bad_filter_prog_instrs:
1676 	if (j->filter_prog)
1677 		free(j->filter_prog);
1678 bad_filters:
1679 	if (j->alt_syscall_table)
1680 		free(j->alt_syscall_table);
1681 bad_syscall_table:
1682 	if (j->hostname)
1683 		free(j->hostname);
1684 bad_hostname:
1685 	if (j->chrootdir)
1686 		free(j->chrootdir);
1687 bad_chrootdir:
1688 	if (j->suppl_gid_list)
1689 		free(j->suppl_gid_list);
1690 bad_gid_list:
1691 	if (j->user)
1692 		free(j->user);
1693 clear_pointers:
1694 	j->user = NULL;
1695 	j->suppl_gid_list = NULL;
1696 	j->chrootdir = NULL;
1697 	j->hostname = NULL;
1698 	j->alt_syscall_table = NULL;
1699 	j->cgroup_count = 0;
1700 	j->seccomp_policy_path = NULL;
1701 out:
1702 	return ret;
1703 }
1704 
1705 struct dev_spec {
1706 	const char *name;
1707 	mode_t mode;
1708 	dev_t major, minor;
1709 };
1710 
1711 // clang-format off
1712 static const struct dev_spec device_nodes[] = {
1713     {
1714 	"null",
1715 	S_IFCHR | 0666, 1, 3,
1716     },
1717     {
1718 	"zero",
1719 	S_IFCHR | 0666, 1, 5,
1720     },
1721     {
1722 	"full",
1723 	S_IFCHR | 0666, 1, 7,
1724     },
1725     {
1726 	"urandom",
1727 	S_IFCHR | 0444, 1, 9,
1728     },
1729     {
1730 	"tty",
1731 	S_IFCHR | 0666, 5, 0,
1732     },
1733 };
1734 // clang-format on
1735 
1736 struct dev_sym_spec {
1737 	const char *source, *dest;
1738 };
1739 
1740 static const struct dev_sym_spec device_symlinks[] = {
1741     {
1742 	"ptmx",
1743 	"pts/ptmx",
1744     },
1745     {
1746 	"fd",
1747 	"/proc/self/fd",
1748     },
1749     {
1750 	"stdin",
1751 	"fd/0",
1752     },
1753     {
1754 	"stdout",
1755 	"fd/1",
1756     },
1757     {
1758 	"stderr",
1759 	"fd/2",
1760     },
1761 };
1762 
1763 /*
1764  * Clean up the temporary dev path we had setup previously.  In case of errors,
1765  * we don't want to go leaking empty tempdirs.
1766  */
1767 static void mount_dev_cleanup(char *dev_path)
1768 {
1769 	umount2(dev_path, MNT_DETACH);
1770 	rmdir(dev_path);
1771 	free(dev_path);
1772 }
1773 
1774 /*
1775  * Set up the pseudo /dev path at the temporary location.
1776  * See mount_dev_finalize for more details.
1777  */
1778 static int mount_dev(char **dev_path_ret)
1779 {
1780 	int ret;
1781 	attribute_cleanup_fd int dev_fd = -1;
1782 	size_t i;
1783 	mode_t mask;
1784 	char *dev_path;
1785 
1786 	/*
1787 	 * Create a temp path for the /dev init.  We'll relocate this to the
1788 	 * final location later on in the startup process.
1789 	 */
1790 	dev_path = *dev_path_ret = strdup("/tmp/minijail.dev.XXXXXX");
1791 	if (dev_path == NULL || mkdtemp(dev_path) == NULL)
1792 		pdie("could not create temp path for /dev");
1793 
1794 	/* Set up the empty /dev mount point first. */
1795 	ret = mount("minijail-devfs", dev_path, "tmpfs", MS_NOEXEC | MS_NOSUID,
1796 		    "size=5M,mode=755");
1797 	if (ret) {
1798 		rmdir(dev_path);
1799 		return ret;
1800 	}
1801 
1802 	/* We want to set the mode directly from the spec. */
1803 	mask = umask(0);
1804 
1805 	/* Get a handle to the temp dev path for *at funcs below. */
1806 	dev_fd = open(dev_path, O_DIRECTORY | O_PATH | O_CLOEXEC);
1807 	if (dev_fd < 0) {
1808 		ret = 1;
1809 		goto done;
1810 	}
1811 
1812 	/* Create all the nodes in /dev. */
1813 	for (i = 0; i < ARRAY_SIZE(device_nodes); ++i) {
1814 		const struct dev_spec *ds = &device_nodes[i];
1815 		ret = mknodat(dev_fd, ds->name, ds->mode,
1816 			      makedev(ds->major, ds->minor));
1817 		if (ret)
1818 			goto done;
1819 	}
1820 
1821 	/* Create all the symlinks in /dev. */
1822 	for (i = 0; i < ARRAY_SIZE(device_symlinks); ++i) {
1823 		const struct dev_sym_spec *ds = &device_symlinks[i];
1824 		ret = symlinkat(ds->dest, dev_fd, ds->source);
1825 		if (ret)
1826 			goto done;
1827 	}
1828 
1829 	/* Create empty dir for glibc shared mem APIs. */
1830 	ret = mkdirat(dev_fd, "shm", 01777);
1831 	if (ret)
1832 		goto done;
1833 
1834 	/* Restore old mask. */
1835 done:
1836 	umask(mask);
1837 
1838 	if (ret)
1839 		mount_dev_cleanup(dev_path);
1840 
1841 	return ret;
1842 }
1843 
1844 /*
1845  * Relocate the temporary /dev mount to its final /dev place.
1846  * We have to do this as a two-step process so people can bind mount extra
1847  * /dev paths like /dev/log.
1848  */
1849 static int mount_dev_finalize(const struct minijail *j, char *dev_path)
1850 {
1851 	int ret = -1;
1852 	char *dest = NULL;
1853 
1854 	/* Unmount the /dev mount if possible. */
1855 	if (umount2("/dev", MNT_DETACH))
1856 		goto done;
1857 
1858 	if (asprintf(&dest, "%s/dev", j->chrootdir ?: "") < 0)
1859 		goto done;
1860 
1861 	if (mount(dev_path, dest, NULL, MS_MOVE, NULL))
1862 		goto done;
1863 
1864 	ret = 0;
1865 done:
1866 	free(dest);
1867 	mount_dev_cleanup(dev_path);
1868 
1869 	return ret;
1870 }
1871 
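/*
 * Editorial sketch (not part of the original source): from the caller's side,
 * this two-step /dev setup is what lets extra nodes be bound in before the
 * MS_MOVE into place, assuming the public minijail_mount_dev() and
 * minijail_bind() setters behave as their names suggest:
 *
 *   struct minijail *j = minijail_new();
 *   minijail_namespace_vfs(j);
 *   minijail_mount_dev(j);                        // pseudo /dev in a tempdir
 *   minijail_bind(j, "/dev/log", "/dev/log", 0);  // lands inside the temp /dev
 *   // ... later, minijail_enter(j) or one of the minijail_run*() calls.
 */
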
1872 /*
1873  * mount_one: Applies mounts from @m for @j, recursing as needed.
1874  * @j Minijail these mounts are for
1875  * @m Head of list of mounts
1876  *
1877  * Returns 0 for success.
1878  */
1879 static int mount_one(const struct minijail *j, struct mountpoint *m,
1880 		     const char *dev_path)
1881 {
1882 	int ret;
1883 	char *dest;
1884 	bool do_remount = false;
1885 	bool has_bind_flag = mount_has_bind_flag(m);
1886 	bool has_remount_flag = !!(m->flags & MS_REMOUNT);
1887 	unsigned long original_mnt_flags = 0;
1888 
1889 	/* We assume |m->dest| has a leading "/". */
1890 	if (dev_path && strncmp("/dev/", m->dest, 5) == 0) {
1891 		/*
1892 		 * Since the temp path is rooted at /dev, skip that dest part.
1893 		 */
1894 		if (asprintf(&dest, "%s%s", dev_path, m->dest + 4) < 0)
1895 			return -ENOMEM;
1896 	} else {
1897 		if (asprintf(&dest, "%s%s", j->chrootdir ?: "", m->dest) < 0)
1898 			return -ENOMEM;
1899 	}
1900 
1901 	ret = setup_mount_destination(m->src, dest, j->uid, j->gid,
1902 				      has_bind_flag);
1903 	if (ret) {
1904 		warn("cannot create mount target '%s'", dest);
1905 		goto error;
1906 	}
1907 
1908 	/*
1909 	 * Remount bind mounts that:
1910 	 * - Come from the minijail_bind() API, and
1911 	 * - Add the 'ro' flag
1912 	 * since 'bind' and other flags can't both be specified in the same
1913 	 * mount(2) call.
1914 	 * Callers using minijail_mount() to perform bind mounts are expected to
1915 	 * know what they're doing and call minijail_mount() with MS_REMOUNT as
1916 	 * needed.
1917 	 * Therefore, if the caller is asking for a remount (using MS_REMOUNT),
1918 	 * there is no need to do an extra remount here.
1919 	 */
1920 	if (has_bind_flag && strcmp(m->type, "minijail_bind") == 0 &&
1921 	    !has_remount_flag) {
1922 		/*
1923 		 * Grab the mount flags of the source. These are used to figure
1924 		 * out whether the bind mount needs to be remounted read-only.
1925 		 */
1926 		if (get_mount_flags(m->src, &original_mnt_flags)) {
1927 			warn("cannot get mount flags for '%s'", m->src);
1928 			goto error;
1929 		}
1930 
1931 		if ((m->flags & MS_RDONLY) !=
1932 		    (original_mnt_flags & MS_RDONLY)) {
1933 			do_remount = true;
1934 			/*
1935 			 * Restrict the mount flags to those that are
1936 			 * user-settable in a MS_REMOUNT request, but excluding
1937 			 * MS_RDONLY. The user-requested mount flags will
1938 			 * dictate whether the remount will have that flag or
1939 			 * not.
1940 			 */
1941 			original_mnt_flags &=
1942 			    (MS_USER_SETTABLE_MASK & ~MS_RDONLY);
1943 		}
1944 	}
1945 
1946 	/*
1947 	 * Do a final check for symlinks in |m->src|.
1948 	 * |m->src| will only contain a valid path when purely bind-mounting
1949 	 * (but not when remounting a bind mount).
1950 	 *
1951 	 * Short of having a version of mount(2) that can take fd's, this is the
1952 	 * smallest we can make the TOCTOU window.
1953 	 */
1954 	if (has_bind_flag && !has_remount_flag && !is_valid_bind_path(m->src)) {
1955 		warn("src '%s' is not a valid bind mount path", m->src);
1956 		goto error;
1957 	}
1958 
1959 	ret = mount(m->src, dest, m->type, m->flags, m->data);
1960 	if (ret) {
1961 		pwarn("cannot mount '%s' as '%s' with flags %#lx", m->src, dest,
1962 		      m->flags);
1963 		goto error;
1964 	}
1965 
1966 	/* Remount *after* the initial mount. */
1967 	if (do_remount) {
1968 		ret =
1969 		    mount(m->src, dest, NULL,
1970 			  m->flags | original_mnt_flags | MS_REMOUNT, m->data);
1971 		if (ret) {
1972 			pwarn(
1973 			    "cannot bind-remount '%s' as '%s' with flags %#lx",
1974 			    m->src, dest,
1975 			    m->flags | original_mnt_flags | MS_REMOUNT);
1976 			goto error;
1977 		}
1978 	}
1979 
1980 	free(dest);
1981 	if (m->next)
1982 		return mount_one(j, m->next, dev_path);
1983 	return 0;
1984 
1985 error:
1986 	free(dest);
1987 	return ret;
1988 }
1989 
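/*
 * Editorial sketch (not part of the original source): the bind/remount logic
 * in mount_one() exists because MS_BIND ignores flags like MS_RDONLY on the
 * initial call, so a read-only bind mount always takes two mount(2) calls:
 *
 *   mount(src, dest, NULL, MS_BIND, NULL);
 *   mount(src, dest, NULL,
 *         MS_BIND | MS_REMOUNT | MS_RDONLY | other_flags, NULL);
 *
 * where |other_flags| stands for the user-settable flags carried over from
 * the source mount, as computed above.
 */
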
1990 static void process_mounts_or_die(const struct minijail *j)
1991 {
1992 	/*
1993 	 * We have to mount /dev first in case there are bind mounts from
1994 	 * the original /dev into the new unique tmpfs one.
1995 	 */
1996 	char *dev_path = NULL;
1997 	if (j->flags.mount_dev && mount_dev(&dev_path))
1998 		pdie("mount_dev failed");
1999 
2000 	if (j->mounts_head && mount_one(j, j->mounts_head, dev_path)) {
2001 		warn("mount_one failed with /dev at '%s'", dev_path);
2002 
2003 		if (dev_path)
2004 			mount_dev_cleanup(dev_path);
2005 
2006 		_exit(MINIJAIL_ERR_MOUNT);
2007 	}
2008 
2009 	/*
2010 	 * Once all bind mounts have been processed, move the temp dev to
2011 	 * its final /dev home.
2012 	 */
2013 	if (j->flags.mount_dev && mount_dev_finalize(j, dev_path))
2014 		pdie("mount_dev_finalize failed");
2015 }
2016 
2017 static int enter_chroot(const struct minijail *j)
2018 {
2019 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
2020 
2021 	if (chroot(j->chrootdir))
2022 		return -errno;
2023 
2024 	if (chdir("/"))
2025 		return -errno;
2026 
2027 	return 0;
2028 }
2029 
2030 static int enter_pivot_root(const struct minijail *j)
2031 {
2032 	attribute_cleanup_fd int oldroot = -1;
2033 	attribute_cleanup_fd int newroot = -1;
2034 
2035 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
2036 
2037 	/*
2038 	 * Keep the fd for both old and new root.
2039 	 * It will be used in fchdir(2) later.
2040 	 */
2041 	oldroot = open("/", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
2042 	if (oldroot < 0)
2043 		pdie("failed to open / for fchdir");
2044 	newroot = open(j->chrootdir, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
2045 	if (newroot < 0)
2046 		pdie("failed to open %s for fchdir", j->chrootdir);
2047 
2048 	/*
2049 	 * To ensure j->chrootdir is the root of a filesystem,
2050 	 * do a self bind mount.
2051 	 */
2052 	if (mount(j->chrootdir, j->chrootdir, "bind", MS_BIND | MS_REC, ""))
2053 		pdie("failed to bind mount '%s'", j->chrootdir);
2054 	if (chdir(j->chrootdir))
2055 		return -errno;
2056 	if (syscall(SYS_pivot_root, ".", "."))
2057 		pdie("pivot_root");
2058 
2059 	/*
2060 	 * Now the old root is mounted on top of the new root. Use fchdir(2) to
2061 	 * change to the old root and unmount it.
2062 	 */
2063 	if (fchdir(oldroot))
2064 		pdie("failed to fchdir to old /");
2065 
2066 	/*
2067 	 * If skip_remount_private was enabled for minijail_enter(),
2068 	 * there could be a shared mount point under |oldroot|. In that case,
2069 	 * mounts under this shared mount point will be unmounted below, and
2070 	 * this unmounting will propagate to the original mount namespace
2071 	 * (because the mount point is shared). To prevent this unexpected
2072 	 * unmounting, remove these mounts from their peer groups by recursively
2073 	 * remounting them as MS_PRIVATE.
2074 	 */
2075 	if (mount(NULL, ".", NULL, MS_REC | MS_PRIVATE, NULL))
2076 		pdie("failed to mount(/, private) before umount(/)");
2077 	/* The old root might be busy, so use lazy unmount. */
2078 	if (umount2(".", MNT_DETACH))
2079 		pdie("umount(/)");
2080 	/* Change back to the new root. */
2081 	if (fchdir(newroot))
2082 		return -errno;
2083 	if (chroot("/"))
2084 		return -errno;
2085 	/* Set correct CWD for getcwd(3). */
2086 	if (chdir("/"))
2087 		return -errno;
2088 
2089 	return 0;
2090 }
2091 
2092 static int mount_tmp(const struct minijail *j)
2093 {
2094 	const char fmt[] = "size=%zu,mode=1777";
2095 	/* Account for the user passing ULLONG_MAX as the size, written out literally, plus slack. */
2096 	char data[sizeof(fmt) + sizeof("18446744073709551615ULL")];
2097 	int ret;
2098 
2099 	ret = snprintf(data, sizeof(data), fmt, j->tmpfs_size);
2100 
2101 	if (ret <= 0)
2102 		pdie("tmpfs size spec error");
2103 	else if ((size_t)ret >= sizeof(data))
2104 		pdie("tmpfs size spec too large");
2105 
2106 	unsigned long flags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
2107 
2108 	if (block_symlinks_in_noninit_mountns_tmp()) {
2109 		flags |= MS_NOSYMFOLLOW;
2110 	}
2111 
2112 	return mount("none", "/tmp", "tmpfs", flags, data);
2113 }
2114 
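/*
 * Editorial sketch (not part of the original source): callers normally size
 * this tmpfs through the public API, assuming minijail_mount_tmp_size()
 * stores |tmpfs_size| as its name suggests:
 *
 *   minijail_mount_tmp_size(j, 16 * 1024 * 1024);
 *
 * which makes the call above roughly equivalent to
 *
 *   mount("none", "/tmp", "tmpfs", MS_NODEV | MS_NOEXEC | MS_NOSUID,
 *         "size=16777216,mode=1777");
 */
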
2115 static int remount_proc_readonly(const struct minijail *j)
2116 {
2117 	const char *kProcPath = "/proc";
2118 	const unsigned int kSafeFlags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
2119 	/*
2120 	 * Right now, we're holding a reference to our parent's old mount of
2121 	 * /proc in our namespace, which means using MS_REMOUNT here would
2122 	 * mutate our parent's mount as well, even though we're in a VFS
2123 	 * namespace (!). Instead, remove their mount from our namespace lazily
2124 	 * (MNT_DETACH) and make our own.
2125 	 *
2126 	 * However, we skip this in the user namespace case because it will
2127 	 * invariably fail. Every mount namespace is "owned" by the
2128 	 * user namespace of the process that creates it. Mount namespace A is
2129 	 * "less privileged" than mount namespace B if A is created off of B,
2130 	 * and B is owned by a different user namespace.
2131 	 * When a less privileged mount namespace is created, the mounts used to
2132 	 * initialize it (coming from the more privileged mount namespace) come
2133 	 * as a unit, and are locked together. This means that code running in
2134 	 * the new mount (and user) namespace cannot piecemeal unmount
2135 	 * individual mounts inherited from a more privileged mount namespace.
2136 	 * See https://man7.org/linux/man-pages/man7/mount_namespaces.7.html,
2137 	 * "Restrictions on mount namespaces" for details.
2138 	 *
2139 	 * This happens in our use case because we first enter a new user
2140 	 * namespace (on clone(2)) and then we unshare(2) a new mount namespace,
2141 	 * which means the new mount namespace is less privileged than its
2142 	 * parent mount namespace. This would also happen if we entered a new
2143 	 * mount namespace on clone(2), since the user namespace is created
2144 	 * first.
2145 	 * In all other non-user-namespace cases the new mount namespace is
2146 	 * similarly privileged as the parent mount namespace so unmounting a
2147 	 * single mount is allowed.
2148 	 *
2149 	 * We still remount /proc as read-only in the user namespace case
2150 	 * because while a process with CAP_SYS_ADMIN in the new user namespace
2151 	 * can unmount the RO mount and get at the RW mount, an attacker with
2152 	 * access only to a write primitive will not be able to modify /proc.
2153 	 */
2154 	if (!j->flags.userns && umount2(kProcPath, MNT_DETACH))
2155 		return -errno;
2156 	if (mount("proc", kProcPath, "proc", kSafeFlags | MS_RDONLY, ""))
2157 		return -errno;
2158 	return 0;
2159 }
2160 
2161 static void kill_child_and_die(const struct minijail *j, const char *msg)
2162 {
2163 	kill(j->initpid, SIGKILL);
2164 	die("%s", msg);
2165 }
2166 
2167 static void write_pid_file_or_die(const struct minijail *j)
2168 {
2169 	if (write_pid_to_path(j->initpid, j->pid_file_path))
2170 		kill_child_and_die(j, "failed to write pid file");
2171 }
2172 
2173 static void add_to_cgroups_or_die(const struct minijail *j)
2174 {
2175 	size_t i;
2176 
2177 	for (i = 0; i < j->cgroup_count; ++i) {
2178 		if (write_pid_to_path(j->initpid, j->cgroups[i]))
2179 			kill_child_and_die(j, "failed to add to cgroups");
2180 	}
2181 }
2182 
2183 static void set_rlimits_or_die(const struct minijail *j)
2184 {
2185 	size_t i;
2186 
2187 	for (i = 0; i < j->rlimit_count; ++i) {
2188 		struct rlimit limit;
2189 		limit.rlim_cur = j->rlimits[i].cur;
2190 		limit.rlim_max = j->rlimits[i].max;
2191 		if (prlimit(j->initpid, j->rlimits[i].type, &limit, NULL))
2192 			kill_child_and_die(j, "failed to set rlimit");
2193 	}
2194 }
2195 
2196 static void write_ugid_maps_or_die(const struct minijail *j)
2197 {
2198 	if (j->uidmap && write_proc_file(j->initpid, j->uidmap, "uid_map") != 0)
2199 		kill_child_and_die(j, "failed to write uid_map");
2200 	if (j->gidmap && j->flags.disable_setgroups) {
2201 		/*
2202 		 * Older kernels might not have the /proc/<pid>/setgroups files.
2203 		 */
2204 		int ret = write_proc_file(j->initpid, "deny", "setgroups");
2205 		if (ret != 0) {
2206 			if (ret == -ENOENT) {
2207 				/*
2208 				 * See
2209 				 * http://man7.org/linux/man-pages/man7/user_namespaces.7.html.
2210 				 */
2211 				warn("could not disable setgroups(2)");
2212 			} else
2213 				kill_child_and_die(
2214 				    j, "failed to disable setgroups(2)");
2215 		}
2216 	}
2217 	if (j->gidmap && write_proc_file(j->initpid, j->gidmap, "gid_map") != 0)
2218 		kill_child_and_die(j, "failed to write gid_map");
2219 }
2220 
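/*
 * Editorial sketch (not part of the original source): the uid_map/gid_map
 * strings written above use the kernel's "<inside> <outside> <count>" format.
 * Assuming the public minijail_uidmap()/minijail_gidmap() setters just store
 * these strings, a caller might write:
 *
 *   minijail_namespace_user(j);
 *   minijail_uidmap(j, "0 1000 1");  // uid 0 in the jail is uid 1000 outside
 *   minijail_gidmap(j, "0 1000 1");
 */
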
2221 static void enter_user_namespace(const struct minijail *j)
2222 {
2223 	int uid = j->flags.uid ? j->uid : 0;
2224 	int gid = j->flags.gid ? j->gid : 0;
2225 	if (j->gidmap && setresgid(gid, gid, gid)) {
2226 		pdie("user_namespaces: setresgid(%d, %d, %d) failed", gid, gid,
2227 		     gid);
2228 	}
2229 	if (j->uidmap && setresuid(uid, uid, uid)) {
2230 		pdie("user_namespaces: setresuid(%d, %d, %d) failed", uid, uid,
2231 		     uid);
2232 	}
2233 }
2234 
2235 static void parent_setup_complete(int *pipe_fds)
2236 {
2237 	close_and_reset(&pipe_fds[0]);
2238 	close_and_reset(&pipe_fds[1]);
2239 }
2240 
2241 /*
2242  * wait_for_parent_setup: Called by the child process to wait for any
2243  * further parent-side setup to complete before continuing.
2244  */
2245 static void wait_for_parent_setup(int *pipe_fds)
2246 {
2247 	char buf;
2248 
2249 	close_and_reset(&pipe_fds[1]);
2250 
2251 	/* Wait for parent to complete setup and close the pipe. */
2252 	if (read(pipe_fds[0], &buf, 1) != 0)
2253 		die("failed to sync with parent");
2254 	close_and_reset(&pipe_fds[0]);
2255 }
2256 
2257 static void drop_ugid(const struct minijail *j)
2258 {
2259 	if (j->flags.inherit_suppl_gids + j->flags.keep_suppl_gids +
2260 		j->flags.set_suppl_gids >
2261 	    1) {
2262 		die("can only do one of inherit, keep, or set supplementary "
2263 		    "groups");
2264 	}
2265 
2266 	if (j->flags.inherit_suppl_gids) {
2267 		if (initgroups(j->user, j->usergid))
2268 			pdie("initgroups(%s, %d) failed", j->user, j->usergid);
2269 	} else if (j->flags.set_suppl_gids) {
2270 		if (setgroups(j->suppl_gid_count, j->suppl_gid_list))
2271 			pdie("setgroups(suppl_gids) failed");
2272 	} else if (!j->flags.keep_suppl_gids && !j->flags.disable_setgroups) {
2273 		/*
2274 		 * Only attempt to clear supplementary groups if we are changing
2275 		 * users or groups, and if the caller did not request to disable
2276 		 * setgroups (used when entering a user namespace as a
2277 		 * non-privileged user).
2278 		 */
2279 		if ((j->flags.uid || j->flags.gid) && setgroups(0, NULL))
2280 			pdie("setgroups(0, NULL) failed");
2281 	}
2282 
2283 	if (j->flags.gid && setresgid(j->gid, j->gid, j->gid))
2284 		pdie("setresgid(%d, %d, %d) failed", j->gid, j->gid, j->gid);
2285 
2286 	if (j->flags.uid && setresuid(j->uid, j->uid, j->uid))
2287 		pdie("setresuid(%d, %d, %d) failed", j->uid, j->uid, j->uid);
2288 }
2289 
2290 static void drop_capbset(uint64_t keep_mask, unsigned int last_valid_cap)
2291 {
2292 	const uint64_t one = 1;
2293 	unsigned int i;
2294 	for (i = 0; i < sizeof(keep_mask) * 8 && i <= last_valid_cap; ++i) {
2295 		if (keep_mask & (one << i))
2296 			continue;
2297 		if (prctl(PR_CAPBSET_DROP, i))
2298 			pdie("could not drop capability from bounding set");
2299 	}
2300 }
2301 
2302 static void drop_caps(const struct minijail *j, unsigned int last_valid_cap)
2303 {
2304 	if (!j->flags.use_caps)
2305 		return;
2306 
2307 	cap_t caps = cap_get_proc();
2308 	cap_value_t flag[1];
2309 	const size_t ncaps = sizeof(j->caps) * 8;
2310 	const uint64_t one = 1;
2311 	unsigned int i;
2312 	if (!caps)
2313 		die("can't get process caps");
2314 	if (cap_clear(caps))
2315 		die("can't clear caps");
2316 
2317 	for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2318 		/* Keep CAP_SETPCAP for dropping bounding set bits. */
2319 		if (i != CAP_SETPCAP && !(j->caps & (one << i)))
2320 			continue;
2321 		flag[0] = i;
2322 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_SET))
2323 			die("can't add effective cap");
2324 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_SET))
2325 			die("can't add permitted cap");
2326 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_SET))
2327 			die("can't add inheritable cap");
2328 	}
2329 	if (cap_set_proc(caps))
2330 		die("can't apply initial cleaned capset");
2331 
2332 	/*
2333 	 * Instead of dropping the bounding set first, do it here in case
2334 	 * the caller had a more permissive bounding set which could
2335 	 * have been used above to raise a capability that wasn't already
2336 	 * present. This requires CAP_SETPCAP, so we raised/kept it above.
2337 	 *
2338 	 * However, if we're asked to skip setting *and* locking the
2339 	 * SECURE_NOROOT securebit, also skip dropping the bounding set.
2340 	 * If the caller wants to regain all capabilities when executing a
2341 	 * set-user-ID-root program, allow them to do so. The default behavior
2342 	 * (i.e. the behavior without |securebits_skip_mask| set) will still put
2343 	 * the jailed process tree in a capabilities-only environment.
2344 	 *
2345 	 * We check the negated skip mask for SECURE_NOROOT and
2346 	 * SECURE_NOROOT_LOCKED. If the bits are set in the negated mask they
2347 	 * will *not* be skipped in lock_securebits(), and therefore we should
2348 	 * drop the bounding set.
2349 	 */
2350 	if (secure_noroot_set_and_locked(~j->securebits_skip_mask)) {
2351 		drop_capbset(j->caps, last_valid_cap);
2352 	} else {
2353 		warn("SECURE_NOROOT not set, not dropping bounding set");
2354 	}
2355 
2356 	/* If CAP_SETPCAP wasn't specifically requested, now we remove it. */
2357 	if ((j->caps & (one << CAP_SETPCAP)) == 0) {
2358 		flag[0] = CAP_SETPCAP;
2359 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_CLEAR))
2360 			die("can't clear effective cap");
2361 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_CLEAR))
2362 			die("can't clear permitted cap");
2363 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_CLEAR))
2364 			die("can't clear inheritable cap");
2365 	}
2366 
2367 	if (cap_set_proc(caps))
2368 		die("can't apply final cleaned capset");
2369 
2370 	/*
2371 	 * If ambient capabilities are supported, clear all capabilities first,
2372 	 * then raise the requested ones.
2373 	 */
2374 	if (j->flags.set_ambient_caps) {
2375 		if (!cap_ambient_supported()) {
2376 			pdie("ambient capabilities not supported");
2377 		}
2378 		if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0) !=
2379 		    0) {
2380 			pdie("can't clear ambient capabilities");
2381 		}
2382 
2383 		for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2384 			if (!(j->caps & (one << i)))
2385 				continue;
2386 
2387 			if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, i, 0,
2388 				  0) != 0) {
2389 				pdie("prctl(PR_CAP_AMBIENT, "
2390 				     "PR_CAP_AMBIENT_RAISE, %u) failed",
2391 				     i);
2392 			}
2393 		}
2394 	}
2395 
2396 	cap_free(caps);
2397 }
2398 
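/*
 * Editorial sketch (not part of the original source): |j->caps| is a plain
 * bitmask indexed by capability number, so keeping a single capability looks
 * roughly like the following, assuming the usual public setters:
 *
 *   minijail_use_caps(j, UINT64_C(1) << CAP_NET_BIND_SERVICE);
 *   minijail_set_ambient_caps(j);  // optional: keep caps across execve(2)
 *                                  // without the LD_PRELOAD helper
 */
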
2399 /* Creates a ruleset for current inodes then calls landlock_restrict_self(). */
2400 static void apply_landlock_restrictions(const struct minijail *j)
2401 {
2402 	struct fs_rule *r;
2403 	attribute_cleanup_fd int ruleset_fd = -1;
2404 
2405 	r = j->fs_rules_head;
2406 	while (r) {
2407 		if (ruleset_fd < 0) {
2408 			struct minijail_landlock_ruleset_attr ruleset_attr = {
2409 				.handled_access_fs = HANDLED_ACCESS_TYPES
2410 			};
2411 			ruleset_fd = landlock_create_ruleset(
2412 				&ruleset_attr, sizeof(ruleset_attr), 0);
2413 			if (ruleset_fd < 0) {
2414 				const int err = errno;
2415 				pwarn("Failed to create a ruleset");
2416 				switch (err) {
2417 				case ENOSYS:
2418 					pwarn("Landlock is not supported by the current kernel");
2419 					break;
2420 				case EOPNOTSUPP:
2421 					pwarn("Landlock is currently disabled by kernel config");
2422 					break;
2423 				}
2424 				return;
2425 			}
2426 		}
2427 		populate_ruleset_internal(r->path, ruleset_fd, r->landlock_flags);
2428 		r = r->next;
2429 	}
2430 
2431 	if (ruleset_fd >= 0) {
2432 		if (landlock_restrict_self(ruleset_fd, 0)) {
2433 			pdie("Failed to enforce ruleset");
2434 		}
2435 	}
2436 }
2437 
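/*
 * Editorial sketch (not part of the original source): the Landlock flow above
 * is the standard three-step pattern, shown here with the same wrappers this
 * file uses (treat the access-flag name as an illustrative placeholder only):
 *
 *   struct minijail_landlock_ruleset_attr attr = {
 *       .handled_access_fs = HANDLED_ACCESS_TYPES,
 *   };
 *   int fd = landlock_create_ruleset(&attr, sizeof(attr), 0);
 *   populate_ruleset_internal("/usr", fd, SOME_LANDLOCK_ACCESS_FLAGS);
 *   landlock_restrict_self(fd, 0);
 *
 * Without CAP_SYS_ADMIN, landlock_restrict_self() also requires no_new_privs
 * to be set first.
 */
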
2438 static void set_seccomp_filter(const struct minijail *j)
2439 {
2440 	/*
2441 	 * Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
2442 	 * in the kernel source tree for an explanation of the parameters.
2443 	 */
2444 	if (j->flags.no_new_privs) {
2445 		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
2446 			pdie("prctl(PR_SET_NO_NEW_PRIVS)");
2447 	}
2448 
2449 	/*
2450 	 * Code running with ASan
2451 	 * (https://github.com/google/sanitizers/wiki/AddressSanitizer)
2452 	 * will make system calls not included in the syscall filter policy,
2453 	 * which will likely crash the program. Skip setting seccomp filter in
2454 	 * that case.
2455 	 * 'running_with_asan()' has no inputs and is completely defined at
2456 	 * build time, so this cannot be used by an attacker to skip setting
2457 	 * seccomp filter.
2458 	 */
2459 	if (j->flags.seccomp_filter && running_with_asan()) {
2460 		warn("running with (HW)ASan, not setting seccomp filter");
2461 		return;
2462 	}
2463 
2464 	if (j->flags.seccomp_filter) {
2465 		if (seccomp_is_logging_allowed(j)) {
2466 			warn("logging seccomp filter failures");
2467 			if (!seccomp_ret_log_available()) {
2468 				/*
2469 				 * If SECCOMP_RET_LOG is not available,
2470 				 * install the SIGSYS handler first.
2471 				 */
2472 				if (install_sigsys_handler())
2473 					pdie(
2474 					    "failed to install SIGSYS handler");
2475 			}
2476 		} else if (j->flags.seccomp_filter_tsync) {
2477 			/*
2478 			 * If setting thread sync,
2479 			 * reset the SIGSYS signal handler so that
2480 			 * the entire thread group is killed.
2481 			 */
2482 			if (signal(SIGSYS, SIG_DFL) == SIG_ERR)
2483 				pdie("failed to reset SIGSYS disposition");
2484 		}
2485 	}
2486 
2487 	/*
2488 	 * Install the syscall filter.
2489 	 */
2490 	if (j->flags.seccomp_filter) {
2491 		if (j->flags.seccomp_filter_tsync ||
2492 		    j->flags.seccomp_filter_allow_speculation) {
2493 			int filter_flags =
2494 			    (j->flags.seccomp_filter_tsync
2495 				 ? SECCOMP_FILTER_FLAG_TSYNC
2496 				 : 0) |
2497 			    (j->flags.seccomp_filter_allow_speculation
2498 				 ? SECCOMP_FILTER_FLAG_SPEC_ALLOW
2499 				 : 0);
2500 			if (sys_seccomp(SECCOMP_SET_MODE_FILTER, filter_flags,
2501 					j->filter_prog)) {
2502 				pdie("seccomp(tsync) failed");
2503 			}
2504 		} else {
2505 			if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
2506 				  j->filter_prog)) {
2507 				pdie("prctl(seccomp_filter) failed");
2508 			}
2509 		}
2510 	}
2511 }
2512 
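/*
 * Editorial sketch (not part of the original source): the flags consulted in
 * set_seccomp_filter() are normally configured through the public API before
 * minijail_enter() or minijail_run*(), assuming the usual setters; the policy
 * path is just an example:
 *
 *   minijail_no_new_privs(j);
 *   minijail_use_seccomp_filter(j);
 *   minijail_parse_seccomp_filters(j, "/usr/share/policy/foo.policy");
 *   minijail_set_seccomp_filter_tsync(j);  // optional: apply to all threads
 */
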
2513 static pid_t forward_pid = -1;
2514 
2515 static void forward_signal(int sig, siginfo_t *siginfo attribute_unused,
2516 			   void *void_context attribute_unused)
2517 {
2518 	if (forward_pid != -1) {
2519 		kill(forward_pid, sig);
2520 	}
2521 }
2522 
2523 static void install_signal_handlers(void)
2524 {
2525 	struct sigaction act;
2526 
2527 	memset(&act, 0, sizeof(act));
2528 	act.sa_sigaction = &forward_signal;
2529 	act.sa_flags = SA_SIGINFO | SA_RESTART;
2530 
2531 	/* Handle all signals, except SIGCHLD. */
2532 	for (int sig = 1; sig < NSIG; sig++) {
2533 		/*
2534 		 * We don't care if we get EINVAL: that just means that we
2535 		 * can't handle this signal, so let's skip it and continue.
2536 		 */
2537 		sigaction(sig, &act, NULL);
2538 	}
2539 	/* Reset SIGCHLD's handler. */
2540 	signal(SIGCHLD, SIG_DFL);
2541 
2542 	/* Handle real-time signals. */
2543 	for (int sig = SIGRTMIN; sig <= SIGRTMAX; sig++) {
2544 		sigaction(sig, &act, NULL);
2545 	}
2546 }
2547 
2548 static const char *lookup_hook_name(minijail_hook_event_t event)
2549 {
2550 	switch (event) {
2551 	case MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS:
2552 		return "pre-drop-caps";
2553 	case MINIJAIL_HOOK_EVENT_PRE_EXECVE:
2554 		return "pre-execve";
2555 	case MINIJAIL_HOOK_EVENT_PRE_CHROOT:
2556 		return "pre-chroot";
2557 	case MINIJAIL_HOOK_EVENT_MAX:
2558 		/*
2559 		 * Adding this in favor of a default case to force the
2560 		 * compiler to error out if a new enum value is added.
2561 		 */
2562 		break;
2563 	}
2564 	return "unknown";
2565 }
2566 
2567 static void run_hooks_or_die(const struct minijail *j,
2568 			     minijail_hook_event_t event)
2569 {
2570 	int rc;
2571 	int hook_index = 0;
2572 	for (struct hook *c = j->hooks_head; c; c = c->next) {
2573 		if (c->event != event)
2574 			continue;
2575 		rc = c->hook(c->payload);
2576 		if (rc != 0) {
2577 			errno = -rc;
2578 			pdie("%s hook (index %d) failed",
2579 			     lookup_hook_name(event), hook_index);
2580 		}
2581 		/* Only increase the index within the same hook event type. */
2582 		++hook_index;
2583 	}
2584 }
2585 
2586 void API minijail_enter(const struct minijail *j)
2587 {
2588 	/*
2589 	 * If we're dropping caps, get the last valid cap from /proc now,
2590 	 * since /proc can be unmounted before drop_caps() is called.
2591 	 */
2592 	unsigned int last_valid_cap = 0;
2593 	if (j->flags.capbset_drop || j->flags.use_caps)
2594 		last_valid_cap = get_last_valid_cap();
2595 
2596 	if (j->flags.pids)
2597 		die("tried to enter a pid-namespaced jail;"
2598 		    " try minijail_run()?");
2599 
2600 	if (j->flags.inherit_suppl_gids && !j->user)
2601 		die("cannot inherit supplementary groups without setting a "
2602 		    "username");
2603 
2604 	/*
2605 	 * We can't recover from failures if we've dropped privileges partially,
2606 	 * so we don't even try. If any of our operations fail, we abort() the
2607 	 * entire process.
2608 	 */
2609 	if (j->flags.enter_vfs) {
2610 		if (setns(j->mountns_fd, CLONE_NEWNS))
2611 			pdie("setns(CLONE_NEWNS) failed");
2612 		close(j->mountns_fd);
2613 	}
2614 
2615 	if (j->flags.vfs) {
2616 		if (unshare(CLONE_NEWNS))
2617 			pdie("unshare(CLONE_NEWNS) failed");
2618 		/*
2619 		 * By default, remount all filesystems as private, unless
2620 		 * - Passed a specific remount mode, in which case remount with
2621 		 *   that,
2622 		 * - Asked not to remount at all, in which case skip the
2623 		 *   mount(2) call.
2624 		 * https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
2625 		 */
2626 		if (j->remount_mode) {
2627 			if (mount(NULL, "/", NULL, MS_REC | j->remount_mode,
2628 				  NULL))
2629 				pdie("mount(NULL, /, NULL, "
2630 				     "MS_REC | j->remount_mode, NULL) failed");
2631 
2632 			struct minijail_remount *temp = j->remounts_head;
2633 			while (temp) {
2634 				if (temp->remount_mode < j->remount_mode)
2635 					die("cannot remount %s as stricter "
2636 					    "than the root dir",
2637 					    temp->mount_name);
2638 				if (mount(NULL, temp->mount_name, NULL,
2639 					  MS_REC | temp->remount_mode, NULL))
2640 					pdie("mount(NULL, %s, NULL, "
2641 					     "MS_REC | temp->remount_mode, "
2642 					     "NULL) failed",
2643 					     temp->mount_name);
2644 				temp = temp->next;
2645 			}
2646 		}
2647 	}
2648 
2649 	if (j->flags.ipc && unshare(CLONE_NEWIPC)) {
2650 		pdie("unshare(CLONE_NEWIPC) failed");
2651 	}
2652 
2653 	if (j->flags.uts) {
2654 		if (unshare(CLONE_NEWUTS))
2655 			pdie("unshare(CLONE_NEWUTS) failed");
2656 
2657 		if (j->hostname &&
2658 		    sethostname(j->hostname, strlen(j->hostname)))
2659 			pdie("sethostname(%s) failed", j->hostname);
2660 	}
2661 
2662 	if (j->flags.enter_net) {
2663 		if (setns(j->netns_fd, CLONE_NEWNET))
2664 			pdie("setns(CLONE_NEWNET) failed");
2665 		close(j->netns_fd);
2666 	} else if (j->flags.net) {
2667 		if (unshare(CLONE_NEWNET))
2668 			pdie("unshare(CLONE_NEWNET) failed");
2669 		config_net_loopback();
2670 	}
2671 
2672 	if (j->flags.ns_cgroups && unshare(CLONE_NEWCGROUP))
2673 		pdie("unshare(CLONE_NEWCGROUP) failed");
2674 
2675 	if (j->flags.new_session_keyring) {
2676 		if (syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL) < 0)
2677 			pdie("keyctl(KEYCTL_JOIN_SESSION_KEYRING) failed");
2678 	}
2679 
2680 	/* We have to process all the mounts before we chroot/pivot_root. */
2681 	process_mounts_or_die(j);
2682 
2683 	if (j->flags.chroot && enter_chroot(j))
2684 		pdie("chroot");
2685 
2686 	if (j->flags.pivot_root && enter_pivot_root(j))
2687 		pdie("pivot_root");
2688 
2689 	if (j->flags.mount_tmp && mount_tmp(j))
2690 		pdie("mount_tmp");
2691 
2692 	if (j->flags.remount_proc_ro && remount_proc_readonly(j))
2693 		pdie("remount");
2694 
2695 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS);
2696 
2697 	/*
2698 	 * If we're only dropping capabilities from the bounding set, but not
2699 	 * from the thread's (permitted|inheritable|effective) sets, do it now.
2700 	 */
2701 	if (j->flags.capbset_drop) {
2702 		drop_capbset(j->cap_bset, last_valid_cap);
2703 	}
2704 
2705 	/*
2706 	 * POSIX capabilities are a bit tricky. We must set SECBIT_KEEP_CAPS
2707 	 * before drop_ugid() below as the latter would otherwise drop all
2708 	 * capabilities.
2709 	 */
2710 	if (j->flags.use_caps) {
2711 		/*
2712 		 * When using ambient capabilities, CAP_SET{GID,UID} can be
2713 		 * inherited across execve(2), so SECBIT_KEEP_CAPS is not
2714 		 * strictly needed.
2715 		 */
2716 		bool require_keep_caps = !j->flags.set_ambient_caps;
2717 		if (lock_securebits(j->securebits_skip_mask,
2718 				    require_keep_caps) < 0) {
2719 			pdie("locking securebits failed");
2720 		}
2721 	}
2722 
2723 	if (j->flags.no_new_privs) {
2724 		/*
2725 		 * If we're setting no_new_privs, we can drop privileges
2726 		 * before setting seccomp filter. This way filter policies
2727 		 * don't need to allow privilege-dropping syscalls.
2728 		 */
2729 		drop_ugid(j);
2730 		drop_caps(j, last_valid_cap);
2731 
2732 		// Landlock is applied as late as possible. If no_new_privs is
2733 		// set, then it can be applied after dropping caps.
2734 		apply_landlock_restrictions(j);
2735 		set_seccomp_filter(j);
2736 	} else {
2737 		apply_landlock_restrictions(j);
2738 
2739 		/*
2740 		 * If we're not setting no_new_privs,
2741 		 * we need to set seccomp filter *before* dropping privileges.
2742 		 * WARNING: this means that filter policies *must* allow
2743 		 * setgroups()/setresgid()/setresuid() for dropping root and
2744 		 * capget()/capset()/prctl() for dropping caps.
2745 		 */
2746 		set_seccomp_filter(j);
2747 		drop_ugid(j);
2748 		drop_caps(j, last_valid_cap);
2749 	}
2750 
2751 	/*
2752 	 * Select the specified alternate syscall table.  The table must not
2753 	 * block prctl(2) if we're using seccomp as well.
2754 	 */
2755 	if (j->flags.alt_syscall) {
2756 		if (prctl(PR_ALT_SYSCALL, 1, j->alt_syscall_table))
2757 			pdie("prctl(PR_ALT_SYSCALL) failed");
2758 	}
2759 
2760 	/*
2761 	 * seccomp has to come last since it cuts off all the other
2762 	 * privilege-dropping syscalls :)
2763 	 */
2764 	if (j->flags.seccomp && prctl(PR_SET_SECCOMP, 1)) {
2765 		if ((errno == EINVAL) && seccomp_can_softfail()) {
2766 			warn("seccomp not supported");
2767 			return;
2768 		}
2769 		pdie("prctl(PR_SET_SECCOMP) failed");
2770 	}
2771 }
2772 
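/*
 * Editorial sketch (not part of the original source): minijail_enter() is the
 * in-process path (no fork), so a caller configures the jail and then gives
 * up privileges for good, e.g., assuming the usual public setters:
 *
 *   struct minijail *j = minijail_new();
 *   minijail_namespace_vfs(j);
 *   minijail_remount_proc_readonly(j);
 *   minijail_no_new_privs(j);
 *   minijail_change_uid(j, 1000);
 *   minijail_change_gid(j, 1000);
 *   minijail_enter(j);    // aborts the whole process on failure
 *   minijail_destroy(j);
 */
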
2773 /* TODO(wad): will visibility affect this variable? */
2774 static int init_exitstatus = 0;
2775 
2776 static void init_term(int sig attribute_unused)
2777 {
2778 	_exit(init_exitstatus);
2779 }
2780 
2781 static void init(pid_t rootpid)
2782 {
2783 	pid_t pid;
2784 	int status;
2785 	/* So that we exit with the right status. */
2786 	signal(SIGTERM, init_term);
2787 	/* TODO(wad): self jail with seccomp filters here. */
2788 	while ((pid = wait(&status)) > 0) {
2789 		/*
2790 		 * This loop will only end when either there are no processes
2791 		 * left inside our pid namespace or we get a signal.
2792 		 */
2793 		if (pid == rootpid)
2794 			init_exitstatus = status;
2795 	}
2796 	if (!WIFEXITED(init_exitstatus))
2797 		_exit(MINIJAIL_ERR_INIT);
2798 	_exit(WEXITSTATUS(init_exitstatus));
2799 }
2800 
2801 int API minijail_from_fd(int fd, struct minijail *j)
2802 {
2803 	size_t sz = 0;
2804 	size_t bytes = read(fd, &sz, sizeof(sz));
2805 	attribute_cleanup_str char *buf = NULL;
2806 	int r;
2807 	if (sizeof(sz) != bytes)
2808 		return -EINVAL;
2809 	if (sz > USHRT_MAX) /* arbitrary check */
2810 		return -E2BIG;
2811 	buf = malloc(sz);
2812 	if (!buf)
2813 		return -ENOMEM;
2814 	bytes = read(fd, buf, sz);
2815 	if (bytes != sz)
2816 		return -EINVAL;
2817 	r = minijail_unmarshal(j, buf, sz);
2818 	return r;
2819 }
2820 
2821 int API minijail_to_fd(struct minijail *j, int fd)
2822 {
2823 	size_t sz = minijail_size(j);
2824 	if (!sz)
2825 		return -EINVAL;
2826 
2827 	attribute_cleanup_str char *buf = malloc(sz);
2828 	if (!buf)
2829 		return -ENOMEM;
2830 
2831 	int err = minijail_marshal(j, buf, sz);
2832 	if (err)
2833 		return err;
2834 
2835 	/* Sends [size][minijail]. */
2836 	err = write_exactly(fd, &sz, sizeof(sz));
2837 	if (err)
2838 		return err;
2839 
2840 	return write_exactly(fd, buf, sz);
2841 }
2842 
2843 int API minijail_copy_jail(const struct minijail *from, struct minijail *out)
2844 {
2845 	size_t sz = minijail_size(from);
2846 	if (!sz)
2847 		return -EINVAL;
2848 
2849 	attribute_cleanup_str char *buf = malloc(sz);
2850 	if (!buf)
2851 		return -ENOMEM;
2852 
2853 	int err = minijail_marshal(from, buf, sz);
2854 	if (err)
2855 		return err;
2856 
2857 	return minijail_unmarshal(out, buf, sz);
2858 }
2859 
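/*
 * Editorial sketch (not part of the original source): minijail_to_fd() and
 * minijail_from_fd() implement a simple [size][blob] protocol, so a
 * configured jail can be shipped across a pipe and rebuilt on the other end:
 *
 *   int fds[2];
 *   if (pipe(fds) == 0) {
 *       minijail_to_fd(j, fds[1]);        // writes the size, then the blob
 *       struct minijail *copy = minijail_new();
 *       minijail_from_fd(fds[0], copy);   // reads and unmarshals it
 *   }
 *
 * This is the same handoff the LD_PRELOAD path below relies on, with the pipe
 * fd advertised to the child via an environment variable.
 */
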
2860 static int setup_preload(const struct minijail *j attribute_unused,
2861 			 char ***child_env attribute_unused)
2862 {
2863 #if defined(__ANDROID__)
2864 	/* Don't use LDPRELOAD on Android. */
2865 	return 0;
2866 #else
2867 	const char *preload_path = j->preload_path ?: PRELOADPATH;
2868 	char *newenv = NULL;
2869 	int ret = 0;
2870 	const char *oldenv = minijail_getenv(*child_env, kLdPreloadEnvVar);
2871 
2872 	if (!oldenv)
2873 		oldenv = "";
2874 
2875 	/* Only insert a separating space if we have something to separate... */
2876 	if (asprintf(&newenv, "%s%s%s", oldenv, oldenv[0] != '\0' ? " " : "",
2877 		     preload_path) < 0) {
2878 		return -1;
2879 	}
2880 
2881 	ret = minijail_setenv(child_env, kLdPreloadEnvVar, newenv, 1);
2882 	free(newenv);
2883 	return ret;
2884 #endif
2885 }
2886 
2887 /*
2888  * This is for logging purposes and does not change the enforced seccomp
2889  * filter.
2890  */
2891 static int setup_seccomp_policy_path(const struct minijail *j,
2892 				     char ***child_env)
2893 {
2894 	return minijail_setenv(child_env, kSeccompPolicyPathEnvVar,
2895 			       j->seccomp_policy_path ? j->seccomp_policy_path
2896 						      : "NO-LABEL",
2897 			       1 /* overwrite */);
2898 }
2899 
2900 static int setup_pipe(char ***child_env, int fds[2])
2901 {
2902 	int r = pipe(fds);
2903 	char fd_buf[11];
2904 	if (r)
2905 		return r;
2906 	r = snprintf(fd_buf, sizeof(fd_buf), "%d", fds[0]);
2907 	if (r <= 0)
2908 		return -EINVAL;
2909 	return minijail_setenv(child_env, kFdEnvVar, fd_buf, 1);
2910 }
2911 
2912 static int close_open_fds(int *inheritable_fds, size_t size)
2913 {
2914 	const char *kFdPath = "/proc/self/fd";
2915 
2916 	DIR *d = opendir(kFdPath);
2917 	struct dirent *dir_entry;
2918 
2919 	if (d == NULL)
2920 		return -1;
2921 	int dir_fd = dirfd(d);
2922 	while ((dir_entry = readdir(d)) != NULL) {
2923 		size_t i;
2924 		char *end;
2925 		bool should_close = true;
2926 		const int fd = strtol(dir_entry->d_name, &end, 10);
2927 
2928 		if ((*end) != '\0') {
2929 			continue;
2930 		}
2931 		/*
2932 		 * We might have set up some pipes that we want to share with
2933 		 * the parent process and that should not be closed.
2934 		 */
2935 		for (i = 0; i < size; ++i) {
2936 			if (fd == inheritable_fds[i]) {
2937 				should_close = false;
2938 				break;
2939 			}
2940 		}
2941 		/* Also avoid closing the directory fd. */
2942 		if (should_close && fd != dir_fd)
2943 			close(fd);
2944 	}
2945 	closedir(d);
2946 	return 0;
2947 }
2948 
2949 /* Return true if the specified file descriptor is already open. */
2950 static int fd_is_open(int fd)
2951 {
2952 	return fcntl(fd, F_GETFD) != -1 || errno != EBADF;
2953 }
2954 
2955 static_assert(FD_SETSIZE >= MAX_PRESERVED_FDS * 2 - 1,
2956 	      "If true, ensure_no_fd_conflict will always find an unused fd.");
2957 
2958 /* If parent_fd will be used by a child fd, move it to an unused fd. */
2959 static int ensure_no_fd_conflict(const fd_set *child_fds, int child_fd,
2960 				 int *parent_fd)
2961 {
2962 	if (!FD_ISSET(*parent_fd, child_fds)) {
2963 		return 0;
2964 	}
2965 
2966 	/*
2967 	 * If no other parent_fd matches the child_fd then use it instead of a
2968 	 * temporary.
2969 	 */
2970 	int fd = child_fd;
2971 	if (fd == -1 || fd_is_open(fd)) {
2972 		fd = FD_SETSIZE - 1;
2973 		while (FD_ISSET(fd, child_fds) || fd_is_open(fd)) {
2974 			--fd;
2975 			if (fd < 0) {
2976 				die("failed to find an unused fd");
2977 			}
2978 		}
2979 	}
2980 
2981 	int ret = dup2(*parent_fd, fd);
2982 	/*
2983 	 * warn() opens a file descriptor so it needs to happen after dup2 to
2984 	 * avoid unintended side effects. This can be avoided by reordering the
2985 	 * mapping requests so that the source fds with overlap are mapped
2986 	 * first (unless there are cycles).
2987 	 */
2988 	warn("mapped fd overlap: moving %d to %d", *parent_fd, fd);
2989 	if (ret == -1) {
2990 		return -1;
2991 	}
2992 
2993 	*parent_fd = fd;
2994 	return 0;
2995 }
2996 
2997 /*
2998  * Populate child_fds_out with the set of file descriptors that will be replaced
2999  * by redirect_fds().
3000  *
3001  * NOTE: This creates temporaries for parent file descriptors that would
3002  * otherwise be overwritten during redirect_fds().
3003  */
3004 static int get_child_fds(struct minijail *j, fd_set *child_fds_out)
3005 {
3006 	/* Relocate parent_fds that would be replaced by a child_fd. */
3007 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
3008 		int child_fd = j->preserved_fds[i].child_fd;
3009 		if (FD_ISSET(child_fd, child_fds_out)) {
3010 			die("fd %d is mapped more than once", child_fd);
3011 		}
3012 
3013 		int *parent_fd = &j->preserved_fds[i].parent_fd;
3014 		if (ensure_no_fd_conflict(child_fds_out, child_fd, parent_fd) ==
3015 		    -1) {
3016 			return -1;
3017 		}
3018 
3019 		FD_SET(child_fd, child_fds_out);
3020 	}
3021 	return 0;
3022 }
3023 
3024 /*
3025  * Structure holding resources and state created when running a minijail.
3026  */
3027 struct minijail_run_state {
3028 	pid_t child_pid;
3029 	int pipe_fds[2];
3030 	int stdin_fds[2];
3031 	int stdout_fds[2];
3032 	int stderr_fds[2];
3033 	int child_sync_pipe_fds[2];
3034 	char **child_env;
3035 };
3036 
3037 /*
3038  * Move pipe_fds if they conflict with a child_fd.
3039  */
3040 static int avoid_pipe_conflicts(struct minijail_run_state *state,
3041 				fd_set *child_fds_out)
3042 {
3043 	int *pipe_fds[] = {
3044 	    state->pipe_fds,   state->child_sync_pipe_fds, state->stdin_fds,
3045 	    state->stdout_fds, state->stderr_fds,
3046 	};
3047 	for (size_t i = 0; i < ARRAY_SIZE(pipe_fds); ++i) {
3048 		if (pipe_fds[i][0] != -1 &&
3049 		    ensure_no_fd_conflict(child_fds_out, -1, &pipe_fds[i][0]) ==
3050 			-1) {
3051 			return -1;
3052 		}
3053 		if (pipe_fds[i][1] != -1 &&
3054 		    ensure_no_fd_conflict(child_fds_out, -1, &pipe_fds[i][1]) ==
3055 			-1) {
3056 			return -1;
3057 		}
3058 	}
3059 	return 0;
3060 }
3061 
3062 /*
3063  * Redirect j->preserved_fds from the parent_fd to the child_fd.
3064  *
3065  * NOTE: This will clear FD_CLOEXEC since otherwise the child_fd would not be
3066  * inherited after the exec call.
3067  */
3068 static int redirect_fds(struct minijail *j, fd_set *child_fds)
3069 {
3070 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
3071 		if (j->preserved_fds[i].parent_fd ==
3072 		    j->preserved_fds[i].child_fd) {
3073 			// Clear CLOEXEC if it is set so the FD will be
3074 			// inherited by the child.
3075 			int flags =
3076 			    fcntl(j->preserved_fds[i].child_fd, F_GETFD);
3077 			if (flags == -1 || (flags & FD_CLOEXEC) == 0) {
3078 				continue;
3079 			}
3080 
3081 			// Currently FD_CLOEXEC is cleared without being
3082 			// restored. It may make sense to track when this
3083 			// happens and restore FD_CLOEXEC in the child process.
3084 			flags &= ~FD_CLOEXEC;
3085 			if (fcntl(j->preserved_fds[i].child_fd, F_SETFD,
3086 				  flags) == -1) {
3087 				pwarn("failed to clear CLOEXEC for %d",
3088 				      j->preserved_fds[i].parent_fd);
3089 			}
3090 			continue;
3091 		}
3092 		if (dup2(j->preserved_fds[i].parent_fd,
3093 			 j->preserved_fds[i].child_fd) == -1) {
3094 			return -1;
3095 		}
3096 	}
3097 
3098 	/*
3099 	 * After all fds have been duped, we are now free to close all parent
3100 	 * fds that are *not* child fds.
3101 	 */
3102 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
3103 		int parent_fd = j->preserved_fds[i].parent_fd;
3104 		if (!FD_ISSET(parent_fd, child_fds)) {
3105 			close(parent_fd);
3106 		}
3107 	}
3108 	return 0;
3109 }
3110 
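/*
 * Editorial sketch (not part of the original source): the preserved-fd
 * machinery above is driven from the public API, assuming
 * minijail_preserve_fd() appends to |j->preserved_fds| as its name suggests:
 *
 *   int sock = socket(AF_UNIX, SOCK_STREAM, 0);
 *   minijail_preserve_fd(j, sock, 3);  // the child sees the socket as fd 3
 *   minijail_close_open_fds(j);        // everything not preserved is closed
 */
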
3111 static void minijail_free_run_state(struct minijail_run_state *state)
3112 {
3113 	state->child_pid = -1;
3114 
3115 	int *fd_pairs[] = {state->pipe_fds, state->stdin_fds, state->stdout_fds,
3116 			   state->stderr_fds, state->child_sync_pipe_fds};
3117 	for (size_t i = 0; i < ARRAY_SIZE(fd_pairs); ++i) {
3118 		close_and_reset(&fd_pairs[i][0]);
3119 		close_and_reset(&fd_pairs[i][1]);
3120 	}
3121 
3122 	minijail_free_env(state->child_env);
3123 	state->child_env = NULL;
3124 }
3125 
3126 /* Set up stdin/stdout/stderr file descriptors in the child. */
3127 static void setup_child_std_fds(struct minijail *j,
3128 				struct minijail_run_state *state)
3129 {
3130 	struct {
3131 		const char *name;
3132 		int from;
3133 		int to;
3134 	} fd_map[] = {
3135 	    {"stdin", state->stdin_fds[0], STDIN_FILENO},
3136 	    {"stdout", state->stdout_fds[1], STDOUT_FILENO},
3137 	    {"stderr", state->stderr_fds[1], STDERR_FILENO},
3138 	};
3139 
3140 	for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
3141 		if (fd_map[i].from == -1 || fd_map[i].from == fd_map[i].to)
3142 			continue;
3143 		if (dup2(fd_map[i].from, fd_map[i].to) == -1)
3144 			die("failed to set up %s pipe", fd_map[i].name);
3145 	}
3146 
3147 	/* Close temporary pipe file descriptors. */
3148 	int *std_pipes[] = {state->stdin_fds, state->stdout_fds,
3149 			    state->stderr_fds};
3150 	for (size_t i = 0; i < ARRAY_SIZE(std_pipes); ++i) {
3151 		close_and_reset(&std_pipes[i][0]);
3152 		close_and_reset(&std_pipes[i][1]);
3153 	}
3154 
3155 	/*
3156 	 * If any of stdin, stdout, or stderr is a TTY, or the setsid flag is
3157 	 * set, create a new session. This prevents the jailed process from
3158 	 * using the TIOCSTI ioctl to push characters into the parent process
3159 	 * terminal's input buffer, therefore escaping the jail.
3160 	 *
3161 	 * Since it has just forked, the child will not be a process group
3162 	 * leader, and this call to setsid() should always succeed.
3163 	 */
3164 	if (j->flags.setsid || isatty(STDIN_FILENO) || isatty(STDOUT_FILENO) ||
3165 	    isatty(STDERR_FILENO)) {
3166 		if (setsid() < 0) {
3167 			pdie("setsid() failed");
3168 		}
3169 
3170 		if (isatty(STDIN_FILENO)) {
3171 			if (ioctl(STDIN_FILENO, TIOCSCTTY, 0) != 0) {
3172 				pwarn("failed to set controlling terminal");
3173 			}
3174 		}
3175 	}
3176 }
3177 
3178 /*
3179  * Structure that specifies how to start a minijail.
3180  *
3181  * filename - The program to exec in the child. Should be NULL if elf_fd is set.
3182  * elf_fd - A fd to be used with fexecve. Should be -1 if filename is set.
3183  *   NOTE: either filename or elf_fd is required if |exec_in_child| = 1.
3184  * argv - Arguments for the child program. Required if |exec_in_child| = 1.
3185  * envp - Environment for the child program. Available if |exec_in_child| = 1.
3186  * use_preload - If true use LD_PRELOAD.
3187  * exec_in_child - If true, run |filename|. Otherwise, the child will return to
3188  *     the caller.
3189  * pstdin_fd - Filled with stdin pipe if non-NULL.
3190  * pstdout_fd - Filled with stdout pipe if non-NULL.
3191  * pstderr_fd - Filled with stderr pipe if non-NULL.
3192  * pchild_pid - Filled with the pid of the child process if non-NULL.
3193  */
3194 struct minijail_run_config {
3195 	const char *filename;
3196 	int elf_fd;
3197 	char *const *argv;
3198 	char *const *envp;
3199 	int use_preload;
3200 	int exec_in_child;
3201 	int *pstdin_fd;
3202 	int *pstdout_fd;
3203 	int *pstderr_fd;
3204 	pid_t *pchild_pid;
3205 };
3206 
3207 static int
3208 minijail_run_config_internal(struct minijail *j,
3209 			     const struct minijail_run_config *config);
3210 
3211 int API minijail_run(struct minijail *j, const char *filename,
3212 		     char *const argv[])
3213 {
3214 	struct minijail_run_config config = {
3215 	    .filename = filename,
3216 	    .elf_fd = -1,
3217 	    .argv = argv,
3218 	    .envp = NULL,
3219 	    .use_preload = true,
3220 	    .exec_in_child = true,
3221 	};
3222 	return minijail_run_config_internal(j, &config);
3223 }
3224 
3225 int API minijail_run_env(struct minijail *j, const char *filename,
3226 			 char *const argv[], char *const envp[])
3227 {
3228 	struct minijail_run_config config = {
3229 	    .filename = filename,
3230 	    .elf_fd = -1,
3231 	    .argv = argv,
3232 	    .envp = envp,
3233 	    .use_preload = true,
3234 	    .exec_in_child = true,
3235 	};
3236 	return minijail_run_config_internal(j, &config);
3237 }
3238 
3239 int API minijail_run_pid(struct minijail *j, const char *filename,
3240 			 char *const argv[], pid_t *pchild_pid)
3241 {
3242 	struct minijail_run_config config = {
3243 	    .filename = filename,
3244 	    .elf_fd = -1,
3245 	    .argv = argv,
3246 	    .envp = NULL,
3247 	    .use_preload = true,
3248 	    .exec_in_child = true,
3249 	    .pchild_pid = pchild_pid,
3250 	};
3251 	return minijail_run_config_internal(j, &config);
3252 }
3253 
3254 int API minijail_run_pipe(struct minijail *j, const char *filename,
3255 			  char *const argv[], int *pstdin_fd)
3256 {
3257 	struct minijail_run_config config = {
3258 	    .filename = filename,
3259 	    .elf_fd = -1,
3260 	    .argv = argv,
3261 	    .envp = NULL,
3262 	    .use_preload = true,
3263 	    .exec_in_child = true,
3264 	    .pstdin_fd = pstdin_fd,
3265 	};
3266 	return minijail_run_config_internal(j, &config);
3267 }
3268 
3269 int API minijail_run_pid_pipes(struct minijail *j, const char *filename,
3270 			       char *const argv[], pid_t *pchild_pid,
3271 			       int *pstdin_fd, int *pstdout_fd, int *pstderr_fd)
3272 {
3273 	struct minijail_run_config config = {
3274 	    .filename = filename,
3275 	    .elf_fd = -1,
3276 	    .argv = argv,
3277 	    .envp = NULL,
3278 	    .use_preload = true,
3279 	    .exec_in_child = true,
3280 	    .pstdin_fd = pstdin_fd,
3281 	    .pstdout_fd = pstdout_fd,
3282 	    .pstderr_fd = pstderr_fd,
3283 	    .pchild_pid = pchild_pid,
3284 	};
3285 	return minijail_run_config_internal(j, &config);
3286 }
3287 
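/*
 * Editorial sketch (not part of the original source): a typical caller of the
 * variant above launches the child, reads its stdout, and reaps it, assuming
 * minijail_wait() returns the child's exit status:
 *
 *   pid_t pid;
 *   int out = -1;
 *   char *const argv[] = {"/bin/echo", "hello", NULL};
 *   minijail_run_pid_pipes(j, argv[0], argv, &pid, NULL, &out, NULL);
 *   // ... read(2) from |out| until EOF ...
 *   int status = minijail_wait(j);
 */
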
3288 int API minijail_run_env_pid_pipes(struct minijail *j, const char *filename,
3289 				   char *const argv[], char *const envp[],
3290 				   pid_t *pchild_pid, int *pstdin_fd,
3291 				   int *pstdout_fd, int *pstderr_fd)
3292 {
3293 	struct minijail_run_config config = {
3294 	    .filename = filename,
3295 	    .elf_fd = -1,
3296 	    .argv = argv,
3297 	    .envp = envp,
3298 	    .use_preload = true,
3299 	    .exec_in_child = true,
3300 	    .pstdin_fd = pstdin_fd,
3301 	    .pstdout_fd = pstdout_fd,
3302 	    .pstderr_fd = pstderr_fd,
3303 	    .pchild_pid = pchild_pid,
3304 	};
3305 	return minijail_run_config_internal(j, &config);
3306 }
3307 
3308 int API minijail_run_fd_env_pid_pipes(struct minijail *j, int elf_fd,
3309 				      char *const argv[], char *const envp[],
3310 				      pid_t *pchild_pid, int *pstdin_fd,
3311 				      int *pstdout_fd, int *pstderr_fd)
3312 {
3313 	struct minijail_run_config config = {
3314 	    .filename = NULL,
3315 	    .elf_fd = elf_fd,
3316 	    .argv = argv,
3317 	    .envp = envp,
3318 	    .use_preload = true,
3319 	    .exec_in_child = true,
3320 	    .pstdin_fd = pstdin_fd,
3321 	    .pstdout_fd = pstdout_fd,
3322 	    .pstderr_fd = pstderr_fd,
3323 	    .pchild_pid = pchild_pid,
3324 	};
3325 	return minijail_run_config_internal(j, &config);
3326 }
3327 
3328 int API minijail_run_no_preload(struct minijail *j, const char *filename,
3329 				char *const argv[])
3330 {
3331 	struct minijail_run_config config = {
3332 	    .filename = filename,
3333 	    .elf_fd = -1,
3334 	    .argv = argv,
3335 	    .envp = NULL,
3336 	    .use_preload = false,
3337 	    .exec_in_child = true,
3338 	};
3339 	return minijail_run_config_internal(j, &config);
3340 }
3341 
3342 int API minijail_run_pid_pipes_no_preload(struct minijail *j,
3343 					  const char *filename,
3344 					  char *const argv[], pid_t *pchild_pid,
3345 					  int *pstdin_fd, int *pstdout_fd,
3346 					  int *pstderr_fd)
3347 {
3348 	struct minijail_run_config config = {
3349 	    .filename = filename,
3350 	    .elf_fd = -1,
3351 	    .argv = argv,
3352 	    .envp = NULL,
3353 	    .use_preload = false,
3354 	    .exec_in_child = true,
3355 	    .pstdin_fd = pstdin_fd,
3356 	    .pstdout_fd = pstdout_fd,
3357 	    .pstderr_fd = pstderr_fd,
3358 	    .pchild_pid = pchild_pid,
3359 	};
3360 	return minijail_run_config_internal(j, &config);
3361 }
3362 
3363 int API minijail_run_env_pid_pipes_no_preload(struct minijail *j,
3364 					      const char *filename,
3365 					      char *const argv[],
3366 					      char *const envp[],
3367 					      pid_t *pchild_pid, int *pstdin_fd,
3368 					      int *pstdout_fd, int *pstderr_fd)
3369 {
3370 	struct minijail_run_config config = {
3371 	    .filename = filename,
3372 	    .elf_fd = -1,
3373 	    .argv = argv,
3374 	    .envp = envp,
3375 	    .use_preload = false,
3376 	    .exec_in_child = true,
3377 	    .pstdin_fd = pstdin_fd,
3378 	    .pstdout_fd = pstdout_fd,
3379 	    .pstderr_fd = pstderr_fd,
3380 	    .pchild_pid = pchild_pid,
3381 	};
3382 	return minijail_run_config_internal(j, &config);
3383 }
3384 
3385 pid_t API minijail_fork(struct minijail *j)
3386 {
3387 	struct minijail_run_config config = {
3388 	    .elf_fd = -1,
3389 	};
3390 	return minijail_run_config_internal(j, &config);
3391 }
3392 
3393 static int minijail_run_internal(struct minijail *j,
3394 				 const struct minijail_run_config *config,
3395 				 struct minijail_run_state *state_out)
3396 {
3397 	int sync_child = 0;
3398 	int ret;
3399 	/* We need to remember this across the minijail_preexec() call. */
3400 	int pid_namespace = j->flags.pids;
3401 	/*
3402 	 * Create an init process if we are entering a pid namespace, unless the
3403 	 * user has explicitly opted out by calling minijail_run_as_init().
3404 	 */
3405 	int do_init = j->flags.do_init && !j->flags.run_as_init;
3406 	int use_preload = config->use_preload;
3407 
3408 	if (config->filename != NULL && config->elf_fd != -1) {
3409 		die("filename and elf_fd cannot be set at the same time");
3410 	}
3411 
3412 	/*
3413 	 * Only copy the environment if we need to modify it. If this is done
3414 	 * unconditionally, it triggers odd behavior in the ARC container.
3415 	 */
3416 	if (use_preload || j->seccomp_policy_path) {
3417 		state_out->child_env =
3418 		    minijail_copy_env(config->envp ? config->envp : environ);
3419 		if (!state_out->child_env)
3420 			return ENOMEM;
3421 	}
3422 
3423 	if (j->seccomp_policy_path &&
3424 	    setup_seccomp_policy_path(j, &state_out->child_env))
3425 		return -EFAULT;
3426 
3427 	if (use_preload) {
3428 		if (j->hooks_head != NULL)
3429 			die("Minijail hooks are not supported with LD_PRELOAD");
3430 		if (!config->exec_in_child)
3431 			die("minijail_fork is not supported with LD_PRELOAD");
3432 
3433 		/*
3434 		 * Before we fork(2) and execve(2) the child process, we need
3435 		 * to open a pipe(2) to send the minijail configuration over.
3436 		 */
3437 		if (setup_preload(j, &state_out->child_env) ||
3438 		    setup_pipe(&state_out->child_env, state_out->pipe_fds))
3439 			return -EFAULT;
3440 	}
3441 
3442 	if (!use_preload) {
3443 		if (j->flags.use_caps && j->caps != 0 &&
3444 		    !j->flags.set_ambient_caps) {
3445 			die("non-empty, non-ambient capabilities are not "
3446 			    "supported without LD_PRELOAD");
3447 		}
3448 	}
3449 
3450 	/* Create pipes for stdin/stdout/stderr as requested by caller. */
3451 	struct {
3452 		bool requested;
3453 		int *pipe_fds;
3454 	} pipe_fd_req[] = {
3455 	    {config->pstdin_fd != NULL, state_out->stdin_fds},
3456 	    {config->pstdout_fd != NULL, state_out->stdout_fds},
3457 	    {config->pstderr_fd != NULL, state_out->stderr_fds},
3458 	};
3459 
3460 	for (size_t i = 0; i < ARRAY_SIZE(pipe_fd_req); ++i) {
3461 		if (pipe_fd_req[i].requested &&
3462 		    pipe(pipe_fd_req[i].pipe_fds) == -1)
3463 			return EFAULT;
3464 	}
3465 
3466 	/*
3467 	 * If the parent process needs to configure the child's runtime
3468 	 * environment after forking, create a pipe(2) to block the child until
3469 	 * configuration is done.
3470 	 */
3471 	if (j->flags.forward_signals || j->flags.pid_file || j->flags.cgroups ||
3472 	    j->rlimit_count || j->flags.userns) {
3473 		sync_child = 1;
3474 		if (pipe(state_out->child_sync_pipe_fds))
3475 			return -EFAULT;
3476 	}
3477 
3478 	/*
3479 	 * Use sys_clone() if and only if we're creating a pid namespace.
3480 	 *
3481 	 * tl;dr: WARNING: do not mix pid namespaces and multithreading.
3482 	 *
3483 	 * In multithreaded programs, there are a bunch of locks inside libc,
3484 	 * some of which may be held by other threads at the time that we call
3485 	 * minijail_run_pid(). If we call fork(), glibc does its level best to
3486 	 * ensure that we hold all of these locks before it calls clone()
3487 	 * internally and drop them after clone() returns, but when we call
3488 	 * sys_clone(2) directly, all that gets bypassed and we end up with a
3489 	 * child address space where some of libc's important locks are held by
3490 	 * other threads (which did not get cloned, and hence will never release
3491 	 * those locks). This is okay so long as we call exec() immediately
3492 	 * after, but a bunch of seemingly-innocent libc functions like setenv()
3493 	 * take locks.
3494 	 *
3495 	 * Hence, only call sys_clone() if we need to, in order to get at pid
3496 	 * namespacing. If we follow this path, the child's address space might
3497 	 * have broken locks; you may only call functions that do not acquire
3498 	 * any locks.
3499 	 *
3500 	 * Unfortunately, fork() acquires every lock it can get its hands on, as
3501 	 * previously detailed, so this function is highly likely to deadlock
3502 	 * later on (see "deadlock here") if we're multithreaded.
3503 	 *
3504 	 * We might hack around this by having the clone()d child (init of the
3505 	 * pid namespace) return directly, rather than leaving the clone()d
3506 	 * process hanging around to be init for the new namespace (and having
3507 	 * its fork()ed child return in turn), but that process would be
3508 	 * crippled with its libc locks potentially broken. We might try
3509 	 * fork()ing in the parent before we clone() to ensure that we own all
3510 	 * the locks, but then we have to have the forked child hanging around
3511 	 * consuming resources (and possibly having file descriptors / shared
3512 	 * memory regions / etc attached). We'd need to keep the child around to
3513 	 * avoid having its children get reparented to init.
3514 	 *
3515 	 * TODO(ellyjones): figure out if the "forked child hanging around"
3516 	 * problem is fixable or not. It would be nice if we worked in this
3517 	 * case.
3518 	 */
3519 	pid_t child_pid;
3520 	if (pid_namespace) {
3521 		unsigned long clone_flags = CLONE_NEWPID | SIGCHLD;
3522 		if (j->flags.userns)
3523 			clone_flags |= CLONE_NEWUSER;
3524 
3525 		child_pid = syscall(SYS_clone, clone_flags, NULL, 0L, 0L, 0L);
3526 
3527 		if (child_pid < 0) {
3528 			if (errno == EPERM)
3529 				pdie("clone(CLONE_NEWPID | ...) failed with "
3530 				     "EPERM; is this process missing "
3531 				     "CAP_SYS_ADMIN?");
3532 			pdie("clone(CLONE_NEWPID | ...) failed");
3533 		}
3534 	} else {
3535 		child_pid = fork();
3536 
3537 		if (child_pid < 0)
3538 			pdie("fork failed");
3539 	}
3540 
3541 	state_out->child_pid = child_pid;
3542 	if (child_pid) {
3543 		j->initpid = child_pid;
3544 
3545 		if (j->flags.forward_signals) {
3546 			forward_pid = child_pid;
3547 			install_signal_handlers();
3548 		}
3549 
3550 		if (j->flags.pid_file)
3551 			write_pid_file_or_die(j);
3552 
3553 		if (j->flags.cgroups)
3554 			add_to_cgroups_or_die(j);
3555 
3556 		if (j->rlimit_count)
3557 			set_rlimits_or_die(j);
3558 
3559 		if (j->flags.userns)
3560 			write_ugid_maps_or_die(j);
3561 
3562 		if (j->flags.enter_vfs)
3563 			close(j->mountns_fd);
3564 
3565 		if (j->flags.enter_net)
3566 			close(j->netns_fd);
3567 
3568 		if (sync_child)
3569 			parent_setup_complete(state_out->child_sync_pipe_fds);
3570 
3571 		if (use_preload) {
3572 			/*
3573 			 * Add SIGPIPE to the signal mask to avoid getting
3574 			 * killed if the child process finishes or closes its
3575 			 * end of the pipe prematurely.
3576 			 *
3577 			 * TODO(crbug.com/1022170): Use pthread_sigmask instead
3578 			 * of sigprocmask if Minijail is used in multithreaded
3579 			 * programs.
3580 			 */
3581 			sigset_t to_block, to_restore;
3582 			if (sigemptyset(&to_block) < 0)
3583 				pdie("sigemptyset failed");
3584 			if (sigaddset(&to_block, SIGPIPE) < 0)
3585 				pdie("sigaddset failed");
3586 			if (sigprocmask(SIG_BLOCK, &to_block, &to_restore) < 0)
3587 				pdie("sigprocmask failed");
3588 
3589 			/* Send marshalled minijail. */
3590 			close_and_reset(&state_out->pipe_fds[0]);
3591 			ret = minijail_to_fd(j, state_out->pipe_fds[1]);
3592 			close_and_reset(&state_out->pipe_fds[1]);
3593 
3594 			/* Accept any pending SIGPIPE. */
3595 			while (true) {
3596 				const struct timespec zero_time = {0, 0};
3597 				const int sig =
3598 				    sigtimedwait(&to_block, NULL, &zero_time);
3599 				if (sig < 0) {
3600 					if (errno != EINTR)
3601 						break;
3602 				} else {
3603 					if (sig != SIGPIPE)
3604 						die("unexpected signal %d",
3605 						    sig);
3606 				}
3607 			}
3608 
3609 			/* Restore the signal mask to its original state. */
3610 			if (sigprocmask(SIG_SETMASK, &to_restore, NULL) < 0)
3611 				pdie("sigprocmask failed");
3612 
3613 			if (ret) {
3614 				warn("failed to send marshalled minijail: %s",
3615 				     strerror(-ret));
3616 				kill(j->initpid, SIGKILL);
3617 			}
3618 		}
3619 
3620 		return 0;
3621 	}
3622 
3623 	/* Child process. */
3624 	if (j->flags.reset_signal_mask) {
3625 		sigset_t signal_mask;
3626 		if (sigemptyset(&signal_mask) != 0)
3627 			pdie("sigemptyset failed");
3628 		if (sigprocmask(SIG_SETMASK, &signal_mask, NULL) != 0)
3629 			pdie("sigprocmask failed");
3630 	}
3631 
3632 	if (j->flags.reset_signal_handlers) {
3633 		int signum;
3634 		for (signum = 0; signum <= SIGRTMAX; signum++) {
3635 			/*
3636 			 * Ignore EINVAL since some signal numbers in the range
3637 			 * might not be valid.
3638 			 */
3639 			if (signal(signum, SIG_DFL) == SIG_ERR &&
3640 			    errno != EINVAL) {
3641 				pdie("failed to reset signal %d disposition",
3642 				     signum);
3643 			}
3644 		}
3645 	}
3646 
3647 	if (j->flags.close_open_fds) {
3648 		const size_t kMaxInheritableFdsSize = 11 + MAX_PRESERVED_FDS;
3649 		int inheritable_fds[kMaxInheritableFdsSize];
3650 		size_t size = 0;
3651 
3652 		int *pipe_fds[] = {
3653 		    state_out->pipe_fds,   state_out->child_sync_pipe_fds,
3654 		    state_out->stdin_fds,  state_out->stdout_fds,
3655 		    state_out->stderr_fds,
3656 		};
3657 
3658 		for (size_t i = 0; i < ARRAY_SIZE(pipe_fds); ++i) {
3659 			if (pipe_fds[i][0] != -1) {
3660 				inheritable_fds[size++] = pipe_fds[i][0];
3661 			}
3662 			if (pipe_fds[i][1] != -1) {
3663 				inheritable_fds[size++] = pipe_fds[i][1];
3664 			}
3665 		}
3666 
3667 		/*
3668 		 * Preserve namespace file descriptors over the close_open_fds()
3669 		 * call. These are closed in minijail_enter() so they won't leak
3670 		 * into the child process.
3671 		 */
3672 		if (j->flags.enter_vfs)
3673 			minijail_preserve_fd(j, j->mountns_fd, j->mountns_fd);
3674 		if (j->flags.enter_net)
3675 			minijail_preserve_fd(j, j->netns_fd, j->netns_fd);
3676 
3677 		for (size_t i = 0; i < j->preserved_fd_count; i++) {
3678 			/*
3679 			 * Preserve all parent_fds. They will be dup2(2)-ed in
3680 			 * the child later.
3681 			 */
3682 			inheritable_fds[size++] = j->preserved_fds[i].parent_fd;
3683 		}
3684 
3685 		if (config->elf_fd > -1) {
3686 			inheritable_fds[size++] = config->elf_fd;
3687 		}
3688 
3689 		if (close_open_fds(inheritable_fds, size) < 0)
3690 			die("failed to close open file descriptors");
3691 	}
3692 
3693 	/* The set of fds will be replaced. */
3694 	fd_set child_fds;
3695 	FD_ZERO(&child_fds);
3696 	if (get_child_fds(j, &child_fds))
3697 		die("failed to set up fd redirections");
3698 
3699 	if (avoid_pipe_conflicts(state_out, &child_fds))
3700 		die("failed to redirect conflicting pipes");
3701 
3702 	/* The elf_fd needs to be mutable so use a stack copy from now on. */
3703 	int elf_fd = config->elf_fd;
3704 	if (elf_fd != -1 && ensure_no_fd_conflict(&child_fds, -1, &elf_fd))
3705 		die("failed to redirect elf_fd");
3706 
3707 	if (redirect_fds(j, &child_fds))
3708 		die("failed to set up fd redirections");
3709 
3710 	if (sync_child)
3711 		wait_for_parent_setup(state_out->child_sync_pipe_fds);
3712 
3713 	if (j->flags.userns)
3714 		enter_user_namespace(j);
3715 
3716 	setup_child_std_fds(j, state_out);
3717 
3718 	/* If running an init program, let it decide when/how to mount /proc. */
3719 	if (pid_namespace && !do_init)
3720 		j->flags.remount_proc_ro = 0;
3721 
3722 	if (use_preload) {
3723 		/* Strip out flags that cannot be inherited across execve(2). */
3724 		minijail_preexec(j);
3725 	} else {
3726 		/*
3727 		 * If not using LD_PRELOAD, do all jailing before execve(2).
3728 		 * Note that PID namespaces can only be entered on fork(2),
3729 		 * so that flag is still cleared.
3730 		 */
3731 		j->flags.pids = 0;
3732 	}
3733 
3734 	/*
3735 	 * Jail this process.
3736 	 * If forking, return.
3737 	 * If not, execve(2) the target.
3738 	 */
3739 	minijail_enter(j);
3740 
3741 	if (config->exec_in_child && pid_namespace && do_init) {
3742 		/*
3743 		 * pid namespace: this process will become init inside the new
3744 		 * namespace. We don't want all programs we might exec to have
3745 		 * to know how to be init. Normally (do_init == 1) we fork off
3746 		 * a child to actually run the program. If |do_init == 0|, we
3747 		 * let the program keep pid 1 and be init.
3748 		 *
3749 		 * If we're multithreaded, we'll probably deadlock here. See
3750 		 * WARNING above.
3751 		 */
3752 		child_pid = fork();
3753 		if (child_pid < 0) {
3754 			_exit(child_pid);
3755 		} else if (child_pid > 0) {
3756 			minijail_free_run_state(state_out);
3757 
3758 			/*
3759 			 * Best effort. Don't bother checking the return value.
3760 			 */
3761 			prctl(PR_SET_NAME, "minijail-init");
3762 			init(child_pid); /* Never returns. */
3763 		}
3764 		state_out->child_pid = child_pid;
3765 	}
3766 
3767 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_EXECVE);
3768 
3769 	if (!config->exec_in_child)
3770 		return 0;
3771 
3772 	/*
3773 	 * We're going to execve(), so make sure any remaining resources are
3774 	 * freed. Exceptions are:
3775 	 *  1. The child environment. No need to worry about freeing it since
3776 	 *     execve reinitializes the heap anyways.
3777 	 *  2. The read side of the LD_PRELOAD pipe, which we need to hand down
3778 	 *     to the exec'd target, where the preloaded code will read from it
3779 	 *     and then close it.
3780 	 */
3781 	state_out->pipe_fds[0] = -1;
3782 	char *const *child_env = state_out->child_env;
3783 	state_out->child_env = NULL;
3784 	minijail_free_run_state(state_out);
3785 
3786 	/*
3787 	 * If we aren't pid-namespaced, or the jailed program asked to be init:
3788 	 *   calling process
3789 	 *   -> execve()-ing process
3790 	 * If we are:
3791 	 *   calling process
3792 	 *   -> init()-ing process
3793 	 *      -> execve()-ing process
3794 	 */
3795 	if (!child_env)
3796 		child_env = config->envp ? config->envp : environ;
3797 	if (elf_fd > -1) {
3798 		fexecve(elf_fd, config->argv, child_env);
3799 		pwarn("fexecve(%d) failed", config->elf_fd);
3800 	} else {
3801 		execve(config->filename, config->argv, child_env);
3802 		pwarn("execve(%s) failed", config->filename);
3803 	}
3804 
3805 	ret = (errno == ENOENT ? MINIJAIL_ERR_NO_COMMAND
3806 			       : MINIJAIL_ERR_NO_ACCESS);
3807 	_exit(ret);
3808 }
3809 
3810 static int
3811 minijail_run_config_internal(struct minijail *j,
3812 			     const struct minijail_run_config *config)
3813 {
3814 	struct minijail_run_state state = {
3815 	    .child_pid = -1,
3816 	    .pipe_fds = {-1, -1},
3817 	    .stdin_fds = {-1, -1},
3818 	    .stdout_fds = {-1, -1},
3819 	    .stderr_fds = {-1, -1},
3820 	    .child_sync_pipe_fds = {-1, -1},
3821 	    .child_env = NULL,
3822 	};
3823 	int ret = minijail_run_internal(j, config, &state);
3824 
3825 	if (ret == 0) {
3826 		if (config->pchild_pid)
3827 			*config->pchild_pid = state.child_pid;
3828 
3829 		/* Grab stdin/stdout/stderr descriptors requested by caller. */
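		/*
		 * (Editor's note, added for clarity: per fd_map below, the
		 * caller receives the write end of the child's stdin pipe and
		 * the read ends of its stdout/stderr pipes.)
		 */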
3830 		struct {
3831 			int *pfd;
3832 			int *psrc;
3833 		} fd_map[] = {
3834 		    {config->pstdin_fd, &state.stdin_fds[1]},
3835 		    {config->pstdout_fd, &state.stdout_fds[0]},
3836 		    {config->pstderr_fd, &state.stderr_fds[0]},
3837 		};
3838 
3839 		for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
3840 			if (fd_map[i].pfd) {
3841 				*fd_map[i].pfd = *fd_map[i].psrc;
3842 				*fd_map[i].psrc = -1;
3843 			}
3844 		}
3845 
3846 		if (!config->exec_in_child)
3847 			ret = state.child_pid;
3848 	}
3849 
3850 	minijail_free_run_state(&state);
3851 
3852 	return ret;
3853 }
3854 
3855 static int minijail_wait_internal(struct minijail *j, int expected_signal)
3856 {
3857 	if (j->initpid <= 0)
3858 		return -ECHILD;
3859 
3860 	int st;
3861 	while (true) {
3862 		const int ret = waitpid(j->initpid, &st, 0);
3863 		if (ret >= 0)
3864 			break;
3865 		if (errno != EINTR)
3866 			return -errno;
3867 	}
3868 
3869 	if (!WIFEXITED(st)) {
3870 		int error_status = st;
3871 		if (!WIFSIGNALED(st)) {
3872 			return error_status;
3873 		}
3874 
3875 		int signum = WTERMSIG(st);
3876 		/*
3877 		 * We return MINIJAIL_ERR_JAIL if the process received
3878 		 * SIGSYS, which happens when a syscall is blocked by
3879 		 * seccomp filters.
3880 		 * If not, we do what bash(1) does:
3881 		 * $? = 128 + signum
3882 		 */
3883 		if (signum == SIGSYS) {
3884 			warn("child process %d had a policy violation (%s)",
3885 			     j->initpid,
3886 			     j->seccomp_policy_path ? j->seccomp_policy_path
3887 						    : "NO-LABEL");
3888 			error_status = MINIJAIL_ERR_JAIL;
3889 		} else {
3890 			if (signum != expected_signal) {
3891 				warn("child process %d received signal %d",
3892 				     j->initpid, signum);
3893 			}
3894 			error_status = MINIJAIL_ERR_SIG_BASE + signum;
3895 		}
3896 		return error_status;
3897 	}
3898 
3899 	int exit_status = WEXITSTATUS(st);
3900 	if (exit_status != 0)
3901 		info("child process %d exited with status %d", j->initpid,
3902 		     exit_status);
3903 
3904 	return exit_status;
3905 }
3906 
3907 int API minijail_kill(struct minijail *j)
3908 {
3909 	if (j->initpid <= 0)
3910 		return -ECHILD;
3911 
3912 	if (kill(j->initpid, SIGTERM))
3913 		return -errno;
3914 
3915 	return minijail_wait_internal(j, SIGTERM);
3916 }
3917 
3918 int API minijail_wait(struct minijail *j)
3919 {
3920 	return minijail_wait_internal(j, 0);
3921 }
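
/*
 * Editor's note: the block below is an illustrative sketch, not part of the
 * original source. It shows one way a caller might interpret the value
 * returned by minijail_wait(), following the logic in
 * minijail_wait_internal() above. The MINIJAIL_ERR_* constants come from
 * libminijail.h.
 */
#if 0
#include <stdio.h>

#include "libminijail.h"

static void example_report_status(struct minijail *j)
{
	int status = minijail_wait(j);

	if (status == MINIJAIL_ERR_JAIL) {
		/* Child was killed by SIGSYS (seccomp policy violation). */
		fprintf(stderr, "seccomp policy violation\n");
	} else if (status > MINIJAIL_ERR_SIG_BASE) {
		/* Child was killed by some other signal. */
		fprintf(stderr, "killed by signal %d\n",
			status - MINIJAIL_ERR_SIG_BASE);
	} else if (status < 0) {
		/* waitpid(2) failure, e.g. -ECHILD if nothing was started. */
		fprintf(stderr, "wait failed: %d\n", status);
	} else {
		/*
		 * Normal exit status; MINIJAIL_ERR_NO_COMMAND and
		 * MINIJAIL_ERR_NO_ACCESS land here when execve(2) failed.
		 */
		fprintf(stderr, "exited with status %d\n", status);
	}
}
#endif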
3922 
3923 void API minijail_destroy(struct minijail *j)
3924 {
3925 	size_t i;
3926 
3927 	if (j->filter_prog) {
3928 		free(j->filter_prog->filter);
3929 		free(j->filter_prog);
3930 	}
3931 	free_mounts_list(j);
3932 	free_remounts_list(j);
3933 	while (j->hooks_head) {
3934 		struct hook *c = j->hooks_head;
3935 		j->hooks_head = c->next;
3936 		free(c);
3937 	}
3938 	j->hooks_tail = NULL;
3939 	while (j->fs_rules_head) {
3940 		struct fs_rule *r = j->fs_rules_head;
3941 		j->fs_rules_head = r->next;
3942 		free(r);
3943 	}
3944 	j->fs_rules_tail = NULL;
3945 	if (j->user)
3946 		free(j->user);
3947 	if (j->suppl_gid_list)
3948 		free(j->suppl_gid_list);
3949 	if (j->chrootdir)
3950 		free(j->chrootdir);
3951 	if (j->pid_file_path)
3952 		free(j->pid_file_path);
3953 	if (j->uidmap)
3954 		free(j->uidmap);
3955 	if (j->gidmap)
3956 		free(j->gidmap);
3957 	if (j->hostname)
3958 		free(j->hostname);
3959 	if (j->preload_path)
3960 		free(j->preload_path);
3961 	if (j->alt_syscall_table)
3962 		free(j->alt_syscall_table);
3963 	for (i = 0; i < j->cgroup_count; ++i)
3964 		free(j->cgroups[i]);
3965 	if (j->seccomp_policy_path)
3966 		free(j->seccomp_policy_path);
3967 	free(j);
3968 }
3969 
3970 void API minijail_log_to_fd(int fd, int min_priority)
3971 {
3972 	init_logging(LOG_TO_FD, fd, min_priority);
3973 }
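
/*
 * Editor's note (illustrative only, not part of the original source): a
 * caller would typically pass a descriptor such as STDERR_FILENO together
 * with a minimum priority; the priority values are assumed here to be the
 * syslog-style LOG_* levels used by the logging helpers in util.h.
 */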
3974