Lines matching +full:wait +full:free +full:us in kernel/exit.c

1 // SPDX-License-Identifier: GPL-2.0-only
43 #include <linux/posix-timers.h>
73 #include <uapi/linux/wait.h>
83 * overflowing 32-bit refcounts or the ldsem writer count.
127 nr_threads--; in __unhash_process()
134 list_del_rcu(&p->tasks); in __unhash_process()
135 list_del_init(&p->sibling); in __unhash_process()
138 list_del_rcu(&p->thread_node); in __unhash_process()
142 * This function expects the tasklist_lock write-locked.
146 struct signal_struct *sig = tsk->signal; in __exit_signal()
152 sighand = rcu_dereference_check(tsk->sighand, in __exit_signal()
154 spin_lock(&sighand->siglock); in __exit_signal()
163 tty = sig->tty; in __exit_signal()
164 sig->tty = NULL; in __exit_signal()
170 if (sig->notify_count > 0 && !--sig->notify_count) in __exit_signal()
171 wake_up_process(sig->group_exec_task); in __exit_signal()
173 if (tsk == sig->curr_target) in __exit_signal()
174 sig->curr_target = next_thread(tsk); in __exit_signal()
177 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, in __exit_signal()
184 * see the empty ->thread_head list. in __exit_signal()
187 write_seqlock(&sig->stats_lock); in __exit_signal()
188 sig->utime += utime; in __exit_signal()
189 sig->stime += stime; in __exit_signal()
190 sig->gtime += task_gtime(tsk); in __exit_signal()
191 sig->min_flt += tsk->min_flt; in __exit_signal()
192 sig->maj_flt += tsk->maj_flt; in __exit_signal()
193 sig->nvcsw += tsk->nvcsw; in __exit_signal()
194 sig->nivcsw += tsk->nivcsw; in __exit_signal()
195 sig->inblock += task_io_get_inblock(tsk); in __exit_signal()
196 sig->oublock += task_io_get_oublock(tsk); in __exit_signal()
197 task_io_accounting_add(&sig->ioac, &tsk->ioac); in __exit_signal()
198 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; in __exit_signal()
199 sig->nr_threads--; in __exit_signal()
201 write_sequnlock(&sig->stats_lock); in __exit_signal()
204 * Do this under ->siglock, we can race with another thread in __exit_signal()
207 flush_sigqueue(&tsk->pending); in __exit_signal()
208 tsk->sighand = NULL; in __exit_signal()
209 spin_unlock(&sighand->siglock); in __exit_signal()
214 flush_sigqueue(&sig->shared_pending); in __exit_signal()
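
The write_seqlock()/write_sequnlock() pair above publishes the dead thread's accounting into signal_struct atomically with respect to lockless readers, which retry on a torn read. A minimal sketch of that consumer side against the same stats_lock (the helper name is illustrative, not from this file):

    #include <linux/seqlock.h>
    #include <linux/sched/signal.h>

    /* Retry until a write-free snapshot of the group times is read. */
    static void snapshot_group_times(struct signal_struct *sig,
                                     u64 *utime, u64 *stime)
    {
            unsigned int seq;

            do {
                    seq = read_seqbegin(&sig->stats_lock);
                    *utime = sig->utime;
                    *stime = sig->stime;
            } while (read_seqretry(&sig->stats_lock, seq));
    }
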
232 if (refcount_dec_and_test(&task->rcu_users)) in put_task_struct_rcu_user()
233 call_rcu(&task->rcu, delayed_put_task_struct); in put_task_struct_rcu_user()
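
put_task_struct_rcu_user() defers the final free past an RCU grace period, so lockless walkers that found the task under rcu_read_lock() never touch freed memory. The same refcount-plus-call_rcu() shape for a hypothetical object (all names below are illustrative):

    #include <linux/refcount.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {                            /* hypothetical object */
            refcount_t users;
            struct rcu_head rcu;
    };

    static void foo_free_rcu(struct rcu_head *rcu)
    {
            kfree(container_of(rcu, struct foo, rcu));
    }

    static void foo_put(struct foo *f)
    {
            /* Last reference: free only after current RCU readers exit. */
            if (refcount_dec_and_test(&f->users))
                    call_rcu(&f->rcu, foo_free_rcu);
    }
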
246 /* don't need to get the RCU readlock here - the process is dead and in release_task()
247 * can't be modifying its own credentials. But shut RCU-lockdep up */ in release_task()
256 thread_pid = get_pid(p->thread_pid); in release_task()
260 * If we are the last non-leader member of the thread in release_task()
265 leader = p->group_leader; in release_task()
267 && leader->exit_state == EXIT_ZOMBIE) { in release_task()
273 zap_leader = do_notify_parent(leader, leader->exit_signal); in release_task()
275 leader->exit_state = EXIT_DEAD; in release_task()
302 * WAIT WAKE in rcuwait_wake_up()
309 task = rcu_dereference(w->task); in rcuwait_wake_up()
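
rcuwait_wake_up() is the wake half of the single-waiter rcuwait primitive: the waiter publishes itself in w->task under RCU, and the waker dereferences it without a lock. A hedged sketch of both halves, assuming a simple done flag as the condition:

    #include <linux/rcuwait.h>
    #include <linux/sched.h>

    static struct rcuwait w = __RCUWAIT_INITIALIZER(w);
    static bool done;

    static void waiter(void)
    {
            /* Parks current in w.task, sleeps until done is set. */
            rcuwait_wait_event(&w, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
    }

    static void waker(void)
    {
            WRITE_ONCE(done, true);
            rcuwait_wake_up(&w);    /* wakes the parked task, if any */
    }
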
321 * by terminal-generated stop signals. Newly orphaned process groups are
333 (p->exit_state && thread_group_empty(p)) || in will_become_orphaned_pgrp()
334 is_global_init(p->real_parent)) in will_become_orphaned_pgrp()
337 if (task_pgrp(p->real_parent) != pgrp && in will_become_orphaned_pgrp()
338 task_session(p->real_parent) == task_session(p)) in will_become_orphaned_pgrp()
361 if (p->signal->flags & SIGNAL_STOP_STOPPED) in has_stopped_jobs()
383 parent = tsk->real_parent; in kill_orphaned_pgrp()
406 * and setting PF_POSTCOREDUMP. The core-inducing thread in coredump_task_exit()
407 * will increment ->nr_threads for each thread in the in coredump_task_exit()
410 spin_lock_irq(&tsk->sighand->siglock); in coredump_task_exit()
411 tsk->flags |= PF_POSTCOREDUMP; in coredump_task_exit()
412 core_state = tsk->signal->core_state; in coredump_task_exit()
413 spin_unlock_irq(&tsk->sighand->siglock); in coredump_task_exit()
418 if (self.task->flags & PF_SIGNALED) in coredump_task_exit()
419 self.next = xchg(&core_state->dumper.next, &self); in coredump_task_exit()
424 * to core_state->dumper. in coredump_task_exit()
426 if (atomic_dec_and_test(&core_state->nr_threads)) in coredump_task_exit()
427 complete(&core_state->startup); in coredump_task_exit()
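
The last dumper thread through here drops core_state->nr_threads to zero and completes core_state->startup, unblocking the core-inducing thread. The underlying rendezvous is a plain completion plus an atomic count; a minimal sketch with illustrative names:

    #include <linux/completion.h>
    #include <linux/atomic.h>

    static DECLARE_COMPLETION(startup);
    static atomic_t pending = ATOMIC_INIT(3);   /* three participants */

    static void participant_done(void)
    {
            /* Whoever drops the count to zero signals the coordinator. */
            if (atomic_dec_and_test(&pending))
                    complete(&startup);
    }

    static void coordinator(void)
    {
            wait_for_completion(&startup);      /* all checked in */
    }
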
446 if (likely(tsk->mm == mm)) { in __try_to_set_owner()
449 WRITE_ONCE(mm->owner, tsk); in __try_to_set_owner()
462 struct mm_struct *t_mm = READ_ONCE(t->mm); in try_to_set_owner()
484 if (mm->owner != p) in mm_update_next_owner()
491 if (atomic_read(&mm->mm_users) <= 1) { in mm_update_next_owner()
492 WRITE_ONCE(mm->owner, NULL); in mm_update_next_owner()
500 list_for_each_entry(g, &p->children, sibling) { in mm_update_next_owner()
507 list_for_each_entry(g, &p->real_parent->children, sibling) { in mm_update_next_owner()
515 if (atomic_read(&mm->mm_users) <= 1) in mm_update_next_owner()
517 if (g->flags & PF_KTHREAD) in mm_update_next_owner()
528 WRITE_ONCE(mm->owner, NULL); in mm_update_next_owner()
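
mm_update_next_owner() only re-points mm->owner; it never takes a reference. Elsewhere, pinning another task's address space goes through get_task_mm()/mmput(), which raise and drop the same mm_users count tested above. A hedged sketch:

    #include <linux/sched/mm.h>
    #include <linux/mm_types.h>

    static unsigned long peek_total_vm(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task);  /* ++mm_users */
            unsigned long pages = 0;

            if (mm) {
                    pages = mm->total_vm;
                    mmput(mm);          /* may free the mm at zero */
            }
            return pages;
    }
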
536 * Turn us into a lazy TLB process if we
541 struct mm_struct *mm = current->mm; in exit_mm()
548 BUG_ON(mm != current->active_mm); in exit_mm()
554 * tsk->mm, and the loop in membarrier_global_expedited() may in exit_mm()
556 * rq->membarrier_state, so those would not issue an IPI. in exit_mm()
558 * user-space memory, before clearing tsk->mm or the in exit_mm()
559 * rq->membarrier_state. in exit_mm()
563 current->mm = NULL; in exit_mm()
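
After current->mm is cleared, the task keeps borrowing the old page tables as a lazy-TLB user through active_mm, which is held by mm_count rather than mm_users. The two counters have different lifetimes; a minimal sketch of the mm_count side:

    #include <linux/sched/mm.h>

    /* mmget()/mmput() pin the address space (page tables, VMAs);
     * mmgrab()/mmdrop() pin only struct mm_struct itself, which is
     * all a lazy-TLB reference like active_mm needs. */
    static void hold_mm_struct_only(struct mm_struct *mm)
    {
            mmgrab(mm);     /* ++mm_count */
            /* ... safe to keep the pointer; mappings may be gone ... */
            mmdrop(mm);     /* frees struct mm_struct at zero */
    }
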
580 if (!(t->flags & PF_EXITING)) in find_alive_thread()
592 struct task_struct *reaper = pid_ns->child_reaper; in find_child_reaper()
600 pid_ns->child_reaper = reaper; in find_child_reaper()
607 list_del_init(&p->ptrace_entry); in find_child_reaper()
618 * When we die, we re-parent all our children, and try to:
633 if (father->signal->has_child_subreaper) { in find_new_reaper()
634 unsigned int ns_level = task_pid(father)->level; in find_new_reaper()
636 * Find the first ->is_child_subreaper ancestor in our pid_ns. in find_new_reaper()
640 * We check pid->level, this is slightly more efficient than in find_new_reaper()
643 for (reaper = father->real_parent; in find_new_reaper()
644 task_pid(reaper)->level == ns_level; in find_new_reaper()
645 reaper = reaper->real_parent) { in find_new_reaper()
648 if (!reaper->signal->is_child_subreaper) in find_new_reaper()
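
find_new_reaper() stops at the nearest ancestor in the same pid namespace that declared itself a child subreaper. Userspace opts in with prctl(2); a minimal example for a service manager that wants orphaned descendants re-parented to it instead of to init:

    #include <sys/prctl.h>
    #include <stdio.h>

    int main(void)
    {
            if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0) {
                    perror("prctl");
                    return 1;
            }
            /* ... fork services; reap re-parented orphans with wait() ... */
            return 0;
    }
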
665 if (unlikely(p->exit_state == EXIT_DEAD)) in reparent_leader()
669 p->exit_signal = SIGCHLD; in reparent_leader()
672 if (!p->ptrace && in reparent_leader()
673 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { in reparent_leader()
674 if (do_notify_parent(p, p->exit_signal)) { in reparent_leader()
675 p->exit_state = EXIT_DEAD; in reparent_leader()
676 list_add(&p->ptrace_entry, dead); in reparent_leader()
696 if (unlikely(!list_empty(&father->ptraced))) in forget_original_parent()
701 if (list_empty(&father->children)) in forget_original_parent()
705 list_for_each_entry(p, &father->children, sibling) { in forget_original_parent()
707 RCU_INIT_POINTER(t->real_parent, reaper); in forget_original_parent()
708 BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father)); in forget_original_parent()
709 if (likely(!t->ptrace)) in forget_original_parent()
710 t->parent = t->real_parent; in forget_original_parent()
711 if (t->pdeath_signal) in forget_original_parent()
712 group_send_sig_info(t->pdeath_signal, in forget_original_parent()
723 list_splice_tail_init(&father->children, &reaper->children); in forget_original_parent()
728 * to properly mourn us..
740 kill_orphaned_pgrp(tsk->group_leader, NULL); in exit_notify()
742 tsk->exit_state = EXIT_ZOMBIE; in exit_notify()
744 * sub-thread or delay_group_leader(), wake up the in exit_notify()
750 if (unlikely(tsk->ptrace)) { in exit_notify()
754 tsk->exit_signal : SIGCHLD; in exit_notify()
758 do_notify_parent(tsk, tsk->exit_signal); in exit_notify()
764 tsk->exit_state = EXIT_DEAD; in exit_notify()
765 list_add(&tsk->ptrace_entry, &dead); in exit_notify()
768 /* mt-exec, de_thread() is waiting for group leader */ in exit_notify()
769 if (unlikely(tsk->signal->notify_count < 0)) in exit_notify()
770 wake_up_process(tsk->signal->group_exec_task); in exit_notify()
774 list_del_init(&p->ptrace_entry); in exit_notify()
786 n--; in stack_not_used()
793 return (unsigned long)end_of_stack(p) - (unsigned long)n; in stack_not_used()
795 return (unsigned long)n - (unsigned long)end_of_stack(p); in stack_not_used()
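
stack_not_used() exploits the fact that unused kernel stack is still zero-filled: scan from the stack end toward live frames until the first clobbered word, and the distance is headroom that was never touched. A hedged reconstruction of the grows-down case only:

    /* Illustrative only; the real helper also handles grows-up stacks. */
    static unsigned long unused_stack(unsigned long *stack_end)
    {
            unsigned long *n = stack_end;

            while (*n == 0)         /* untouched words are still zero */
                    n++;
            return (unsigned long)n - (unsigned long)stack_end;
    }
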
840 unsigned long free; in check_stack_usage() local
842 free = stack_not_used(current); in check_stack_usage()
843 kstack_histogram(THREAD_SIZE - free); in check_stack_usage()
845 if (free >= lowest_to_date) in check_stack_usage()
849 if (free < lowest_to_date) { in check_stack_usage()
851 current->comm, task_pid_nr(current), free); in check_stack_usage()
852 lowest_to_date = free; in check_stack_usage()
862 struct sighand_struct *sighand = tsk->sighand; in synchronize_group_exit()
863 struct signal_struct *signal = tsk->signal; in synchronize_group_exit()
865 spin_lock_irq(&sighand->siglock); in synchronize_group_exit()
866 signal->quick_threads--; in synchronize_group_exit()
867 if ((signal->quick_threads == 0) && in synchronize_group_exit()
868 !(signal->flags & SIGNAL_GROUP_EXIT)) { in synchronize_group_exit()
869 signal->flags = SIGNAL_GROUP_EXIT; in synchronize_group_exit()
870 signal->group_exit_code = code; in synchronize_group_exit()
871 signal->group_stop_count = 0; in synchronize_group_exit()
873 spin_unlock_irq(&sighand->siglock); in synchronize_group_exit()
885 WARN_ON(tsk->plug); in do_exit()
900 group_dead = atomic_dec_and_test(&tsk->signal->live); in do_exit()
908 tsk->signal->group_exit_code ?: (int)code); in do_exit()
911 hrtimer_cancel(&tsk->signal->real_timer); in do_exit()
914 if (tsk->mm) in do_exit()
915 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); in do_exit()
922 tsk->exit_code = code; in do_exit()
942 * Flush inherited counters to the parent - before the parent in do_exit()
943 * gets woken up by child-exit notifications. in do_exit()
962 if (unlikely(current->pi_state_cache)) in do_exit()
963 kfree(current->pi_state_cache); in do_exit()
970 if (tsk->io_context) in do_exit()
973 if (tsk->splice_pipe) in do_exit()
974 free_pipe_info(tsk->splice_pipe); in do_exit()
976 if (tsk->task_frag.page) in do_exit()
977 put_page(tsk->task_frag.page); in do_exit()
983 if (tsk->nr_dirtied) in do_exit()
984 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); in do_exit()
1008 if (unlikely(!tsk->pid)) in make_task_dead()
1013 current->comm, task_pid_nr(current)); in make_task_dead()
1018 current->comm, task_pid_nr(current), in make_task_dead()
1028 * This means that repeated oopsing can make unexploitable-looking bugs in make_task_dead()
1039 * leave this task alone and wait for reboot. in make_task_dead()
1041 if (unlikely(tsk->flags & PF_EXITING)) { in make_task_dead()
1044 tsk->exit_state = EXIT_DEAD; in make_task_dead()
1045 refcount_inc(&tsk->rcu_users); in make_task_dead()
1064 struct signal_struct *sig = current->signal; in do_group_exit()
1066 if (sig->flags & SIGNAL_GROUP_EXIT) in do_group_exit()
1067 exit_code = sig->group_exit_code; in do_group_exit()
1068 else if (sig->group_exec_task) in do_group_exit()
1071 struct sighand_struct *const sighand = current->sighand; in do_group_exit()
1073 spin_lock_irq(&sighand->siglock); in do_group_exit()
1074 if (sig->flags & SIGNAL_GROUP_EXIT) in do_group_exit()
1076 exit_code = sig->group_exit_code; in do_group_exit()
1077 else if (sig->group_exec_task) in do_group_exit()
1080 sig->group_exit_code = exit_code; in do_group_exit()
1081 sig->flags = SIGNAL_GROUP_EXIT; in do_group_exit()
1084 spin_unlock_irq(&sighand->siglock); in do_group_exit()
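
do_group_exit() records one shared group_exit_code under siglock and takes down every thread; it is what a fatal signal and the exit_group(2) syscall funnel into. From userspace, glibc's exit() ends up here via SYS_exit_group:

    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* Kills the whole thread group; 42 lands in
             * sig->group_exit_code and is what the parent's wait sees. */
            syscall(SYS_exit_group, 42);
            return 0;               /* never reached */
    }
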
1093 * wait4()-ing process will get the correct exit code - even if this
1105 return wo->wo_type == PIDTYPE_MAX || in eligible_pid()
1106 task_pid_type(p, wo->wo_type) == wo->wo_pid; in eligible_pid()
1116 * Wait for all children (clone and not) if __WALL is set or in eligible_child()
1117 * if it is traced by us. in eligible_child()
1119 if (ptrace || (wo->wo_flags & __WALL)) in eligible_child()
1123 * Otherwise, wait for clone children *only* if __WCLONE is set; in eligible_child()
1124 * otherwise, wait for non-clone children *only*. in eligible_child()
1127 * using a signal other than SIGCHLD, or a non-leader thread which in eligible_child()
1128 * we can only see if it is traced by us. in eligible_child()
1130 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) in eligible_child()
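
The __WCLONE XOR above selects clone children, i.e. those whose exit_signal is not SIGCHLD, while __WALL waives the distinction entirely. Both flags are usable from waitpid(2) with _GNU_SOURCE:

    #define _GNU_SOURCE
    #include <sys/wait.h>
    #include <stdio.h>

    int main(void)
    {
            int status;
            /* __WALL: reap children regardless of their exit signal. */
            pid_t pid = waitpid(-1, &status, __WALL);

            if (pid > 0)
                    printf("reaped %d, raw status %#x\n", pid, status);
            return 0;
    }
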
1149 if (!likely(wo->wo_flags & WEXITED)) in wait_task_zombie()
1152 if (unlikely(wo->wo_flags & WNOWAIT)) { in wait_task_zombie()
1153 status = (p->signal->flags & SIGNAL_GROUP_EXIT) in wait_task_zombie()
1154 ? p->signal->group_exit_code : p->exit_code; in wait_task_zombie()
1158 if (wo->wo_rusage) in wait_task_zombie()
1159 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_zombie()
1168 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) in wait_task_zombie()
1177 * Check thread_group_leader() to exclude the traced sub-threads. in wait_task_zombie()
1180 struct signal_struct *sig = p->signal; in wait_task_zombie()
1181 struct signal_struct *psig = current->signal; in wait_task_zombie()
1193 * p->signal fields because the whole thread group is dead in wait_task_zombie()
1196 * psig->stats_lock also protects us from our sub-threads in wait_task_zombie()
1204 write_seqlock_irq(&psig->stats_lock); in wait_task_zombie()
1205 psig->cutime += tgutime + sig->cutime; in wait_task_zombie()
1206 psig->cstime += tgstime + sig->cstime; in wait_task_zombie()
1207 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; in wait_task_zombie()
1208 psig->cmin_flt += in wait_task_zombie()
1209 p->min_flt + sig->min_flt + sig->cmin_flt; in wait_task_zombie()
1210 psig->cmaj_flt += in wait_task_zombie()
1211 p->maj_flt + sig->maj_flt + sig->cmaj_flt; in wait_task_zombie()
1212 psig->cnvcsw += in wait_task_zombie()
1213 p->nvcsw + sig->nvcsw + sig->cnvcsw; in wait_task_zombie()
1214 psig->cnivcsw += in wait_task_zombie()
1215 p->nivcsw + sig->nivcsw + sig->cnivcsw; in wait_task_zombie()
1216 psig->cinblock += in wait_task_zombie()
1218 sig->inblock + sig->cinblock; in wait_task_zombie()
1219 psig->coublock += in wait_task_zombie()
1221 sig->oublock + sig->coublock; in wait_task_zombie()
1222 maxrss = max(sig->maxrss, sig->cmaxrss); in wait_task_zombie()
1223 if (psig->cmaxrss < maxrss) in wait_task_zombie()
1224 psig->cmaxrss = maxrss; in wait_task_zombie()
1225 task_io_accounting_add(&psig->ioac, &p->ioac); in wait_task_zombie()
1226 task_io_accounting_add(&psig->ioac, &sig->ioac); in wait_task_zombie()
1227 write_sequnlock_irq(&psig->stats_lock); in wait_task_zombie()
1230 if (wo->wo_rusage) in wait_task_zombie()
1231 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_zombie()
1232 status = (p->signal->flags & SIGNAL_GROUP_EXIT) in wait_task_zombie()
1233 ? p->signal->group_exit_code : p->exit_code; in wait_task_zombie()
1234 wo->wo_stat = status; in wait_task_zombie()
1243 if (do_notify_parent(p, p->exit_signal)) in wait_task_zombie()
1245 p->exit_state = state; in wait_task_zombie()
1252 infop = wo->wo_info; in wait_task_zombie()
1255 infop->cause = CLD_EXITED; in wait_task_zombie()
1256 infop->status = status >> 8; in wait_task_zombie()
1258 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; in wait_task_zombie()
1259 infop->status = status & 0x7f; in wait_task_zombie()
1261 infop->pid = pid; in wait_task_zombie()
1262 infop->uid = uid; in wait_task_zombie()
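
The raw status wait_task_zombie() builds (exit code in bits 8..15, termination signal in bits 0..6, core-dump flag in 0x80) is exactly what the W* macros in <sys/wait.h> decode:

    #define _GNU_SOURCE
    #include <sys/wait.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            int status;

            if (fork() == 0)
                    _exit(7);                       /* child */
            wait(&status);
            if (WIFEXITED(status))                  /* (status & 0x7f) == 0 */
                    printf("code %d\n", WEXITSTATUS(status));   /* status >> 8 */
            else if (WIFSIGNALED(status))
                    printf("signal %d%s\n", WTERMSIG(status),   /* status & 0x7f */
                           WCOREDUMP(status) ? " (core)" : ""); /* status & 0x80 */
            return 0;
    }
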
1271 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING)) in task_stopped_code()
1272 return &p->exit_code; in task_stopped_code()
1274 if (p->signal->flags & SIGNAL_STOP_STOPPED) in task_stopped_code()
1275 return &p->signal->group_exit_code; in task_stopped_code()
1281 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1282 * @wo: wait options
1283 * @ptrace: is the wait for ptrace
1284 * @p: task to wait for
1290 * non-zero. Also, grabs and releases @p->sighand->siglock.
1293 * 0 if wait condition didn't exist and search for other wait conditions
1294 * should continue. Non-zero return, -errno on failure and @p's pid on
1295 * success, implies that tasklist_lock is released and wait condition
1309 if (!ptrace && !(wo->wo_flags & WUNTRACED)) in wait_task_stopped()
1316 spin_lock_irq(&p->sighand->siglock); in wait_task_stopped()
1326 if (!unlikely(wo->wo_flags & WNOWAIT)) in wait_task_stopped()
1331 spin_unlock_irq(&p->sighand->siglock); in wait_task_stopped()
1337 * Make sure it doesn't get reaped out from under us while we in wait_task_stopped()
1347 if (wo->wo_rusage) in wait_task_stopped()
1348 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_stopped()
1351 if (likely(!(wo->wo_flags & WNOWAIT))) in wait_task_stopped()
1352 wo->wo_stat = (exit_code << 8) | 0x7f; in wait_task_stopped()
1354 infop = wo->wo_info; in wait_task_stopped()
1356 infop->cause = why; in wait_task_stopped()
1357 infop->status = exit_code; in wait_task_stopped()
1358 infop->pid = pid; in wait_task_stopped()
1359 infop->uid = uid; in wait_task_stopped()
1365 * Handle do_wait work for one task in a live, non-stopped state.
1376 if (!unlikely(wo->wo_flags & WCONTINUED)) in wait_task_continued()
1379 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) in wait_task_continued()
1382 spin_lock_irq(&p->sighand->siglock); in wait_task_continued()
1383 /* Re-check with the lock held. */ in wait_task_continued()
1384 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { in wait_task_continued()
1385 spin_unlock_irq(&p->sighand->siglock); in wait_task_continued()
1388 if (!unlikely(wo->wo_flags & WNOWAIT)) in wait_task_continued()
1389 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; in wait_task_continued()
1391 spin_unlock_irq(&p->sighand->siglock); in wait_task_continued()
1397 if (wo->wo_rusage) in wait_task_continued()
1398 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_continued()
1401 infop = wo->wo_info; in wait_task_continued()
1403 wo->wo_stat = 0xffff; in wait_task_continued()
1405 infop->cause = CLD_CONTINUED; in wait_task_continued()
1406 infop->pid = pid; in wait_task_continued()
1407 infop->uid = uid; in wait_task_continued()
1408 infop->status = SIGCONT; in wait_task_continued()
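
wait_task_stopped() reports (exit_code << 8) | 0x7f and wait_task_continued() reports 0xffff, the two magic encodings WIFSTOPPED() and WIFCONTINUED() test for. A shell-like parent observes both transitions with WUNTRACED | WCONTINUED:

    #define _GNU_SOURCE
    #include <sys/wait.h>
    #include <stdio.h>

    static void watch(pid_t child)
    {
            int status;

            while (waitpid(child, &status, WUNTRACED | WCONTINUED) > 0) {
                    if (WIFSTOPPED(status))         /* low byte == 0x7f */
                            printf("stopped by %d\n", WSTOPSIG(status));
                    else if (WIFCONTINUED(status))  /* status == 0xffff */
                            printf("continued\n");
                    else
                            break;                  /* exited or killed */
            }
    }
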
1414 * Consider @p for a wait by @parent.
1416 * -ECHILD should be in ->notask_error before the first call.
1419 * then ->notask_error is 0 if @p is an eligible child,
1420 * or still -ECHILD.
1427 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition in wait_consider_task()
1430 int exit_state = READ_ONCE(p->exit_state); in wait_consider_task()
1443 * we should clear notask_error, debugger will notify us. in wait_consider_task()
1446 wo->notask_error = 0; in wait_consider_task()
1450 if (likely(!ptrace) && unlikely(p->ptrace)) { in wait_consider_task()
1475 if (unlikely(ptrace) || likely(!p->ptrace)) in wait_consider_task()
1488 * wait for. If all subthreads are dead, it's still safe in wait_consider_task()
1489 * to clear - this function will be called again in finite in wait_consider_task()
1495 * Stopped state is per-task and thus can't change once the in wait_consider_task()
1499 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) in wait_consider_task()
1500 wo->notask_error = 0; in wait_consider_task()
1504 * there always is something to wait for. in wait_consider_task()
1506 wo->notask_error = 0; in wait_consider_task()
1510 * Wait for stopped. Depending on @ptrace, different stopped state in wait_consider_task()
1518 * Wait for continued. There's only one continued state and the in wait_consider_task()
1528 * -ECHILD should be in ->notask_error before the first call.
1531 * ->notask_error is 0 if there were any eligible children,
1532 * or still -ECHILD.
1538 list_for_each_entry(p, &tsk->children, sibling) { in do_wait_thread()
1552 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { in ptrace_do_wait()
1567 if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent) in pid_child_should_wake()
1573 static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode, in child_wait_callback() argument
1576 struct wait_opts *wo = container_of(wait, struct wait_opts, in child_wait_callback()
1581 return default_wake_function(wait, mode, sync, key); in child_wait_callback()
1588 __wake_up_sync_key(&parent->signal->wait_chldexit, in __wake_up_parent()
1596 !ptrace ? target->real_parent : target->parent; in is_effectively_child()
1598 return current == parent || (!(wo->wo_flags & __WNOTHREAD) && in is_effectively_child()
1613 target = pid_task(wo->wo_pid, PIDTYPE_TGID); in do_wait_pid()
1621 target = pid_task(wo->wo_pid, PIDTYPE_PID); in do_wait_pid()
1622 if (target && target->ptrace && in do_wait_pid()
1638 * We will clear ->notask_error to zero if we see any child that in __do_wait()
1642 wo->notask_error = -ECHILD; in __do_wait()
1643 if ((wo->wo_type < PIDTYPE_MAX) && in __do_wait()
1644 (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type))) in __do_wait()
1649 if (wo->wo_type == PIDTYPE_PID) { in __do_wait()
1665 if (wo->wo_flags & __WNOTHREAD) in __do_wait()
1672 retval = wo->notask_error; in __do_wait()
1673 if (!retval && !(wo->wo_flags & WNOHANG)) in __do_wait()
1674 return -ERESTARTSYS; in __do_wait()
1683 trace_sched_process_wait(wo->wo_pid); in do_wait()
1685 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); in do_wait()
1686 wo->child_wait.private = current; in do_wait()
1687 add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); in do_wait()
1692 if (retval != -ERESTARTSYS) in do_wait()
1700 remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); in do_wait()
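
do_wait() parks on signal->wait_chldexit with a custom wake function so that a child-exit wakeup only takes effect when the waiter's options actually match the child. The generic shape, init_waitqueue_func_entry() plus add/remove_wait_queue(), in a hedged sketch:

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);

    /* Filter wakeups; defer accepted ones to the default behaviour. */
    static int my_wake(wait_queue_entry_t *curr, unsigned int mode,
                       int sync, void *key)
    {
            if (!key)                       /* example filter */
                    return 0;
            return default_wake_function(curr, mode, sync, key);
    }

    static void wait_filtered(void)
    {
            wait_queue_entry_t wait;

            init_waitqueue_func_entry(&wait, my_wake);
            wait.private = current;
            add_wait_queue(&wq, &wait);
            /* ... set_current_state()/schedule() loop as in do_wait() ... */
            remove_wait_queue(&wq, &wait);
    }
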
1714 return -EINVAL; in kernel_waitid_prepare()
1716 return -EINVAL; in kernel_waitid_prepare()
1725 return -EINVAL; in kernel_waitid_prepare()
1732 return -EINVAL; in kernel_waitid_prepare()
1742 return -EINVAL; in kernel_waitid_prepare()
1750 return -EINVAL; in kernel_waitid_prepare()
1753 wo->wo_type = type; in kernel_waitid_prepare()
1754 wo->wo_pid = pid; in kernel_waitid_prepare()
1755 wo->wo_flags = options; in kernel_waitid_prepare()
1756 wo->wo_info = infop; in kernel_waitid_prepare()
1757 wo->wo_rusage = ru; in kernel_waitid_prepare()
1759 wo->wo_flags |= WNOHANG; in kernel_waitid_prepare()
1776 ret = -EAGAIN; in kernel_waitid()
1794 return -EFAULT; in SYSCALL_DEFINE5()
1800 return -EFAULT; in SYSCALL_DEFINE5()
1802 unsafe_put_user(signo, &infop->si_signo, Efault); in SYSCALL_DEFINE5()
1803 unsafe_put_user(0, &infop->si_errno, Efault); in SYSCALL_DEFINE5()
1804 unsafe_put_user(info.cause, &infop->si_code, Efault); in SYSCALL_DEFINE5()
1805 unsafe_put_user(info.pid, &infop->si_pid, Efault); in SYSCALL_DEFINE5()
1806 unsafe_put_user(info.uid, &infop->si_uid, Efault); in SYSCALL_DEFINE5()
1807 unsafe_put_user(info.status, &infop->si_status, Efault); in SYSCALL_DEFINE5()
1812 return -EFAULT; in SYSCALL_DEFINE5()
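
The unsafe_put_user() sequence above is how waitid(2) returns a decomposed result instead of a packed status int; the matching userspace view:

    #include <sys/wait.h>
    #include <signal.h>
    #include <stdio.h>

    static void reap_one(void)
    {
            siginfo_t info = { 0 };

            /* si_code is CLD_EXITED/CLD_KILLED/CLD_DUMPED, matching
             * the kernel's infop->cause above. */
            if (waitid(P_ALL, 0, &info, WEXITED) == 0)
                    printf("pid %d: code %d, status %d\n",
                           (int)info.si_pid, info.si_code, info.si_status);
    }
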
1825 return -EINVAL; in kernel_wait4()
1827 /* -INT_MIN is not defined */ in kernel_wait4()
1829 return -ESRCH; in kernel_wait4()
1831 if (upid == -1) in kernel_wait4()
1835 pid = find_get_pid(-upid); in kernel_wait4()
1853 ret = -EFAULT; in kernel_wait4()
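
kernel_wait4() backs wait4(2), the only wait flavor that also returns resource usage; the upid cases above map -1 to any child, 0 to the caller's process group, less than -1 to |upid|'s group, and greater than 0 to a single pid. Example:

    #define _GNU_SOURCE
    #include <sys/wait.h>
    #include <sys/resource.h>
    #include <stdio.h>

    static void reap_with_usage(void)
    {
            int status;
            struct rusage ru;
            pid_t pid = wait4(-1, &status, 0, &ru);  /* upid == -1 branch */

            if (pid > 0)
                    printf("pid %d, max RSS %ld KiB\n", pid, ru.ru_maxrss);
    }
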
1882 return -EFAULT; in SYSCALL_DEFINE4()
1911 return -EFAULT; in COMPAT_SYSCALL_DEFINE4()
1935 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1943 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1945 unsafe_put_user(signo, &infop->si_signo, Efault); in COMPAT_SYSCALL_DEFINE5()
1946 unsafe_put_user(0, &infop->si_errno, Efault); in COMPAT_SYSCALL_DEFINE5()
1947 unsafe_put_user(info.cause, &infop->si_code, Efault); in COMPAT_SYSCALL_DEFINE5()
1948 unsafe_put_user(info.pid, &infop->si_pid, Efault); in COMPAT_SYSCALL_DEFINE5()
1949 unsafe_put_user(info.uid, &infop->si_uid, Efault); in COMPAT_SYSCALL_DEFINE5()
1950 unsafe_put_user(info.status, &infop->si_status, Efault); in COMPAT_SYSCALL_DEFINE5()
1955 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1962 * -falign-functions=N.
1970 /* if that doesn't kill us, halt */ in abort()