signal.c 101 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
14
#include <linux/export.h>
Linus Torvalds's avatar
Linus Torvalds committed
15
#include <linux/init.h>
16
#include <linux/sched/mm.h>
17
#include <linux/sched/user.h>
18
#include <linux/sched/debug.h>
19
#include <linux/sched/task.h>
20
#include <linux/sched/task_stack.h>
21
#include <linux/sched/cputime.h>
Linus Torvalds's avatar
Linus Torvalds committed
22 23 24
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
25
#include <linux/coredump.h>
Linus Torvalds's avatar
Linus Torvalds committed
26 27 28
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
29
#include <linux/signal.h>
30
#include <linux/signalfd.h>
31
#include <linux/ratelimit.h>
32
#include <linux/tracehook.h>
33
#include <linux/capability.h>
34
#include <linux/freezer.h>
35 36
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
37
#include <linux/user_namespace.h>
38
#include <linux/uprobes.h>
Al Viro's avatar
Al Viro committed
39
#include <linux/compat.h>
40
#include <linux/cn_proc.h>
41
#include <linux/compiler.h>
42
#include <linux/posix-timers.h>
43
#include <linux/livepatch.h>
44

45 46
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
47

Linus Torvalds's avatar
Linus Torvalds committed
48
#include <asm/param.h>
49
#include <linux/uaccess.h>
Linus Torvalds's avatar
Linus Torvalds committed
50 51
#include <asm/unistd.h>
#include <asm/siginfo.h>
52
#include <asm/cacheflush.h>
53
#include "audit.h"	/* audit_signal_info() */
Linus Torvalds's avatar
Linus Torvalds committed
54 55 56 57 58

/*
 * SLAB caches for signal bits.
 */

59
static struct kmem_cache *sigqueue_cachep;
Linus Torvalds's avatar
Linus Torvalds committed
60

61 62
int print_fatal_signals __read_mostly;

63
/* Return the userspace handler currently installed for @sig on task @t. */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	/* action[] is 0-indexed while signal numbers start at 1 */
	return t->sighand->action[sig - 1].sa.sa_handler;
}
67

68 69
/*
 * Return non-zero if @handler means @sig is discarded: either the
 * handler is SIG_IGN, or it is SIG_DFL and the default action for
 * @sig is to ignore it.
 */
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
Linus Torvalds's avatar
Linus Torvalds committed
74

75
/*
 * Would task @t ignore @sig?  @force marks signals which may not be
 * ignored even by SIGNAL_UNKILLABLE (init-like) tasks.
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/*
	 * SIGNAL_UNKILLABLE tasks (e.g. init) swallow SIG_DFL signals
	 * unless the sender forces delivery of a kernel-only signal.
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}

88
/*
 * Return non-zero if @sig, when generated for @t, would be discarded
 * outright instead of being queued.
 */
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals:
 * non-zero iff any bit is set in @signal that is clear in @blocked.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready = 0;
	long word;

	/*
	 * _NSIG_WORDS is a compile-time constant, so the compiler
	 * fully unrolls this loop for the common small word counts.
	 */
	for (word = _NSIG_WORDS - 1; word >= 0; word--)
		ready |= signal->sig[word] & ~blocked->sig[word];

	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

Roland McGrath's avatar
Roland McGrath committed
141
static int recalc_sigpending_tsk(struct task_struct *t)
Linus Torvalds's avatar
Linus Torvalds committed
142
{
143
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
Linus Torvalds's avatar
Linus Torvalds committed
144
	    PENDING(&t->pending, &t->blocked) ||
Roland McGrath's avatar
Roland McGrath committed
145
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
Linus Torvalds's avatar
Linus Torvalds committed
146
		set_tsk_thread_flag(t, TIF_SIGPENDING);
Roland McGrath's avatar
Roland McGrath committed
147 148
		return 1;
	}
149 150 151 152 153
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
Roland McGrath's avatar
Roland McGrath committed
154 155 156 157 158 159 160 161 162 163 164
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
169 170
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
171 172
		clear_thread_flag(TIF_SIGPENDING);

Linus Torvalds's avatar
Linus Torvalds committed
173 174 175 176
}

/* Given the mask, find the first available signal that should be serviced. */

177 178
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
179
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
180

181
/*
 * Given the mask, find the first available signal that should be
 * serviced.  Returns the signal number, or 0 if none is deliverable.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	/* Remaining words: the switch is an unroll over _NSIG_WORDS. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

227 228 229 230 231 232 233 234 235 236
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

237
	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
238 239 240
				current->comm, current->pid, sig);
}

241
/**
242
 * task_set_jobctl_pending - set jobctl pending bits
243
 * @task: target task
244
 * @mask: pending bits to set
245
 *
246 247 248 249 250 251 252 253 254 255 256 257
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
258
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
259 260 261 262 263 264 265 266 267 268 269 270 271 272 273
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

274
/**
275
 * task_clear_jobctl_trapping - clear jobctl trapping bit
276 277
 * @task: target task
 *
278 279 280 281
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
282 283 284 285
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
286
void task_clear_jobctl_trapping(struct task_struct *task)
287
{
288 289
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
290
		smp_mb();	/* advised by wake_up_bit() */
291
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
292 293 294
	}
}

295
/**
296
 * task_clear_jobctl_pending - clear jobctl pending bits
297
 * @task: target task
298
 * @mask: pending bits to clear
299
 *
300 301 302
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
303
 *
304 305
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
306 307 308 309
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
310
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
311
{
312 313 314 315 316 317
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;
318 319 320

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
321 322 323 324 325 326
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
327
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
328
 * Group stop states are cleared and the group stop count is consumed if
329
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
330
 * stop, the appropriate %SIGNAL_* flags are set.
331 332 333
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
334 335 336 337
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
338 339 340 341
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
342
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
343

344
	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
345

346
	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
347 348 349 350 351 352 353

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

354 355 356 357 358
	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
359
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
360 361 362 363 364
		return true;
	}
	return false;
}

365 366 367
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
368
 *   appropriate lock must be held to stop the target task from exiting
369
 */
370 371
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
Linus Torvalds's avatar
Linus Torvalds committed
372 373
{
	struct sigqueue *q = NULL;
374
	struct user_struct *user;
Linus Torvalds's avatar
Linus Torvalds committed
375

376
	/*
377 378
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
379
	 */
380
	rcu_read_lock();
381
	user = get_uid(__task_cred(t)->user);
382
	atomic_inc(&user->sigpending);
383
	rcu_read_unlock();
384

Linus Torvalds's avatar
Linus Torvalds committed
385
	if (override_rlimit ||
386
	    atomic_read(&user->sigpending) <=
387
			task_rlimit(t, RLIMIT_SIGPENDING)) {
Linus Torvalds's avatar
Linus Torvalds committed
388
		q = kmem_cache_alloc(sigqueue_cachep, flags);
389 390 391 392
	} else {
		print_dropped_signal(sig);
	}

Linus Torvalds's avatar
Linus Torvalds committed
393
	if (unlikely(q == NULL)) {
394
		atomic_dec(&user->sigpending);
395
		free_uid(user);
Linus Torvalds's avatar
Linus Torvalds committed
396 397 398
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
399
		q->user = user;
Linus Torvalds's avatar
Linus Torvalds committed
400
	}
401 402

	return q;
Linus Torvalds's avatar
Linus Torvalds committed
403 404
}

405
static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds's avatar
Linus Torvalds committed
406 407 408 409 410 411 412 413
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

414
void flush_sigqueue(struct sigpending *queue)
Linus Torvalds's avatar
Linus Torvalds committed
415 416 417 418 419 420 421 422 423 424 425 426
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
427
 * Flush all pending signals for this kthread.
Linus Torvalds's avatar
Linus Torvalds committed
428
 */
429
void flush_signals(struct task_struct *t)
Linus Torvalds's avatar
Linus Torvalds committed
430 431 432 433
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
434 435 436
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
Linus Torvalds's avatar
Linus Torvalds committed
437 438 439
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

440
#ifdef CONFIG_POSIX_TIMERS
441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473
/*
 * Remove every queued SI_TIMER entry from @pending while keeping all
 * other queued signals (and their pending bits) intact.
 * Caller holds the siglock.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	/* Work on a snapshot of the pending set; rebuilt at the end. */
	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* Non-timer entry: remember its signal as retained. */
			sigaddset(&retain, sig);
		} else {
			/* Timer entry: drop both the bit and the queue node. */
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/* Keep a bit if it survived the sweep or a non-timer entry has it. */
	sigorsets(&pending->signal, &signal, &retain);
}

/*
 * Purge queued itimer (SI_TIMER) signals from both the private and the
 * shared pending queues of the current task.
 */
void flush_itimer_signals(void)
{
	unsigned long irqflags;
	struct task_struct *task = current;

	spin_lock_irqsave(&task->sighand->siglock, irqflags);
	__flush_itimer_signals(&task->pending);
	__flush_itimer_signals(&task->signal->shared_pending);
	spin_unlock_irqrestore(&task->sighand->siglock, irqflags);
}
474
#endif
475

476 477 478 479 480 481 482 483 484 485
/*
 * Set every signal handler of @t to SIG_IGN and discard anything
 * already pending.
 */
void ignore_signals(struct task_struct *t)
{
	int sig;

	for (sig = 0; sig < _NSIG; sig++)
		t->sighand->action[sig].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

Linus Torvalds's avatar
Linus Torvalds committed
486 487 488 489 490 491 492 493 494 495 496 497 498
/*
 * Flush all handlers for a task: reset non-ignored (or, with
 * @force_default, all) handlers to SIG_DFL and clear flags/masks.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

507 508
int unhandled_signal(struct task_struct *tsk, int sig)
{
509
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
510
	if (is_global_init(tsk))
511
		return 1;
512
	if (handler != SIG_IGN && handler != SIG_DFL)
513
		return 0;
Tejun Heo's avatar
Tejun Heo committed
514 515
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
516 517
}

518 519
/*
 * Pull the siginfo for @sig off @list into @info.  The pending bit is
 * cleared only when no further entry for @sig remains queued.
 * @resched_timer is set when a preallocated posix-timer entry was
 * consumed and the caller must rearm the timer.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
564
			siginfo_t *info, bool *resched_timer)
Linus Torvalds's avatar
Linus Torvalds committed
565
{
566
	int sig = next_signal(pending, mask);
Linus Torvalds's avatar
Linus Torvalds committed
567

568
	if (sig)
569
		collect_signal(sig, pending, info, resched_timer);
Linus Torvalds's avatar
Linus Torvalds committed
570 571 572 573
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
667
void signal_wake_up_state(struct task_struct *t, unsigned int state)
Linus Torvalds's avatar
Linus Torvalds committed
668 669 670
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
671
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
Matthew Wilcox's avatar
Matthew Wilcox committed
672
	 * case. We don't check t->state here because there is a race with it
Linus Torvalds's avatar
Linus Torvalds committed
673 674 675 676
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
677
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
Linus Torvalds's avatar
Linus Torvalds committed
678 679 680
		kick_process(t);
}

681 682 683 684 685 686
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
687
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
688 689 690 691 692 693 694 695
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

696
	sigandnsets(&s->signal, &s->signal, mask);
697 698 699 700 701 702 703 704
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
Linus Torvalds's avatar
Linus Torvalds committed
705

706 707 708 709 710 711 712 713 714 715 716
/*
 * True for the kernel-internal sentinel siginfo pointers (up to
 * SEND_SIG_FORCED); relies on the sentinels comparing below any
 * real siginfo address.
 */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

/*
 * Did this signal originate from userspace?  SEND_SIG_NOINFO counts
 * as user-sent; otherwise a real siginfo is consulted via SI_FROMUSER().
 */
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

717 718 719 720 721 722 723 724
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

725 726 727 728
	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
729 730
		return 1;

731
	if (ns_capable(tcred->user_ns, CAP_KILL))
732 733 734 735 736
		return 1;

	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
737 738
/*
 * Bad permissions for sending the signal
739
 * - the caller must hold the RCU read lock
Linus Torvalds's avatar
Linus Torvalds committed
740 741 742 743
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
744
	struct pid *sid;
745 746
	int error;

747
	if (!valid_signal(sig))
748 749
		return -EINVAL;

750
	if (!si_fromuser(info))
751
		return 0;
752

753 754
	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
Linus Torvalds's avatar
Linus Torvalds committed
755
		return error;
756

757
	if (!same_thread_group(current, t) &&
758
	    !kill_ok_by_cred(t)) {
759 760 761 762 763 764 765 766 767 768 769 770 771
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}
772

773
	return security_task_kill(t, info, sig, NULL);
Linus Torvalds's avatar
Linus Torvalds committed
774 775
}

776 777 778 779 780 781 782 783
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
Tejun Heo's avatar
Tejun Heo committed
784 785 786 787 788
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
789 790 791 792 793 794 795 796 797 798
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
799
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
800 801
}

Linus Torvalds's avatar
Linus Torvalds committed
802
/*
803 804
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
Linus Torvalds's avatar
Linus Torvalds committed
805 806
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
807 808 809 810
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
Linus Torvalds's avatar
Linus Torvalds committed
811
 */
812
static bool prepare_signal(int sig, struct task_struct *p, bool force)
Linus Torvalds's avatar
Linus Torvalds committed
813
{
814
	struct signal_struct *signal = p->signal;
Linus Torvalds's avatar
Linus Torvalds committed
815
	struct task_struct *t;
816
	sigset_t flush;
Linus Torvalds's avatar
Linus Torvalds committed
817

818
	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
819
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
820
			return sig == SIGKILL;
Linus Torvalds's avatar
Linus Torvalds committed
821
		/*
822
		 * The process is in the middle of dying, nothing to do.
Linus Torvalds's avatar
Linus Torvalds committed
823
		 */
824
	} else if (sig_kernel_stop(sig)) {
Linus Torvalds's avatar
Linus Torvalds committed
825 826 827
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
828
		siginitset(&flush, sigmask(SIGCONT));
829
		flush_sigqueue_mask(&flush, &signal->shared_pending);
830
		for_each_thread(p, t)
831
			flush_sigqueue_mask(&flush, &t->pending);
Linus Torvalds's avatar
Linus Torvalds committed
832
	} else if (sig == SIGCONT) {
833
		unsigned int why;
Linus Torvalds's avatar
Linus Torvalds committed
834
		/*
835
		 * Remove all stop signals from all queues, wake all threads.
Linus Torvalds's avatar
Linus Torvalds committed
836
		 */
837
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
838
		flush_sigqueue_mask(&flush, &signal->shared_pending);
839
		for_each_thread(p, t) {
840
			flush_sigqueue_mask(&flush, &t->pending);
841
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
842 843 844 845
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
846
		}
Linus Torvalds's avatar
Linus Torvalds committed
847

848 849 850 851 852 853 854 855 856
		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
857
		if (signal->flags & SIGNAL_STOP_STOPPED)
858
			why |= SIGNAL_CLD_CONTINUED;
859
		else if (signal->group_stop_count)
860 861 862
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
863
			/*
864
			 * The first thread which returns from do_signal_stop()
865 866 867
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
868
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
869 870
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
Linus Torvalds's avatar
Linus Torvalds committed
871 872
		}
	}
873

874
	return !sig_ignored(p, sig, force);
Linus Torvalds's avatar
Linus Torvalds committed
875 876
}

877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	/* Blocked or exiting threads never want a signal. */
	if (sigismember(&p->blocked, sig) || (p->flags & PF_EXITING))
		return 0;

	/* SIGKILL is wanted unconditionally (checked before stop state). */
	if (sig == SIGKILL)
		return 1;

	if (task_is_stopped_or_traced(p))
		return 0;

	return task_curr(p) || !signal_pending(p);
}

898
/*
 * Pick a thread to wake for a signal that has already been placed on the
 * (private or shared) pending set, and handle the group-fatal case.
 * Called from __send_signal() with @p->sighand->siglock held (see the
 * assert_spin_locked() there).
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target rotates round-robin so one thread is not
		 * always the one woken; the scan stops after one full lap.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				/* Cancel any pending job-control stop ... */
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				/* ... and force every thread to die. */
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

974 975 976 977 978
/*
 * A legacy (non-realtime) signal does not queue: report whether @sig is
 * already a member of @signals' pending set, in which case a second
 * instance would be dropped.
 */
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	if (sig >= SIGRTMIN)
		return 0;

	return sigismember(&signals->signal, sig);
}

979 980 981 982 983 984 985 986 987
#ifdef CONFIG_USER_NS
/*
 * Rewrite info->si_uid so the receiving task sees the sender's uid mapped
 * into its own user namespace.  Kernel-originated siginfo carries no
 * meaningful si_uid and is left untouched, as is a signal exchanged
 * entirely within one user namespace.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (SI_FROMKERNEL(info))
		return;

	if (task_cred_xxx(t, user_ns) == current_user_ns())
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
}
#endif

1000 1001
/*
 * Core signal-generation path: queue @sig (with @info) on @t's private
 * pending set, or the thread group's shared set when @group is set, then
 * hand off to complete_signal() to wake a suitable thread.  Returns 0 on
 * success or -EAGAIN on rt-signal queue overflow.  Caller must hold
 * @t->sighand->siglock.  @from_ancestor_ns marks a sender that is
 * invisible in @t's pid namespace, so its si_pid must not leak through.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;	/* result feeds the signal_generate tracepoint */

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* Synthesize or copy the siginfo depending on the sentinel. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			/* Report the sender's pid as seen from t's pid ns. */
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			/* Sender has no pid in t's namespace: hide it. */
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

1105 1106 1107
/*
 * Thin front end for __send_signal(): work out whether the sender lives
 * in an ancestor pid namespace (and hence has no pid visible to @t)
 * before queuing the signal.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int cross_ns = 0;

#ifdef CONFIG_PID_NS
	/*
	 * A userspace-originated signal whose sender has no pid in t's
	 * active pid namespace came from an ancestor namespace.
	 */
	cross_ns = si_fromuser(info) &&
		   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, cross_ns);
}

1118
/*
 * Log a fatal signal, dumping the faulting registers and (on native i386)
 * the first bytes of code at the instruction pointer.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		unsigned char insn;
		int i = 0;

		/* Dump up to 16 code bytes, stopping at the first fault. */
		while (i < 16 &&
		       !get_user(insn, (unsigned char *)(regs->ip + i))) {
			pr_cont("%02x ", insn);
			i++;
		}
	}
	pr_cont("\n");
#endif
	/* Keep the register dump on one CPU. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

/* Parse the "print-fatal-signals=" boot parameter into print_fatal_signals. */
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);
	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds's avatar
Linus Torvalds committed
1150

1151 1152 1153 1154 1155 1156
/*
 * Queue @sig on @p's thread-group shared pending set.  Caller must hold
 * @p->sighand->siglock (enforced by the assertion in __send_signal()).
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

Linus Torvalds's avatar
Linus Torvalds committed
1157 1158 1159
/*
 * Queue @sig on @t's private (per-thread) pending set.  Caller must hold
 * @t->sighand->siglock (enforced by the assertion in __send_signal()).
 */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176
/*
 * Send @sig to @p — thread-group-wide when @group is true, otherwise to
 * the single task.  Takes and releases @p's sighand lock itself; returns
 * -ESRCH if the sighand is already gone, else send_signal()'s result.
 */
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret;

	if (!lock_task_sighand(p, &flags))
		return -ESRCH;

	ret = send_signal(sig, info, p, group);
	unlock_task_sighand(p, &flags);

	return ret;
}

Linus Torvalds's avatar
Linus Torvalds committed
1177 1178 1179
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		/* Ensure the signal can actually be delivered ... */
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			/* ... and that t re-evaluates its pending state. */
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	/* Queue it while still holding siglock, so the above can't race. */
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
1221
int zap_other_threads(struct task_struct *p)
Linus Torvalds's avatar
Linus Torvalds committed
1222
{
1223 1224
	struct task_struct *t = p;
	int count = 0;
Linus Torvalds's avatar
Linus Torvalds committed
1225 1226 1227

	p->signal->group_stop_count = 0;

1228
	while_each_thread(p, t) {
1229
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1230 1231 1232
		count++;

		/* Don't bother with already dead threads */
Linus Torvalds's avatar
Linus Torvalds committed
1233 1234 1235 1236 1237
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
1238