/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

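/*
 * Illustrative sketch, not part of the upstream file: a minimal stoppable
 * kthread.  The names my_threadfn and "my_thread" are hypothetical.
 *
 *	static int my_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 *
 *	struct task_struct *task = kthread_run(my_threadfn, NULL, "my_thread");
 *	...
 *	int ret = kthread_stop(task);
 *
 * The value returned from my_threadfn() comes back as the return value
 * of kthread_stop().
 */
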
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

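/*
 * Illustrative sketch, not part of the upstream file: a thread function
 * that honours both park and stop requests (my_threadfn is hypothetical).
 *
 *	static int my_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * kthread_parkme() blocks in TASK_PARKED until kthread_unpark() is called;
 * kthread_stop() works on a parked thread because it unparks it first.
 */
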
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * the refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

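/*
 * Illustrative sketch, not part of the upstream file: a freezable loop
 * built on the helper above (my_threadfn is hypothetical).
 *
 *	static int my_threadfn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen))
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 *
 * set_freezable() clears PF_NOFREEZE (kthreads are not freezable by
 * default), and the helper then enters the refrigerator at a point where
 * kthread_stop() can still get through, avoiding the freezer deadlock.
 */
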
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		complete_all(&self->parked);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data
 * as its argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
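/*
 * Illustrative sketch, not part of the upstream file: create a thread
 * whose stack is allocated on a given NUMA node, then start it
 * (my_threadfn, my_data and node are hypothetical).
 *
 *	struct task_struct *task;
 *
 *	task = kthread_create_on_node(my_threadfn, my_data, node,
 *				      "my_thread");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	wake_up_process(task);
 *
 * kthread_run() is a convenience wrapper that performs exactly this
 * create-then-wake sequence with NUMA_NO_NODE.
 */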

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

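/*
 * Illustrative sketch, not part of the upstream file: bind a just-created,
 * still stopped kthread before the first wakeup (names hypothetical).
 *
 *	struct task_struct *task;
 *
 *	task = kthread_create(my_threadfn, NULL, "my_thread");
 *	if (!IS_ERR(task)) {
 *		kthread_bind(task, cpu);
 *		wake_up_process(task);
 *	}
 *
 * Binding must happen while the task is still inactive; afterwards
 * PF_NO_SETAFFINITY prevents the affinity from being changed from
 * userspace.
 */
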
/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread is created bound to @cpu and marked per-cpu; it is not
 * woken, so callers typically park it with kthread_park() and unpark
 * it once the CPU comes online.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

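/*
 * Illustrative sketch, not part of the upstream file, modelled on the
 * smpboot usage: keep the per-CPU thread parked until its CPU comes up
 * (my_threadfn is hypothetical).
 *
 *	struct task_struct *task;
 *
 *	task = kthread_create_on_cpu(my_threadfn, NULL, cpu, "my/%u");
 *	if (!IS_ERR(task))
 *		kthread_park(task);
 *
 * When the CPU is brought online, kthread_unpark(task) rebinds the
 * thread (KTHREAD_IS_PER_CPU is set) and lets it run.
 */
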
/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it. If
 * the thread is marked percpu then it's bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	reinit_completion(&kthread->parked);
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

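/*
 * Illustrative sketch, not part of the upstream file: temporarily quiesce
 * a running kthread t, e.g. around a reconfiguration.
 *
 *	if (!kthread_park(t)) {
 *		...		t is now in TASK_PARKED and off the CPU
 *		kthread_unpark(t);
 *	}
 *
 * After kthread_park() returns 0, wait_task_inactive() has verified that
 * the thread is scheduled out, so it is safe to touch state the thread
 * normally owns.
 */
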
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works must not hold any locks or leave preemption or interrupts disabled
 * when they finish. A safe point for freezing exists after one work finishes
 * and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

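/*
 * Illustrative sketch, not part of the upstream file: the legacy way of
 * wiring a worker up by hand, which the FIXME above refers to (the names
 * my_worker and "my_worker" are hypothetical).
 *
 *	DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *task;
 *
 *	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *
 * New code should prefer kthread_create_worker() below, which allocates
 * the worker and spawns kthread_worker_fn() in a single call.
 */
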
static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
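/*
 * Illustrative sketch, not part of the upstream file: a complete worker
 * lifecycle (my_func, my_work and the worker name are hypothetical).
 *
 *	static void my_func(struct kthread_work *work)
 *	{
 *		do one unit of work
 *	}
 *	static DEFINE_KTHREAD_WORK(my_work, my_func);
 *
 *	struct kthread_worker *worker;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_queue_work(worker, &my_work);
 *	...
 *	kthread_destroy_worker(worker);
 *
 * Passing KTW_FREEZABLE in @flags makes the worker thread freezable.
 */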

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed by @worker for async execution.  @worker
 * must have been created with kthread_create_worker() or set up with
 * kthread_init_worker().  Returns %true if @work was successfully
 * queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The function's signature is defined by struct timer_list.
 * It is called from an irqsafe timer with interrupts already disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used the wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if @work was already pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);

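/*
 * Illustrative sketch, not part of the upstream file: queue work roughly
 * one second from now (worker and my_dwork are hypothetical; my_dwork is
 * assumed to be set up with kthread_init_delayed_work()).
 *
 *	kthread_queue_delayed_work(worker, &my_dwork, HZ);
 *
 * A zero delay degenerates to kthread_queue_work(), queueing the work
 * immediately instead of on the next tick.
 */
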
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

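/*
 * Illustrative sketch, not part of the upstream file: make sure a work
 * item is idle before freeing the object embedding it (my_obj is
 * hypothetical).
 *
 *	kthread_flush_work(&my_obj->work);
 *	kfree(my_obj);
 *
 * Flushing does not prevent the work from being queued again; use
 * kthread_cancel_work_sync() below when re-queueing must be ruled out.
 */
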
/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily released
		 * to avoid a deadlock with the callback. In the meantime,
		 * any queuing is blocked by setting the canceling counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

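/*
 * Illustrative sketch, not part of the upstream file: re-arm a
 * watchdog-style timeout whenever activity is seen (worker and my_dwork
 * are hypothetical).
 *
 *	kthread_mod_delayed_work(worker, &my_dwork, msecs_to_jiffies(500));
 *
 * Unlike kthread_queue_delayed_work(), this restarts the timer even when
 * @dwork is already pending, which is exactly what a timeout that should
 * fire only after a quiet period wants.
 */
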
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

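/*
 * Illustrative sketch, not part of the upstream file: typical teardown
 * ordering for an object with both a plain and a delayed work item
 * (my_obj is hypothetical).
 *
 *	kthread_cancel_delayed_work_sync(&my_obj->dwork);
 *	kthread_cancel_work_sync(&my_obj->work);
 *	kfree(my_obj);
 *
 * Both calls may sleep, and the worker must stay alive until they
 * return; destroy the worker only afterwards.
 */
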
/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * The current thread must be a kthread. The thread is running jobs on
 * behalf of other threads. In some cases, we expect the jobs to attach
 * the cgroup info of the original threads instead of that of the current
 * thread. This function stores the original thread's cgroup info in the
 * current kthread context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

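/*
 * Illustrative sketch, not part of the upstream file: charge I/O issued
 * by this kthread to another thread's blkcg (css is assumed to come from
 * the originating task).
 *
 *	kthread_associate_blkcg(css);
 *	...		submit bios; they are charged to css
 *	kthread_associate_blkcg(NULL);
 *
 * Passing NULL drops the reference taken on the previously associated
 * css and clears the association.
 */
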
/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * The current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif