/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}


	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible to the IPI handler, which
	 * will lock the list to pull the entry off, before the IPI is
	 * sent; this follows from the normal cache coherency rules
	 * implied by spinlocks.
	 *
	 * If IPIs can be delivered out of order with respect to the cache
	 * coherency protocol on an architecture, sufficient synchronisation
	 * should be added to arch code to make it appear to obey cache
	 * coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * should run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
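
/*
 * Illustrative usage sketch for smp_call_function_single(): the callback
 * runs in hard-IRQ context on the target CPU, so it must be fast and must
 * not sleep.  example_read_remote_id() and example_single_call() are
 * hypothetical names, not part of the kernel API.
 */
static void example_read_remote_id(void *info)
{
	unsigned long *id = info;

	/* Runs with interrupts disabled on the target CPU. */
	*id = raw_smp_processor_id();
}

static int __maybe_unused example_single_call(int target_cpu)
{
	unsigned long id = 0;

	/* wait=1: return only after the callback has completed remotely. */
	return smp_call_function_single(target_cpu, example_read_remote_id,
					&id, 1);
}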

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 * 			         specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (i.e. embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
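
/*
 * Illustrative usage sketch for smp_call_function_single_async(): the
 * call_single_data_t is embedded in a longer-lived, caller-owned object,
 * and the caller must not resubmit the csd until the previous callback
 * has finished.  struct example_async_work, example_async_fn() and
 * example_async_kick() are hypothetical names.
 */
struct example_async_work {
	call_single_data_t	csd;
	unsigned long		payload;
};

static void example_async_fn(void *info)
{
	struct example_async_work *work = info;

	/* Runs in IRQ context on the target CPU; keep it short. */
	work->payload++;
}

static int __maybe_unused example_async_kick(int target_cpu,
					     struct example_async_work *work)
{
	work->csd.func = example_async_fn;
	work->csd.info = work;

	/* Returns without waiting; -ENXIO if the target CPU is offline. */
	return smp_call_function_single_async(target_cpu, &work->csd);
}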

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
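
/*
 * Illustrative usage sketch for smp_call_function_any(): run a fast,
 * non-blocking callback on whichever CPU of a domain is cheapest to
 * reach (preferably the local one).  example_flush_fn() and
 * example_flush_domain() are hypothetical names.
 */
static void example_flush_fn(void *info)
{
	/* Fast, non-blocking work on the selected CPU. */
}

static int __maybe_unused example_flush_domain(const struct cpumask *domain)
{
	/* wait=1: do not return until example_flush_fn() has completed. */
	return smp_call_function_any(domain, example_flush_fn, NULL, 1);
}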

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
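
/*
 * Illustrative usage sketch for smp_call_function_many(): preemption must
 * be disabled around the call, and the local CPU is deliberately skipped,
 * so callers that also need the work done locally run the function
 * themselves (as on_each_cpu_mask() below does).  example_remote_fn() and
 * example_call_many() are hypothetical names.
 */
static void example_remote_fn(void *info)
{
	/* Runs from IRQ context on each remote CPU in the mask. */
}

static void __maybe_unused example_call_many(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, example_remote_fn, NULL, true);
	preempt_enable();
}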

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Final decision about SMT support */
	cpu_smt_check_topology();
	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
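
/*
 * Illustrative usage sketch for on_each_cpu_mask(): reset a per-CPU
 * counter on every CPU in a mask, including the local CPU if it is in
 * the mask.  example_counter, example_reset_counter() and
 * example_reset_all() are hypothetical names.
 */
static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_reset_counter(void *info)
{
	this_cpu_write(example_counter, 0);
}

static void __maybe_unused example_reset_all(const struct cpumask *mask)
{
	/* wait=true: every reset has happened by the time this returns. */
	on_each_cpu_mask(mask, example_reset_counter, NULL, true);
}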

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
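
/*
 * Illustrative usage sketch for on_each_cpu_cond(): IPI only the CPUs
 * that actually have work pending, as reported by the @cond_func
 * predicate.  example_pending, example_has_pending(), example_drain()
 * and example_drain_pending() are hypothetical names.
 */
static DEFINE_PER_CPU(bool, example_pending);

static bool example_has_pending(int cpu, void *info)
{
	/* Called with preemption disabled; decide whether to IPI @cpu. */
	return per_cpu(example_pending, cpu);
}

static void example_drain(void *info)
{
	this_cpu_write(example_pending, false);
	/* ... drain the per-CPU work here ... */
}

static void __maybe_unused example_drain_pending(void)
{
	on_each_cpu_cond(example_has_pending, example_drain, NULL, true,
			 GFP_KERNEL);
}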

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus() tries to break all CPUs that are in an idle
 * state, including CPUs that are idle polling; non-idle CPUs are left
 * alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
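
/*
 * Illustrative usage sketch for smp_call_on_cpu(): unlike the IPI-based
 * helpers above, the function runs from a kworker on the target CPU, so
 * it may sleep and may return a status code.  example_slow_update() and
 * example_update_on() are hypothetical names.
 */
static int example_slow_update(void *data)
{
	/* May sleep; runs on the requested CPU (and vCPU when phys=true). */
	return 0;
}

static int __maybe_unused example_update_on(unsigned int cpu)
{
	return smp_call_on_cpu(cpu, example_slow_update, NULL, false);
}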