/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks.  Each chunk consists
 * of a boot-time determined number of units and the first chunk is
 * used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit's space.  I.e.,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and negative allocated.  Allocation inside a chunk is done
 * by scanning this map sequentially and serving the first matching
 * entry.  This is mostly copied from the percpu_modalloc() allocator.
 * Chunks can be determined from the address using the index field
 * in the page struct. The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
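
/*
 * Consumer-side sketch (illustrative only; struct my_stats is a
 * made-up example type, not defined anywhere in the tree):
 *
 *	struct my_stats { unsigned long rx, tx; };
 *	struct my_stats __percpu *st;
 *	unsigned int cpu;
 *
 *	st = alloc_percpu(struct my_stats);
 *	if (!st)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		per_cpu_ptr(st, cpu)->rx = 0;
 *	free_percpu(st);
 *
 * alloc_percpu() is the type-safe wrapper around __alloc_percpu()
 * below, and the returned area is already zeroed; the explicit stores
 * just demonstrate per-cpu access.
 */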

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
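
/*
 * The two translations above are inverses of each other.  A minimal
 * sketch of the default mapping, assuming 'off' is an offset within
 * the first chunk:
 *
 *	void *addr = pcpu_base_addr + off;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 * leaves ptr == __per_cpu_start + off, i.e. percpu pointers look like
 * offsets into the kernel's static percpu section.
 */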

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The size of the
 * reserved region is kept in pcpu_reserved_chunk_limit.  When the
 * reserved area doesn't exist, the following variables contain NULL
 * and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in alloc path so that it can be used
 * from early init path - sched_init() specifically.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that it's
 * necessary to grab both locks to remove a chunk from circulation as
 * allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
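
/*
 * A few illustrative values for the mapping above (with
 * PCPU_SLOT_BASE_SHIFT == 5):
 *
 *	__pcpu_size_to_slot(8)    == 1		fls(8)    == 4
 *	__pcpu_size_to_slot(32)   == 3		fls(32)   == 6
 *	__pcpu_size_to_slot(4096) == 10		fls(4096) == 13
 *
 * so each slot roughly covers one power-of-two size class.
 */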

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
						unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
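
/*
 * Usage sketch (illustrative only): walk every unpopulated page region
 * of a chunk and populate it.  Each iteration sees one maximal run
 * [rs, re) of unpopulated unit pages:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end)
 *		populate_region(chunk, rs, re);
 *
 * where populate_region() stands in for whatever per-region work the
 * caller needs (cf. the pcpu_populate_chunk() implementations pulled
 * in below).
 */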

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
317 318
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_alloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, old_size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		old = chunk->map;

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
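
/*
 * Worked example (illustrative): splitting block i == 1 of map
 * { -256, 512, ... } with head == 128 and tail == 64 yields
 *
 *	{ -256, 128, 320, 64, ... }
 *
 * i.e. 128 free bytes stay in front, 64 free bytes stay behind, and
 * the 320 byte middle block is what the caller then marks allocated.
 */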

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
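
/*
 * Worked example (illustrative): pcpu_alloc_area(chunk, 64, 64) on a
 * chunk whose map is { -128, 448 } skips the allocated first block,
 * needs no head padding at offset 128, splits off a 384 byte tail and
 * returns 128, leaving
 *
 *	{ -128, -64, 384 }
 */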

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
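
/*
 * Continuing the example above (illustrative): pcpu_free_area(chunk,
 * 128) on map { -128, -64, 384 } flips the -64 entry back to free and
 * the merge-with-next step folds it into the trailing free block,
 * restoring
 *
 *	{ -128, 448 }
 */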

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the containing struct page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
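
/*
 * Usage sketch (illustrative): a dynamic per-cpu counter.  Callers
 * normally use the alloc_percpu() macro instead of calling
 * __alloc_percpu() directly:
 *
 *	unsigned long __percpu *counter;
 *	int cpu;
 *
 *	counter = __alloc_percpu(sizeof(unsigned long),
 *				 __alignof__(unsigned long));
 *	if (counter) {
 *		cpu = get_cpu();
 *		(*per_cpu_ptr(counter, cpu))++;
 *		put_cpu();
 *		free_percpu(counter);
 *	}
 */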

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	if (pcpu_addr_in_first_chunk(addr)) {
		if ((unsigned long)addr < VMALLOC_START ||
		    (unsigned long)addr >= VMALLOC_END)
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr));
	} else
988
		return page_to_phys(pcpu_addr_to_page(addr));
989 990
}

991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004
static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}
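
/*
 * Numeric example (illustrative, 4k pages): static_size == 45000,
 * reserved_size == 8192 and *dyn_sizep == 12288 sum to 65480, which
 * PFN_ALIGN() rounds up to 65536.  The 56 bytes of round-up padding
 * are handed to the dynamic area, so *dyn_sizep comes back as 12344.
 */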

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes in different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
		group_cnt_max = max(group_cnt_max, group_cnt[group]);
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 25%.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
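
/*
 * Example outcome (illustrative): on a 4-cpu, 2-node machine with
 * cpus 0-1 on node 0, cpus 2-3 on node 1 and cpu_distance_fn()
 * reporting > LOCAL_DISTANCE across nodes, this builds two groups of
 * two units each; cpus within a group share one contiguous allocation
 * while the two groups may be placed in distinct, node-local areas.
 */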

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk("%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk("%s ", empty_str);
		}
	}
	printk("\n");
}
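
/*
 * Sample output (illustrative; the numbers depend on configuration):
 *
 *	pcpu-alloc: s45000 r8192 d12344 u65536 alloc=16*4096
 *	pcpu-alloc: [0] 0 1 [1] 2 3
 *
 * i.e. static/reserved/dynamic/unit sizes, the allocation geometry,
 * then each group's unit -> cpu assignments.
 */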

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	static int smap[2], dmap[2];
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;
	pcpu_first_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);