// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
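/*
 * Allocate one root's worth of mem_section structures for
 * SPARSEMEM_EXTREME, on the requested node if possible: from the slab
 * once it is up, otherwise from memblock during early boot.
 */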
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = memblock_virt_alloc_node(array_size, nid);

	return section;
}

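/*
 * Ensure the root slot covering @section_nr is populated, allocating
 * the backing array on @nid if it does not exist yet.  Returns -EEXIST
 * if the root is already set up.
 */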
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
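/*
 * Translate a mem_section pointer back to its section number.  With
 * SPARSEMEM_EXTREME the roots are allocated separately, so this scans
 * the root array for the one containing @ms.
 */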
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
		     break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section* ms)
{
	return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr <= __highest_present_section_nr));

	return -1;
}
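
/*
 * Walk only the present sections, stopping once we pass the highest
 * section number recorded by section_mark_present().
 */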
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr >= 0) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_virt_alloc(size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

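/*
 * Hook a section's mem_map and pageblock bitmap into its mem_section.
 * Returns 1 on success, -EINVAL if the section was never marked present.
 */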
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

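/* Size, in bytes, of one section's pageblock flags bitmap. */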
unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable section because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

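/*
 * Allocate the usemaps for all present sections in [pnum_begin, pnum_end)
 * on one node as a single block and hand out one slice per section.
 */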
static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
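/*
 * Boot-time, non-vmemmap allocation of one section's mem_map (the
 * struct page array) from memblock.
 */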
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	struct page *map;
	unsigned long size;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
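/*
 * Try a single large allocation covering every present section of the
 * node; if that fails, fall back to allocating each mem_map separately.
 */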
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid_raw(size * map_count,
					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					      BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
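/*
 * Allocate the mem_map for a single early section; on failure the
 * section is dropped by clearing its section_mem_map.
 */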
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid, NULL);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 *  alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 *  @alloc_func: function used to allocate usemaps or memmaps for one node
 *  @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
					(void *, unsigned long, unsigned long,
					unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for_each_present_section_nr(0, pnum) {
		struct mem_section *ms;

		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for_each_present_section_nr(pnum_begin + 1, pnum) {
		struct mem_section *ms;
		int nodeid;

		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, __highest_present_section_nr+1,
						map_count, nodeid_begin);
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * The mem_map is allocated with big pages (2M on 64-bit x86), while
	 * each usemap is much smaller than a page (roughly 24 bytes).
	 * Allocating them in turn (a 2M-aligned map, then a tiny usemap)
	 * would make each following 2M allocation slip onto yet another 2M
	 * block, leaving the memory full of holes on big systems, so try to
	 * allocate the 2M pages contiguously here.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif

	for_each_present_section_nr(0, pnum) {
		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid, altmap);
}
static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, altmap);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
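/*
 * Hotplug path without vmemmap: get the section's mem_map from the page
 * allocator if possible, falling back to vmalloc.
 */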
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is in
		 * a logically offlined state. This means all pages are isolated
		 * from page allocator. If removing section's memmap is placed
		 * on the same section, it must not be freed.
		 * If it is freed, page allocator may allocate it which will
		 * be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own locking and
	 * may sleep in kmalloc, so call it before taking the pgdat
	 * resize lock below.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, altmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

#ifdef CONFIG_DEBUG_VM
	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	memset(memmap, PAGE_POISON_PATTERN, sizeof(struct page) * PAGES_PER_SECTION);
#endif

	section_mark_present(ms);

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, altmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
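/*
 * Drop the hwpoison accounting for any poisoned pages in this range
 * before their struct pages go away with the section.
 */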
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

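/*
 * Free a section's usemap and mem_map, distinguishing allocations made
 * at hotplug time from those packed into bootmem at boot.
 */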
static void free_section_usemap(struct page *memmap, unsigned long *usemap,
		struct vmem_altmap *altmap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, altmap);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

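/*
 * Tear down one section: unhook its mem_map and usemap under the pgdat
 * resize lock, fix up hwpoison accounting, then free the backing
 * structures.
 */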
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap, altmap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */