// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
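/*
 * Illustrative usage sketch (not part of this file): a typical client of
 * this allocator creates a cache once and then allocates and frees objects
 * from it.  "struct foo" and foo_cachep below are made-up names.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */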

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>
#include	<linux/sched/task_stack.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
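/*
 * Sketch of how entry[] is used (illustration only): it behaves as a LIFO
 * stack indexed by 'avail', so the fast paths are roughly
 *
 *	objp = ac->entry[--ac->avail];		// allocation pops
 *	ac->entry[ac->avail++] = objp;		// free pushes
 *
 * which hands back the most recently freed, cache-warm object first.
 */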

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
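/*
 * Worked example (illustrative values): for a cache of 256-byte objects the
 * setup code stores cache->reciprocal_buffer_size = reciprocal_value(256),
 * so an object at offset 1280 from page->s_mem gives
 * reciprocal_divide(1280, cache->reciprocal_buffer_size) == 5, the same as
 * 1280 / 256 but without a division on the hot path.
 */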

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
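/*
 * Worked example (illustrative): for a 4096-byte order-0 slab, 256-byte
 * objects and an on-slab freelist, and assuming sizeof(freelist_idx_t) == 1,
 * this yields num = 4096 / 257 = 15 objects and
 * *left_over = 4096 - 15 * 257 = 241 bytes, later usable for colouring.
 */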

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
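/*
 * Example (illustrative): booting with "slab_max_order=2" allows slabs of up
 * to four contiguous pages; negative values are clamped to 0 and values
 * above MAX_ORDER - 1 are capped by the min() above.
 */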

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free object.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
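/*
 * Example of the arithmetic above (illustrative): with from->avail == 5,
 * to->avail == 1 and nr == 3, the three most recently freed pointers
 * from->entry[2..4] are appended at to->entry[1..3], leaving
 * from->avail == 2 and to->avail == 4.
 */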

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing to it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_sched().
	 */
	if (old_shared && force_change)
		synchronize_sched();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

int slab_prepare_cpu(unsigned int cpu)
{
	int err;

	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
 * a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The kmem_cache_node structure is usually allocated from
 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
#endif

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
}

static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, INT_MAX);

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node for cache whose buffer_size is same as
 * size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	kmem_cache = &kmem_cache_boot;

	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN, 0, 0);
	list_add(&kmem_cache->list, &slab_caches);
	memcg_link_cache(kmem_cache);