// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Allocation order */
	int order;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t	*nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/* Scan (total_size >> priority) pages at once */
	int priority;

	/* The highest zone to isolate pages for reclaim from */
	enum zone_type reclaim_idx;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroups are not reclaimed below their configured memory.low,
	 * unless we threaten to OOM. If any cgroups are skipped due to
	 * memory.low and nothing was reclaimed, go back for memory.low.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;
};

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
/*
 * The total number of pages which are beyond the high watermark within all
 * zones.
 */
unsigned long vm_total_pages;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

/**
 * sane_reclaim - is the usual dirty throttling mechanism operational?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_page_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool sane_reclaim(struct scan_control *sc)
{
	struct mem_cgroup *memcg = sc->target_mem_cgroup;

	if (!memcg)
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}

static void set_memcg_congestion(pg_data_t *pgdat,
				struct mem_cgroup *memcg,
				bool congested)
{
	struct mem_cgroup_per_node *mn;

	if (!memcg)
		return;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	WRITE_ONCE(mn->congested, congested);
}

static bool memcg_congested(pg_data_t *pgdat,
			struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mn;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	return READ_ONCE(mn->congested);

}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool sane_reclaim(struct scan_control *sc)
{
	return true;
}

static inline void set_memcg_congestion(struct pglist_data *pgdat,
				struct mem_cgroup *memcg, bool congested)
{
}

static inline bool memcg_congested(struct pglist_data *pgdat,
			struct mem_cgroup *memcg)
{
	return false;

}
#endif

/*
 * This misses isolated pages which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated pages will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}

/**
 * lruvec_lru_size -  Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 */
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
	unsigned long lru_size;
	int zid;

	if (!mem_cgroup_disabled())
		lru_size = mem_cgroup_get_lru_size(lruvec, lru);
	else
		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);

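	/*
	 * Zones above @zone_idx are not eligible for this reclaim, so
	 * subtract their per-zone LRU counts from the total below.
	 */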
	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
		unsigned long size;

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
				       NR_ZONE_LRU_BASE + lru);
		lru_size -= min(size, lru_size);
	}

	return lru_size;

}

/*
 * Add a shrinker callback to be called from the vm.
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
	size_t size = sizeof(*shrinker->nr_deferred);

	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		return -ENOMEM;
	return 0;
}

void free_prealloced_shrinker(struct shrinker *shrinker)
{
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}

void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}

int register_shrinker(struct shrinker *shrinker)
{
	int err = prealloc_shrinker(shrinker);

	if (err)
		return err;
	register_shrinker_prepared(shrinker);
	return 0;
}
EXPORT_SYMBOL(register_shrinker);
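/*
 * Typical usage, as a rough sketch only (the my_* callbacks below are
 * illustrative placeholders, not symbols defined in the kernel):
 *
 *	static unsigned long my_count_objects(struct shrinker *s,
 *					      struct shrink_control *sc);
 *	static unsigned long my_scan_objects(struct shrinker *s,
 *					     struct shrink_control *sc);
 *
 *	static struct shrinker my_shrinker = {
 *		.count_objects	= my_count_objects,
 *		.scan_objects	= my_scan_objects,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	err = register_shrinker(&my_shrinker);
 *	...
 *	unregister_shrinker(&my_shrinker);
 */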

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	int nid = shrinkctl->nid;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0)
		return 0;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

	total_scan = nr;
	delta = freeable >> priority;
	delta *= 4;
	do_div(delta, shrinker->seeks);
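	/*
	 * delta is now roughly freeable * 4 / (2^priority * seeks): the
	 * share of the freeable objects this pass asks the shrinker to
	 * scan, scaled by the reclaim priority and the shrinker's
	 * declared seek cost.
	 */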
	total_scan += delta;
	if (total_scan < 0) {
		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
		       shrinker->scan_objects, total_scan);
		total_scan = freeable;
		next_deferred = nr;
	} else
		next_deferred = total_scan;

	/*
	 * We need to avoid excessive windup on filesystem shrinkers
	 * due to large numbers of GFP_NOFS allocations causing the
	 * shrinkers to return -1 all the time. This results in a large
	 * nr being built up so when a shrink that can do some work
	 * comes along it empties the entire cache due to nr >>>
	 * freeable. This is bad for sustaining a working set in
	 * memory.
	 *
	 * Hence only allow the shrinker to scan the entire cache when
	 * a large delta change is calculated directly.
	 */
	if (delta < freeable / 4)
		total_scan = min(total_scan, freeable / 2);

	/*
	 * Avoid risking looping forever due to too large nr value:
	 * never try to free more than twice the estimate number of
	 * freeable entries.
	 */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;
	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates. If we exhausted the
	 * scan, there is no need to do an update.
	 */
	if (next_deferred > 0)
		new_nr = atomic_long_add_return(next_deferred,
						&shrinker->nr_deferred[nid]);
	else
		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
	return freed;
}

/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. If it is not NULL,
 * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
 * objects from the memory cgroup specified. Otherwise, only unaware
 * shrinkers are called.
 *
 * @priority is sc->priority, we take the number of objects and >> by priority
 * in order to get the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
				 struct mem_cgroup *memcg,
				 int priority)
{
	struct shrinker *shrinker;
	unsigned long freed = 0;

	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
		return 0;

	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		/*
		 * If kernel memory accounting is disabled, we ignore
		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
		 * passing NULL for memcg.
		 */
		if (memcg_kmem_enabled() &&
		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
			continue;

		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
			sc.nid = 0;

		freed += do_shrink_slab(&sc, shrinker, priority);
		/*
		 * Bail out if someone wants to register a new shrinker to
		 * prevent the registration from being stalled for long periods
		 * by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}

void drop_slab_node(int nid)
{
	unsigned long freed;

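	/*
	 * Walk every memcg on this node, shrinking its slab caches, and
	 * repeat the whole pass until one full pass frees almost nothing
	 * (ten objects or fewer).
	 */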
	do {
		struct mem_cgroup *memcg = NULL;

		freed = 0;
		do {
			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
	} while (freed > 10);
}

void drop_slab(void)
{
	int nid;

	for_each_online_node(nid)
		drop_slab_node(nid);
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	int radix_pins = PageTransHuge(page) && PageSwapCache(page) ?
		HPAGE_PMD_NR : 1;
	return page_count(page) - page_has_private(page) == 1 + radix_pins;
}

static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!inode_write_congested(inode))
		return 1;
	if (inode_to_bdi(inode) == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed)
{
	unsigned long flags;
	int refcount;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irqsave(&mapping->i_pages, flags);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_refcount.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
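	/*
	 * Expected references: one held by the isolating caller plus one
	 * per page cache (or swap cache) slot, which for a THP in the swap
	 * cache is HPAGE_PMD_NR slots. Freezing fails if anyone else still
	 * holds a reference.
	 */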
	if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
		refcount = 1 + HPAGE_PMD_NR;
	else
		refcount = 2;
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
		__delete_from_swap_cache(page);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);
		void *shadow = NULL;

		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting.  This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost.  Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache pages found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && page_is_file_cache(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(mapping, page);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them
	 */
	if (!page_is_file_cache(page) ||
	    (PageAnon(page) && !PageSwapBacked(page))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);
914 915 916 917 918 919 920 921

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
922 923
}

Linus Torvalds's avatar
Linus Torvalds committed
924
/*
925
 * shrink_page_list() returns the number of reclaimed pages
Linus Torvalds's avatar
Linus Torvalds committed
926
 */
927
static unsigned long shrink_page_list(struct list_head *page_list,
928
				      struct pglist_data *pgdat,
929
				      struct scan_control *sc,
930
				      enum ttu_flags ttu_flags,
931
				      struct reclaim_stat *stat,
932
				      bool force_reclaim)
Linus Torvalds's avatar
Linus Torvalds committed
933 934
{
	LIST_HEAD(ret_pages);
935
	LIST_HEAD(free_pages);
Linus Torvalds's avatar
Linus Torvalds committed
936
	int pgactivate = 0;
937 938 939 940 941 942
	unsigned nr_unqueued_dirty = 0;
	unsigned nr_dirty = 0;
	unsigned nr_congested = 0;
	unsigned nr_reclaimed = 0;
	unsigned nr_writeback = 0;
	unsigned nr_immediate = 0;
943 944
	unsigned nr_ref_keep = 0;
	unsigned nr_unmap_fail = 0;
Linus Torvalds's avatar
Linus Torvalds committed
945 946 947 948 949 950 951

	cond_resched();

	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
952
		enum page_references references = PAGEREF_RECLAIM_CLEAN;
953
		bool dirty, writeback;
Linus Torvalds's avatar
Linus Torvalds committed
954 955 956 957 958 959

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

Nick Piggin's avatar
Nick Piggin committed
960
		if (!trylock_page(page))
Linus Torvalds's avatar
Linus Torvalds committed
961 962
			goto keep;

963
		VM_BUG_ON_PAGE(PageActive(page), page);
Linus Torvalds's avatar
Linus Torvalds committed
964 965

		sc->nr_scanned++;
966

967
		if (unlikely(!page_evictable(page)))
968
			goto activate_locked;
969

970
		if (!sc->may_unmap && page_mapped(page))
971 972
			goto keep_locked;

Linus Torvalds's avatar
Linus Torvalds committed
973
		/* Double the slab pressure for mapped and swapcache pages */
Shaohua Li's avatar
Shaohua Li committed
974 975
		if ((page_mapped(page) || PageSwapCache(page)) &&
		    !(PageAnon(page) && !PageSwapBacked(page)))
Linus Torvalds's avatar
Linus Torvalds committed
976 977
			sc->nr_scanned++;

978 979 980
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested which affects wait_iff_congested. kswapd
		 * will stall and start writing pages if the tail of the LRU
		 * is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			nr_dirty++;

		if (dirty && !writeback)
			nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is or if
		 * pages are cycling through the LRU so quickly that the
		 * pages marked for immediate reclaim are making it to the
		 * end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if (((dirty || writeback) && mapping &&
		     inode_write_congested(mapping->host)) ||
		    (writeback && PageReclaim(page)))
			nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number of pages
		 *    under writeback and this page is both under writeback and
		 *    PageReclaim then it indicates that pages are being queued
		 *    for IO but are being recycled through the LRU before the
		 *    IO can complete. Waiting on the page itself risks an
		 *    indefinite stall if it is impossible to writeback the
		 *    page due to IO error or disconnected storage so instead
		 *    note that the LRU is being scanned too quickly and the
		 *    caller can stall after page list has been processed.
		 *
		 * 2) Global or new memcg reclaim encounters a page that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the page for immediate
		 *    reclaim and continue scanning.
		 *
		 *    Require may_enter_fs because we would wait on fs, which
		 *    may not have submitted IO yet. And the loop driver might
		 *    enter reclaim, and deadlock if it waits on a page for
		 *    which it is needed to do the write (loop masks off
		 *    __GFP_IO|__GFP_FS for this reason); but more thought
		 *    would probably show more reasons.
		 *
		 * 3) Legacy memcg encounters a page that is already marked
		 *    PageReclaim. memcg does not have any dirty pages
		 *    throttling so we could easily OOM just because too many
		 *    pages are in writeback and there is nothing else to
		 *    reclaim. Wait for the writeback to complete.
		 *
		 * In cases 1) and 2) we activate the pages to get them out of
		 * the way while we continue scanning for clean pages on the
		 * inactive list and refilling from the active list. The
		 * observation here is that waiting for disk writes is more
		 * expensive than potentially causing reloads down the line.
		 * Since they're marked for immediate reclaim, they won't put
		 * memory pressure on the cache working set any longer than it
		 * takes to write them to disk.
		 */
		if (PageWriteback(page)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    PageReclaim(page) &&
			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
				nr_immediate++;
				goto activate_locked;

			/* Case 2 above */
			} else if (sane_reclaim(sc) ||
			    !PageReclaim(page) || !may_enter_fs) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here end up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care.  What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM;
				 * and it's also appropriate in global reclaim.
				 */
				SetPageReclaim(page);
				nr_writeback++;
				goto activate_locked;

			/* Case 3 above */
			} else {
				unlock_page(page);
				wait_on_page_writeback(page);
				/* then go back and try same page again */
				list_add_tail(&page->lru, page_list);
				continue;
			}
		}

		if (!force_reclaim)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			nr_ref_keep++;
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 * Lazyfree page could be freed directly
		 */
		if (PageAnon(page) && PageSwapBacked(page)) {
			if (!PageSwapCache(page)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (PageTransHuge(page)) {
					/* cannot split THP, skip it */
					if (!can_split_huge_page(page, NULL))
						goto activate_locked;
					/*
					 * Split pages without a PMD map right
					 * away. Chances are some or all of the
					 * tail pages can be freed without IO.
					 */
					if (!compound_mapcount(page) &&
					    split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
				}
				if (!add_to_swap(page)) {
					if (!PageTransHuge(page))
						goto activate_locked;
					/* Fallback to swap normal pages */
					if (split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					count_vm_event(THP_SWPOUT_FALLBACK);
#endif
					if (!add_to_swap(page))
						goto activate_locked;
				}

				may_enter_fs = 1;

				/* Adding to swap updated mapping */
				mapping = page_mapping(page);
			}
		} else if (unlikely(PageTransHuge(page))) {
			/* Split file THP */
			if (split_huge_page_to_list(page, page_list))
				goto keep_locked;
		}

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page)) {
			enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;

			if (unlikely(PageTransHuge(page)))
				flags |= TTU_SPLIT_HUGE_PMD;
			if (!try_to_unmap(page, flags)) {
				nr_unmap_fail++;
				goto activate_locked;
			}
		}

		if (PageDirty(page)) {
			/*
			 * Only kswapd can writeback filesystem pages
			 * to avoid risk of stack overflow. But avoid
			 * injecting inefficient single-page IO into
			 * flusher writeback as much as possible: only
			 * write pages when we've encountered many
			 * dirty pages, and when we've already scanned
			 * the rest of the LRU for clean pages and see
			 * the same dirty pages again (PageReclaim).
			 */
			if (page_is_file_cache(page) &&
			    (!current_is_kswapd() || !PageReclaim(page) ||
			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto activate_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Page is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after IO
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
1235
		 * will do this, as well as the blockdev mapping.
Linus Torvalds's avatar
Linus Torvalds committed
1236 1237 1238 1239 1240 1241 1242 1243 1244 1245
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (PageAnon(page) && !PageSwapBacked(page)) {
			/* follow __remove_mapping for reference */
			if (!page_ref_freeze(page, 1))
				goto keep_locked;
			if (PageDirty(page)) {
				page_ref_unfreeze(page, 1);
				goto keep_locked;
			}

			count_vm_event(PGLAZYFREED);
			count_memcg_page_event(page, PGLAZYFREED);
		} else if (!mapping || !__remove_mapping(mapping, page, true))
			goto keep_locked;
		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__ClearPageLocked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		if (unlikely(PageTransHuge(page))) {
			mem_cgroup_uncharge(page);
			(*get_compound_page_dtor(page))(page);
		} else
			list_add(&page->lru, &free_pages);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
						PageMlocked(page)))
			try_to_free_swap(page);
		VM_BUG_ON_PAGE(PageActive(page), page);
		if (!PageMlocked(page)) {
			SetPageActive(page);
			pgactivate++;
			count_memcg_page_event(page, PGACTIVATE);
		}
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
	}

	mem_cgroup_uncharge_list(&free_pages);
	try_to_unmap_flush();
	free_unref_page_list(&free_pages);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);

	if (stat) {
		stat->nr_dirty = nr_dirty;
		stat->nr_congested = nr_congested;
		stat->nr_unqueued_dirty = nr_unqueued_dirty;
		stat->nr_writeback = nr_writeback;
		stat->nr_immediate = nr_immediate;
		stat->nr_activate = pgactivate;
		stat->nr_ref_keep = nr_ref_keep;
		stat->nr_unmap_fail = nr_unmap_fail;
	}
	return nr_reclaimed;
}

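/*
 * Reclaim the clean file pages on @page_list immediately, leaving dirty,
 * anonymous and __PageMovable pages untouched on the list. Returns the
 * number of pages reclaimed.
 */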
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
	};
	unsigned long ret;
	struct page *page, *next;
	LIST_HEAD(clean_pages);

	list_for_each_entry_safe(page, next, page_list, lru) {
		if (page_is_file_cache(page) && !PageDirty(page) &&
		    !__PageMovable(page)) {
			ClearPageActive(page);
			list_move(&page->lru, &clean_pages);
		}
	}

	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
			TTU_IGNORE_ACCESS, NULL, true);
	list_splice(&clean_pages, page_list);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
	return ret;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Compaction should not handle unevictable pages but CMA can do so */
	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
	 * that it is possible to migrate without blocking
	 */
	if (mode & ISOLATE_ASYNC_MIGRATE) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;
			bool migrate_dirty;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
1411 1412 1413 1414 1415
			 * without blocking. However, we can be racing with
			 * truncation so it's necessary to lock the page
			 * to stabilise the mapping as truncation holds
			 * the page lock until after the page is removed
			 * from the page cache.
1416
			 */
1417 1418 1419
			if (!trylock_page(page))
				return ret;

			mapping = page_mapping(page);
			migrate_dirty = !mapping || mapping->a_ops->migratepage;
			unlock_page(page);
			if (!migrate_dirty)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}


/*
 * Update LRU sizes after isolating pages. The LRU size updates must
 * be complete before mem_cgroup_update_lru_size due to a sanity check.
 */
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
			enum lru_list lru, unsigned long *nr_zone_taken)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		if (!nr_zone_taken[zid])
			continue;

		__update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
#ifdef CONFIG_MEMCG
		mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
#endif
	}

}