/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 * 		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache.
 */
#define DIO_PAGES	64

/*
 * Flags for dio_complete()
 */
#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
#define DIO_COMPLETE_INVALIDATE		0x02	/* Can invalidate pages */

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.   When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
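
/*
 * Worked example (added for illustration, not from the original source):
 * with 512-byte dio_blocks (blkbits == 9) on a 4096-byte-blocksize
 * filesystem (i_blkbits == 12), blkfactor == i_blkbits - blkbits == 3, so
 *
 *	fs_block  = dio_block >> blkfactor;	(8 dio_blocks per fs_block)
 *	dio_block = fs_block  << blkfactor;
 *
 * blkfactor == 0 means the request was already fs-blocksize aligned.
 */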

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;
	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int op;
	int op_flags;
	blk_qc_t bio_cookie;
	struct gendisk *bio_disk;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;                 /* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	ssize_t ret;

	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				&sdio->from);

	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		get_page(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		iov_iter_advance(sdio->iter, ret);
		ret += sdio->from;
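		/*
		 * Illustrative arithmetic (comment added): ret now counts
		 * bytes from the start of dio->pages[0].  E.g. from == 512
		 * with 3584 bytes pinned gives ret == 4096, so tail == 1
		 * (one valid page) and to == 4096 (the last page is used up
		 * to its end).
		 */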
		sdio->head = 0;
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;	
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently, to provide nicer use of the
 * L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}

/*
 * Warn about a page cache invalidation failure during a direct io write.
 */
void dio_warn_stale_pagecache(struct file *filp)
{
	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
	char pathname[128];
	struct inode *inode = file_inode(filp);
	char *path;

	errseq_set(&inode->i_mapping->wb_err, -EIO);
	if (__ratelimit(&_rs)) {
		path = file_path(filp, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
			current->comm);
	}
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @dio: the dio structure for the completed operation
 * @ret: the error code or byte count being folded into the final result
 * @flags: DIO_COMPLETE_* flags describing how completion was reached
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{
	loff_t offset = dio->iocb->ki_pos;
	ssize_t transferred = 0;
	int err;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->op == REQ_OP_READ) &&
		    ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
		/* ignore EFAULT if some IO has been done */
		if (unlikely(ret == -EFAULT) && transferred)
			ret = 0;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io) {
		// XXX: ki_pos??
		err = dio->end_io(dio->iocb, offset, ret, dio->private);
		if (err)
			ret = err;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (flags & DIO_COMPLETE_INVALIDATE &&
	    ret > 0 && dio->op == REQ_OP_WRITE &&
	    dio->inode->i_mapping->nrpages) {
		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
					offset >> PAGE_SHIFT,
					(offset + ret - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(dio->iocb->ki_filp);
	}

	inode_dio_end(dio->inode);

	if (flags & DIO_COMPLETE_ASYNC) {
		/*
		 * generic_write_sync expects ki_pos to have been updated
		 * already, but the submission path only does this for
		 * synchronous I/O.
		 */
		dio->iocb->ki_pos += transferred;

		if (ret > 0 && dio->op == REQ_OP_WRITE)
			ret = generic_write_sync(dio->iocb, ret);
		dio->iocb->ki_complete(dio->iocb, ret, 0);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
}

static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback. 
 */
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;
	bool defer_completion = false;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		/*
		 * Defer completion when defer_completion is set or
		 * when the inode has pages mapped and this is AIO write.
		 * We need to invalidate those pages because there is a
		 * chance they contain stale data in the case buffered IO
		 * went in between AIO submission and completion into the
		 * same region.
		 */
		if (dio->result)
			defer_completion = dio->defer_completion ||
					   (dio->op == REQ_OP_WRITE &&
					    dio->inode->i_mapping->nrpages);
		if (defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
		}
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio);
	else
		dio_bio_end_io(bio);
}
EXPORT_SYMBOL_GPL(dio_end_io);

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
	 * we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = first_sector;
	bio_set_op_attrs(bio, dio->op, dio->op_flags);
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	bio->bi_write_hint = dio->iocb->ki_hint;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	dio->bio_disk = bio->bi_disk;

	if (sdio->submit_io) {
		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
		dio->bio_cookie = BLK_QC_T_NONE;
	} else
		dio->bio_cookie = submit_bio(bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		put_page(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
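	/*
	 * Note (added for clarity): the submission path holds its own
	 * reference, so refcount > 1 means at least one bio is still in
	 * flight.
	 */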
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
			io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned i;
	blk_status_t err = bio->bi_status;

	if (err) {
		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
			dio->io_error = -EAGAIN;
		else
			dio->io_error = -EIO;
	}

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		bio_for_each_segment_all(bvec, bio, i) {
			struct page *page = bvec->bv_page;

			if (dio->op == REQ_OP_READ && !PageCompound(page) &&
					dio->should_dirty)
				set_page_dirty_lock(page);
			put_page(page);
		}
		bio_put(bio);
	}
	return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be checked via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Create a workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating a workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created the workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, i_blocksize(inode).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
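
/*
 * Sketch of the get_block() contract (illustrative only; phys_block,
 * nr_blocks and newly_allocated are hypothetical names): a filesystem
 * typically fills the buffer_head along the lines of
 *
 *	bh->b_bdev    = inode->i_sb->s_bdev;
 *	bh->b_blocknr = phys_block;			(in fs-block units)
 *	bh->b_size    = nr_blocks << inode->i_blkbits;
 *	set_buffer_mapped(bh);
 *	if (newly_allocated)
 *		set_buffer_new(bh);
 *
 * Leaving buffer_mapped() clear reports a hole instead.
 */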
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes that could fill holes inside i_size on a
		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
		 * overwrites are permitted. We will return early to the caller
		 * once we see an unmapped buffer head returned, and the caller
		 * will fall back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->op == REQ_OP_WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
							i_blkbits))
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		get_page(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}
		
/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible 
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
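
/*
 * Illustrative merge (example added, not from the original comment): two
 * successive calls for byte ranges (offset 0, len 512) and (offset 512,
 * len 512) of the same page that are also adjacent on disk simply grow
 * cur_page_len to 1024; nothing is submitted until the run breaks or
 * sdio->boundary is set.
 */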
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->op == REQ_OP_WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	get_page(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If sdio->boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (sdio->boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		if (sdio->bio)
			dio_bio_submit(dio, sdio);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros. This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
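
/*
 * Illustrative example (added): with blkfactor == 3 (eight 512-byte
 * dio_blocks per 4096-byte fs block), a write that starts at dio_block 5 of
 * a newly allocated fs block zeroes dio_blocks 0-4 first (end == 0), and a
 * write that ends at dio_block 5 zeroes dio_blocks 5-7 (end == 1).
 */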
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end) 
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission
 *
 * Direct IO against a blockdev is different from a file, because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	const unsigned i_blkbits = blkbits + sdio->blkfactor;
	int ret = 0;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		struct page *page;
		size_t from, to;

		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		from = sdio->head ? 0 : sdio->from;
		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
		sdio->head++;

		while (from < to) {
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					put_page(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh)) {
					clean_bdev_aliases(
						map_bh->b_bdev,
						map_bh->b_blocknr,
						map_bh->b_size >> i_blkbits);
				}

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->op == REQ_OP_WRITE) {
					put_page(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					put_page(page);
					goto out;
				}
				zero_user(page, from, 1 << blkbits);
				sdio->block_in_file++;
				from += 1 << blkbits;
				dio->result += 1 << blkbits;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (to - from) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  from,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				put_page(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			from += this_chunk_bytes;
			dio->result += this_chunk_bytes;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		put_page(page);
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to ->complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we increment the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that filesystems provide exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * is always inlined. Otherwise gcc is unable to split the structure into
 * individual fields and will generate much worse code. This is important
 * for the whole file.
 */
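
/*
 * Typical entry (illustrative sketch; ext2_like_direct_IO and my_get_block
 * are hypothetical names): a filesystem's ->direct_IO() usually reaches this
 * function through the blockdev_direct_IO() wrapper, e.g.
 *
 *	static ssize_t ext2_like_direct_IO(struct kiocb *iocb,
 *					   struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return blockdev_direct_IO(iocb, inode, iter, my_get_block);
 *	}
 */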
static inline ssize_t
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
		      struct block_device *bdev, struct iov_iter *iter,
		      get_block_t get_block, dio_iodone_t end_io,
		      dio_submit_t submit_io, int flags)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	const size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	const loff_t end = offset + count;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	struct buffer_head map_bh = { 0, };
	struct blk_plug plug;
	unsigned long align = offset | iov_iter_alignment(iter);

	/*
	 * Avoid references to bdev if not absolutely needed to give
	 * the early prefetch in the caller enough time.
	 */
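	/*
	 * Note (added for clarity): "align" ORs the file offset with the
	 * iovec alignment, so e.g. offset 512 against a 4096-byte fs block
	 * fails the first mask test; we then retest against the (typically
	 * smaller) device logical block size before giving up.
	 */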

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			goto out;
	}

	/* watch out for a 0 len io from a tricksy fs */
	if (iov_iter_rw(iter) == READ && !count)
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));