// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

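/*
 * Compute the metadata checksum of an on-disk inode: checksum the raw
 * inode with the per-inode seed, treating the i_checksum_lo/hi fields
 * themselves as zero, and covering the extra inode space when the inode
 * is larger than EXT4_GOOD_OLD_INODE_SIZE.
 */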
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			      struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

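/*
 * Verify the inode checksum. Returns 1 when the checksum matches (or when
 * metadata checksums are not in use for this filesystem) and 0 on mismatch.
 */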
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	int extra_credits = 3;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_data.nrpages) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				 ext4_blocks_for_truncate(inode)+extra_credits);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error(inode->i_sb,
				   "couldn't truncate inode %lu (err %d)",
				   inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

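/*
 * Sanity-check a block mapping: make sure the physical range returned by
 * the mapping code lies inside the filesystem, and report the inode as
 * corrupted otherwise.
 */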
static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

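/*
 * Zero out a range of physical blocks. For encrypted inodes the zeroing is
 * done through fscrypt_zeroout_range() rather than by writing raw zeroes.
 */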
int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (ext4_encrypted_inode(inode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window that the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem.  So at the time the unwritten extent
	 * could be converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree.  So the m_len might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.  If
 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can handle the block number less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
					   map->m_len);
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode);
			if (ret)
				return ret;
		}
	}
	return retval;
}

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

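/*
 * Helper shared by the get_block_t callbacks below: translate the request
 * into an ext4_map_blocks() call and copy the result back into the
 * buffer_head.
 */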
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

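/* Standard get_block callback: allocate blocks only when 'create' is set. */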
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated.  The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO. It also handles retries
 * in case of ENOSPC.
 */
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int flags)
{
	int dio_credits;
	handle_t *handle;
	int retries = 0;
	int ret;

	/* Trim mapping request to maximum we can map at once for DIO */
	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
	dio_credits = ext4_chunk_trans_blocks(inode,
				      bh_result->b_size >> inode->i_blkbits);
retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ret = _ext4_get_block(inode, iblock, bh_result, flags);
	ext4_journal_stop(handle);

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return ret;
}

/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
		       struct buffer_head *bh, int create)
{
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	if (!create)
		return _ext4_get_block(inode, iblock, bh, 0);
	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}

/*
 * Get block function for AIO DIO writes when we create unwritten extent if
 * blocks are not allocated yet. The extent will be converted to written
 * after IO is complete.
 */
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result,	int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * When doing DIO using unwritten extents, we need io_end to convert
	 * unwritten extents to written on IO completion. We allocate io_end
	 * once we spot unwritten extent and store it in b_private. Generic
	 * DIO code keeps b_private set and furthermore passes the value to
	 * our completion callback in 'private' argument.
	 */
	if (!ret && buffer_unwritten(bh_result)) {
		if (!bh_result->b_private) {
			ext4_io_end_t *io_end;

			io_end = ext4_init_io_end(inode, GFP_KERNEL);
			if (!io_end)
				return -ENOMEM;
			bh_result->b_private = io_end;
			ext4_set_io_unwritten_flag(inode, io_end);
		}
		set_buffer_defer_completion(bh_result);
	}

	return ret;
}

/*
 * Get block function for non-AIO DIO writes when we create unwritten extent if
 * blocks are not allocated yet. The extent will be converted to written
 * after IO is complete by ext4_direct_IO_write().
 */
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result,	int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * Mark inode as having pending DIO writes to unwritten extents.
	 * ext4_direct_IO_write() checks this flag and converts extents to
	 * written.
	 */
	if (!ret && buffer_unwritten(bh_result))
		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);

	return ret;
}

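/*
 * Get block function for DIO overwrites: the blocks must already be
 * allocated and written, so no transaction is needed and only a lookup
 * is performed.
 */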
static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	int ret;

	ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = _ext4_get_block(inode, iblock, bh_result, 0);
	/*
	 * Blocks should have been preallocated! ext4_file_write_iter() checks
	 * that.
	 */
	WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));

	return ret;
}


/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

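/*
 * Like ext4_getblk(), but also read the block from disk if it is not
 * already uptodate.
 */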
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	return ERR_PTR(-EIO);
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !buffer_uptodate(bhs[i]))
			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
				    &bhs[i]);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

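/*
 * Walk the buffers of a page and apply 'fn' to each buffer that overlaps
 * the byte range [from, to).  Buffers outside the range are skipped; if
 * any skipped buffer is not uptodate, *partial is set.
 */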
int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

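/*
 * Encryption-aware counterpart of __block_write_begin(), used when
 * CONFIG_EXT4_FS_ENCRYPTION is enabled: it maps the buffers backing the
 * write range and tracks, via the 'decrypt' flag below, whether blocks
 * read in from disk still need to be decrypted before use.
 */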
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	bool decrypt = false;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);