/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

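/*
 * Handle a write fault on a mmapped page: reserve a block for the
 * faulting index, zero any part of the page beyond i_size and mark
 * the page dirty and up-to-date.
 */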
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if a GCed page is attached, don't write to the cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

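/*
 * Find a cached dentry for @inode and return its parent's inode number
 * through @pino; returns 1 on success and 0 if no alias is available.
 */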
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

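/*
 * Decide whether this fsync has to issue a full checkpoint rather than
 * relying on roll-forward recovery of fsynced node pages.
 */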
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

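/* check whether the inode page still has updates pending for this ino */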
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we need to check whether any inode updates are still pending */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

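/*
 * If the inode records a wrong parent ino, refresh it from a cached
 * dentry and write the inode back.
 */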
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

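/*
 * fsync/fdatasync entry point: write back dirty data pages, then either
 * trigger a checkpoint or flush the node pages needed for roll-forward
 * recovery, followed by a cache flush command.
 */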
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a
	 * sudden power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was set, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, we don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

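/* return the index of the first dirty page at or after @pgofs (SEEK_DATA only) */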
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

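/*
 * Implement SEEK_DATA/SEEK_HOLE by walking the dnode blocks from @offset
 * and checking each block address against the dirty page cache.
 */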
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs, inode);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

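/* mmap hook: load the encryption key if needed and convert away inline data */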
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		err = f2fs_get_encryption_info(inode);
		if (err)
			return 0;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

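/* open hook: for encrypted inodes, fail with -EACCES if no key is available */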
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = f2fs_get_encryption_info(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}

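/*
 * Invalidate up to @count block addresses in the current dnode starting
 * at dn->ofs_in_node and return the number of blocks actually freed.
 */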
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

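/* zero the tail of the data page that contains the new EOF at @from */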
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

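/*
 * Free all data blocks beyond @from: handle inline data, truncate the
 * dnode that covers @from, free the remaining node blocks and finally
 * zero the partial page at the boundary.
 */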
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the partial data page at the truncation point */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

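/* truncate the inode to its current i_size and update the timestamps */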
int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}

int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode), true);
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

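/* zero @len bytes at @start of the page at @index, allocating it if needed */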
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

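/* invalidate every block address in the page range [pg_start, pg_end) */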
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

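/*
 * Punch a hole over [offset, offset + len): zero the partial pages at
 * both ends and free every whole page in between.
 */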
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

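/*
 * Move the block at index @src to index @dst, either by re-pointing the
 * block address (when it is not checkpointed) or by copying the page data.
 */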
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true);
		f2fs_put_dnode(&dn);
	} else {
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, false);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		f2fs_put_dnode(&dn);
	}
	return ret;
}

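/* helper used by the collapse-range path to shift data blocks toward the file start */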
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {