file.c 44.7 KB
Newer Older
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1
/*
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2 3 4 5 6 7 8 9 10 11 12 13 14 15
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
16
#include <linux/blkdev.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
17 18
#include <linux/falloc.h>
#include <linux/types.h>
19
#include <linux/compat.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
20 21
#include <linux/uaccess.h>
#include <linux/mount.h>
22
#include <linux/pagevec.h>
23
#include <linux/random.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
24 25 26 27 28 29

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
30
#include "gc.h"
Jaegeuk Kim's avatar
Jaegeuk Kim committed
31
#include "trace.h"
32
#include <trace/events/f2fs.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
33 34 35 36 37

/*
 * Page-fault handler invoked when a shared-mapped page is about to become
 * writable.  Reserves an on-disk block for the faulting page, revalidates
 * the page under its lock, zeroes the tail beyond EOF, and waits for any
 * in-flight writeback before the caller may dirty the page.
 */
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	/* may trigger foreground GC to guarantee free segments */
	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	/* inline-data inodes must be converted before they can be mmapped */
	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	/* page may have been truncated/reclaimed while unlocked */
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		/* zero the part of the page that lies beyond i_size */
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	/* translate 0/-errno into VM_FAULT_* codes */
	return block_page_mkwrite_return(err);
}

/* VM operations for f2fs file mappings: generic fault paths plus the
 * f2fs-specific mkwrite handler that allocates blocks on first write. */
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

108 109 110 111 112 113 114 115 116 117
/*
 * Look up the parent inode number of @inode via any attached dentry.
 * Returns 1 and stores the parent ino in *pino on success, 0 when no
 * alias exists or the dentry's on-disk entry cannot be refreshed.
 */
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/* pin the inode while we search its alias list */
	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

128 129
/*
 * Decide whether an fsync on @inode requires a full checkpoint instead of
 * the cheaper node-page flush + roll-forward recovery path.  Each branch
 * covers a case roll-forward recovery cannot handle on its own.
 */
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	/* roll-forward only recovers regular files with a single link */
	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	/* xattr changed since the last checkpoint: not recoverable */
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	/* with only two logs there is no dedicated fsync (warm node) log */
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

153 154 155 156 157 158 159 160 161 162 163
/*
 * Tell whether inode @ino still has pending metadata updates: either its
 * node page is dirty in the node address space, or an inode block update
 * is recorded elsewhere.
 */
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *node_page;
	bool pending;

	node_page = find_get_page(NODE_MAPPING(sbi), ino);
	/* But we need to avoid that there are some inode updates */
	pending = (node_page && PageDirty(node_page)) ||
			need_inode_block_update(sbi, ino);
	f2fs_put_page(node_page, 0);
	return pending;
}

164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183
/*
 * After a checkpoint secured consistency, repair a stale parent-ino hint
 * (and reset the xattr version) so subsequent fsyncs can use the cheap
 * roll-forward path.  Writes the inode back only when the pino changed.
 */
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		/* drop i_sem before the potentially blocking inode write */
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
184 185 186
/*
 * fsync/fdatasync entry point.  Writes dirty data in [start, end], then
 * either triggers a full checkpoint (when roll-forward recovery cannot
 * cover this inode, see need_do_checkpoint()) or flushes just the inode's
 * node pages and issues a device flush for the fast recovery path.
 * Returns 0 on success or a negative errno.
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	/* nothing to persist on a read-only filesystem */
	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;

	/* re-sync until the inode block itself is stable on disk */
	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to tack this */
	remove_dirty_inode(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_dirty_inode(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	/* push the device cache so the data is durable */
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

290 291 292 293 294 295 296 297 298 299 300
/*
 * For SEEK_DATA, return the index of the first dirty page at or after
 * @pgofs (dirty pages count as data even before they hit disk), or
 * LONG_MAX when none exists.  For other whence values, return 0.
 */
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

/*
 * Check whether the block at @pgofs (with on-disk address @blkaddr)
 * satisfies the current llseek search.  SEEK_DATA matches any allocated
 * block, or a reserved-but-unwritten block (NEW_ADDR) that is backed by
 * the first dirty page (@dirty).  SEEK_HOLE matches unallocated blocks.
 */
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	if (whence == SEEK_DATA) {
		if (blkaddr == NEW_ADDR)
			return dirty == pgofs;
		return blkaddr != NULL_ADDR;
	}

	if (whence == SEEK_HOLE)
		return blkaddr == NULL_ADDR;

	return false;
}

325 326 327 328 329
/*
 * Implement SEEK_DATA/SEEK_HOLE by walking the inode's direct node
 * blocks from @offset toward i_size, checking each block address with
 * __found_offset().  Returns the new file position via vfs_setpos() or
 * -ENXIO when offset is past EOF / no data was found.
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		/* inline payload is treated as one contiguous data extent */
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exists */
			if (whence == SEEK_DATA) {
				/* whole dnode is a hole: skip past it */
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	/* clamp a hole result to EOF */
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}

/*
 * llseek entry point: delegate the classic whence values to the generic
 * helper and route SEEK_DATA/SEEK_HOLE to the f2fs block walker.
 */
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (whence == SEEK_SET || whence == SEEK_CUR || whence == SEEK_END)
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
418 419
/*
 * mmap entry point: make sure encryption keys are loaded and inline data
 * is converted to regular blocks before installing the f2fs vm_ops.
 */
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (f2fs_encrypted_inode(inode)) {
		int err = f2fs_get_encryption_info(inode);
		/*
		 * NOTE(review): a failure here returns 0 (success) rather
		 * than an error, so the mapping proceeds without key info —
		 * confirm this is the intended behavior for keyless mmap.
		 */
		if (err)
			return 0;
	}

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

440 441 442 443 444 445 446 447 448 449 450 451
/*
 * open entry point: after the generic open checks, an encrypted inode
 * must have its encryption info available or the open fails with -EACCES.
 */
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);

	if (ret)
		return ret;
	if (!f2fs_encrypted_inode(inode))
		return 0;
	if (f2fs_get_encryption_info(inode))
		return -EACCES;
	return 0;
}

452
/*
 * Invalidate up to @count block addresses in the dnode starting at
 * dn->ofs_in_node: each valid address is cleared in the node page and
 * its segment bitmap entry released.  The extent cache and the inode's
 * valid-block count are updated once at the end.  dn->ofs_in_node is
 * restored before returning.  Returns the number of blocks freed.
 */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		/* freeing the inode's first data block drops its hint flag */
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
						F2FS_I(dn->inode)) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	/* restore the caller's cursor position */
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

/* Invalidate every data block address held by the current dnode. */
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

501
/*
 * Zero the tail of the page containing byte offset @from (the partial
 * page left after a truncate).  With @cache_only set, only an already
 * cached, uptodate page is touched (and not dirtied for encrypted
 * regular files); otherwise the page is read in and zeroed on disk too.
 */
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	/* page-aligned truncate with no cached page: nothing to zero */
	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

532
/*
 * Free all blocks of @inode past byte offset @from: the partial range in
 * the dnode containing @from, then every deeper node block via
 * truncate_inode_blocks().  Inline-data inodes are truncated in place.
 * @lock controls whether f2fs_lock_op() guards the block-freeing phase.
 * Finally the partial last page is zeroed.  Returns 0 or -errno.
 */
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	/* first wholly-truncated block index */
	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		/* let truncate_partial_data_page() zero only a cached page */
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		/* -ENOENT: no dnode there, skip straight to the node walk */
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	/* free the tail of the dnode holding free_from */
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

596
/*
 * Truncate @inode to its current i_size, converting inline data first if
 * it no longer qualifies, then updating the timestamps.  No-op for inode
 * types other than regular files, directories and symlinks.
 */
int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}

622
/*
 * getattr entry point.  Fills @stat from the in-core inode, then scales
 * the block count; the <<= 3 converts filesystem blocks into the
 * 512-byte units st_blocks expects (assumes 4KB blocks — 4096/512 == 8).
 */
int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
/*
 * Copy validated attributes from @attr into @inode, like setattr_copy(),
 * but defer the mode change through set_acl_inode() so ACL state stays
 * consistent (the final mode is applied in f2fs_setattr()).
 */
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		/* strip setgid when the caller may not preserve it */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

/*
 * setattr entry point: handles size changes (shrink truncates blocks,
 * grow only updates i_size without trimming), copies the remaining
 * attributes, and propagates a mode change through POSIX ACLs.
 */
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		/* cannot resize without the encryption key loaded */
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode));
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (f2fs_has_inline_data(inode) &&
					!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		/* on failure, fall back to the mode stashed by set_acl_inode */
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

/* Inode operations for regular f2fs files. */
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

Chao Yu's avatar
Chao Yu committed
729
/*
 * Zero @len bytes starting at @start inside data page @index of @inode,
 * allocating the page if needed and marking it dirty.  A zero @len is a
 * no-op.  Returns 0 or the PTR_ERR of the failed page allocation.
 */
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi);

	/* page allocation must not race with a checkpoint */
	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

/*
 * Free every allocated data block in the page range [pg_start, pg_end),
 * walking one dnode at a time; missing dnodes (-ENOENT) are already
 * holes and are skipped.  Returns 0 or the first hard lookup error.
 */
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				/* already a hole: advance past it */
				pg_start++;
				continue;
			}
			return err;
		}

		/* free at most to the end of this dnode or of the range */
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

785
/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at either edge of
 * [offset, offset+len) and deallocate every whole page in between,
 * leaving i_size unchanged.  Inline data is converted to regular
 * blocks first.  Returns 0 or a negative errno.
 */
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		/* hole lies within a single page: just zero it */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		/* zero the partial head page */
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		/* zero the partial tail page */
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			/* drop cached pages before freeing their blocks */
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

842 843
/*
 * Move the data block at page index @src to index @dst.  A source hole
 * becomes a hole at @dst (only punched when @full).  Non-checkpointed
 * source blocks are moved by re-pointing the block address in the node
 * page; checkpointed ones are copied page-by-page and the source is
 * then punched.  On error, the detached source address is restored.
 */
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		/* source is a hole */
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			/* detach the block from src so it can be re-linked */
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		/* drop whatever dst currently points at */
		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true);
		f2fs_put_dnode(&dn);
	} else {
		/* checkpointed data: copy the page content instead */
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, false);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	/* re-attach the detached block address to src */
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		f2fs_put_dnode(&dn);
	}
	return ret;
}
921

922 923 924 925 926 927 928 929 930 931
/*
 * Shift every data block from index @end..EOF down to @start.. by moving
 * them one page at a time under f2fs_lock_op(), for collapse-range.
 * Returns 0 or the first move error.
 */
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, end, start, true);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}
	return ret;
}

/*
 * FALLOC_FL_COLLAPSE_RANGE: remove [offset, offset+len) and shift the
 * rest of the file down, shrinking i_size by @len.  Both offset and len
 * must be block aligned and the range must end before EOF.
 */
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	/* free the now-unused tail blocks and publish the new size */
	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		i_size_write(inode, new_size);

	return ret;
}

Chao Yu's avatar
Chao Yu committed
988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset &