/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

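/*
 * Handle a write fault: reserve the block backing the faulting page, wait
 * for any writeback on it (including GCed encrypted pages) and dirty the
 * page so the write can proceed in place.
 */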
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

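/*
 * Find the parent inode number through any attached dentry. Returns 1 with
 * *pino set on success, or 0 if no alias exists or update_dent_inode() fails.
 */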
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

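/*
 * Decide whether this fsync must be backed by a full checkpoint instead of
 * only flushing node pages (e.g. non-regular or multi-link inodes, a stale
 * pino, no space left for roll-forward recovery, fastboot mode, or only two
 * active logs).
 */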
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to check whether there are any pending inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

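/*
 * Clear the xattr version and, if the inode records a wrong parent ino and
 * has a single link, refresh i_pino from the dentry and write the inode back
 * so roll-forward recovery can find the right parent.
 */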
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

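/*
 * fsync/fdatasync entry point: write back dirty data, then either trigger a
 * full checkpoint (when need_do_checkpoint() says so) or persist the node
 * pages and append/update markers needed for roll-forward recovery, and
 * finally issue a flush to the device.
 */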
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered from
	 * sudden power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, we don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

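/*
 * For SEEK_DATA, dirty pages that have not been written back yet must also
 * count as data, so return the first dirty page index at or after @pgofs
 * (LONG_MAX if there is none).
 */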
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

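/*
 * Implement SEEK_DATA/SEEK_HOLE by walking dnode blocks from the requested
 * offset and checking each block address (plus dirty pages for SEEK_DATA).
 */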
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		err = f2fs_get_encryption_info(inode);
		if (err)
			return 0;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = f2fs_get_encryption_info(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}

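/*
 * Invalidate up to @count block addresses in the current dnode starting at
 * dn->ofs_in_node, drop them from the extent cache and the valid block
 * count, and return the number of blocks actually freed.
 */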
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
						F2FS_I(dn->inode)) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

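/*
 * Zero the tail of the page that straddles the new EOF. With @cache_only
 * set, only touch the page if it is already up to date in the page cache.
 */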
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

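/*
 * Free all blocks beyond @from: handle inline data, truncate the partial
 * dnode at the boundary, drop the remaining node blocks, and finally zero
 * out the partial page at the new EOF.
 */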
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

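/*
 * Top-level truncate helper: convert away inline data when it is no longer
 * allowed, free the blocks beyond i_size and update the timestamps.
 */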
int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}

int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode), true);
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
715
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

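/*
 * Zero @len bytes at @start within the page at @index, allocating the page
 * if necessary; used by the hole-punching paths below.
 */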
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

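/*
 * Drop all block addresses in [pg_start, pg_end), one dnode block at a time,
 * leaving a hole behind.
 */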
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

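/*
 * FALLOC_FL_PUNCH_HOLE helper: zero the partial pages at both ends of the
 * range and truncate the whole pages in between.
 */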
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

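/*
 * Move the block at index @src to index @dst: either re-point the on-disk
 * block (when it is not protected by the last checkpoint) or copy the page
 * data, leaving a hole at the source.
 */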
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true);
		f2fs_put_dnode(&dn);
	} else {
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, false);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		f2fs_put_dnode(&dn);
	}
	return ret;
}

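/*
 * Shift the blocks that follow the collapsed range toward the start of the
 * file, one block at a time via __exchange_data_block(), under f2fs_lock_op().
 */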
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		f2fs_balance_fs(sbi, true);
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, end, start, true);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;