file.c 39 KB
Newer Older
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1
/*
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2 3 4 5 6 7 8 9 10 11 12 13 14 15
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
16
#include <linux/blkdev.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
17 18
#include <linux/falloc.h>
#include <linux/types.h>
19
#include <linux/compat.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
20 21
#include <linux/uaccess.h>
#include <linux/mount.h>
22
#include <linux/pagevec.h>
23
#include <linux/random.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
24 25 26 27 28 29

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
30
#include "gc.h"
Jaegeuk Kim's avatar
Jaegeuk Kim committed
31
#include "trace.h"
32
#include <trace/events/f2fs.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
33 34 35 36 37

/*
 * Page-fault handler for writes to a mapped page: make the page writable
 * by reserving an on-disk block for it, then re-validate and dirty it.
 * Returns a VM_FAULT_* code via block_page_mkwrite_return().
 */
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	/* may trigger GC/checkpoint to guarantee free space before we allocate */
	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	/* inline inodes must be converted before mmap write faults reach here */
	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	/* the page may have been truncated/reclaimed while we allocated */
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		/* zero the tail beyond i_size so stale data is never exposed */
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	/* note: returns with the page still locked on success (VM_FAULT_LOCKED) */
	return block_page_mkwrite_return(err);
}

/*
 * VM operations for f2fs file mappings: generic page-cache fault handling,
 * plus our own mkwrite hook to allocate blocks on first write fault.
 */
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

103 104 105 106 107 108 109 110 111 112
/*
 * Look up the parent directory's inode number via any cached dentry alias
 * of @inode. Also refreshes the on-disk dentry's inode block first so the
 * recorded name stays consistent.
 *
 * Returns 1 and stores the parent ino in *@pino on success, 0 otherwise.
 */
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *alias;
	int found = 0;

	inode = igrab(inode);
	alias = d_find_any_alias(inode);
	iput(inode);
	if (!alias)
		return 0;

	if (!update_dent_inode(inode, inode, &alias->d_name)) {
		*pino = parent_ino(alias);
		found = 1;
	}
	dput(alias);
	return found;
}

123 124
static inline bool need_do_checkpoint(struct inode *inode)
{
125
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
126 127 128 129
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
130 131
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
132 133 134 135 136 137 138 139
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
140 141
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
142 143
	else if (sbi->active_logs == 2)
		need_cp = true;
144 145 146 147

	return need_cp;
}

148 149 150 151 152 153 154 155 156 157 158
/*
 * Report whether inode @ino still has a pending node-page update:
 * either its cached node page is dirty, or an inode block update is queued.
 */
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *node_page = find_get_page(NODE_MAPPING(sbi), ino);
	bool pending;

	/* But we need to avoid that there are some inode updates */
	pending = (node_page && PageDirty(node_page)) ||
			need_inode_block_update(sbi, ino);
	f2fs_put_page(node_page, 0);
	return pending;
}

159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
/*
 * After a checkpoint has secured consistency, repair a stale parent-ino
 * (pino) hint on @inode so that later fsyncs can rely on it, and reset
 * the xattr version. Writes the inode back if the pino was fixed.
 */
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		/* drop i_sem before the potentially blocking inode writeback */
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
179 180 181
/*
 * fsync/fdatasync implementation.
 *
 * Writes back dirty data in [start, end], then either (a) triggers a full
 * checkpoint when roll-forward recovery cannot restore this inode, or
 * (b) syncs just the inode's node pages plus recovery info and issues a
 * flush. Returns 0 or a negative errno.
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	/* nothing to persist on a read-only filesystem */
	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		/* only overwrites happened: a flush is enough */
		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;

	/* keep syncing until the inode's node block is stable on disk */
	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to tack this */
	remove_dirty_inode(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_dirty_inode(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	/* push device caches so the data actually hits stable media */
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

285 286 287 288 289 290 291 292 293 294 295
/*
 * For SEEK_DATA, return the index of the first dirty page at or after
 * @pgofs (dirty-but-unallocated pages count as data). Returns LONG_MAX
 * when no dirty page exists, and 0 for any other whence.
 */
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	pgoff_t first_dirty = LONG_MAX;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	if (pagevec_lookup_tag(&pvec, mapping, &pgofs,
				PAGECACHE_TAG_DIRTY, 1))
		first_dirty = pvec.pages[0]->index;
	pagevec_release(&pvec);
	return first_dirty;
}

/*
 * Decide whether the block at page @pgofs (disk address @blkaddr) satisfies
 * the SEEK_DATA / SEEK_HOLE query. @dirty is the first dirty page index,
 * so a NEW_ADDR (allocated-but-unwritten) block counts as data only when
 * it is actually dirty in the page cache.
 */
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	if (whence == SEEK_DATA)
		return (blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR);
	if (whence == SEEK_HOLE)
		return blkaddr == NULL_ADDR;
	return false;
}

320 321 322 323 324
/*
 * Implement SEEK_DATA / SEEK_HOLE by walking the inode's dnode blocks.
 * Returns the new file position via vfs_setpos(), or -ENXIO when no
 * matching offset exists before i_size.
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		/* inline payload is all data: a hole starts only at EOF */
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exists */
			if (whence == SEEK_DATA) {
				/* skip the whole missing dnode's range */
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	/* clamp SEEK_HOLE result to EOF (hole may start past i_size) */
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}

/*
 * llseek entry point: classic whences go through the generic helper,
 * SEEK_DATA/SEEK_HOLE use the f2fs block walker, anything else is -EINVAL.
 */
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t max_size = inode->i_sb->s_maxbytes;

	if (whence == SEEK_SET || whence == SEEK_CUR || whence == SEEK_END)
		return generic_file_llseek_size(file, offset, whence,
						max_size, i_size_read(inode));

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
413 414
/*
 * mmap entry point: set up encryption keys and convert inline data before
 * installing the f2fs VM operations.
 */
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (f2fs_encrypted_inode(inode)) {
		int err = f2fs_get_encryption_info(inode);
		if (err)
			/*
			 * NOTE(review): returns success even though key setup
			 * failed, so the mapping is installed without a usable
			 * key — confirm this is intentional (faults would fail
			 * later) rather than `return err`.
			 */
			return 0;
	}

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

435 436 437 438 439 440 441 442 443 444 445 446
/*
 * open entry point: after the generic open checks, load the encryption
 * key for encrypted inodes; any key-setup failure is reported as -EACCES.
 */
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = generic_file_open(inode, filp);

	if (err)
		return err;
	if (!f2fs_encrypted_inode(inode))
		return 0;
	if (f2fs_get_encryption_info(inode))
		return -EACCES;
	return 0;
}

447
/*
 * Invalidate up to @count block addresses in the dnode starting at
 * dn->ofs_in_node: clear each slot on disk, release the segment blocks,
 * update the extent cache and block count, and dirty the node page.
 *
 * Returns the number of block addresses actually freed.
 */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		/* clear the slot in the node page before releasing the block */
		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		/* the inode's very first block is being dropped */
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
						F2FS_I(dn->inode)) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	/* restore the caller's starting offset */
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

/* Free every data block address held by this direct node block. */
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

496
/*
 * Zero the tail of the page containing offset @from (the partial last page
 * after a truncate). With @cache_only, only an already-cached, up-to-date
 * page is touched and it is not dirtied for encrypted regular files
 * (their cached contents cannot simply be rewritten). Always returns 0.
 */
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	/* page-aligned truncate with nothing cached: no partial page exists */
	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = grab_cache_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

527
/*
 * Free all data blocks from byte offset @from (rounded up to a block
 * boundary) to the end of the file, handling inline data specially, then
 * zero any partial last page. @lock controls whether f2fs_lock_op() is
 * taken here (callers already inside a cp-protected region pass false).
 *
 * Returns 0 or a negative errno.
 */
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	/* first block index fully beyond @from */
	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		/* inline payload lives in the inode page; no block walk needed */
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	/* free the tail of the dnode that contains free_from */
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	/* free all remaining whole node blocks */
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

591
/*
 * Truncate @inode's blocks down to its current i_size and update the
 * timestamps. Inline inodes whose data no longer qualifies for inline
 * storage are converted first. @lock is forwarded to truncate_blocks().
 *
 * Returns 0 or a negative errno.
 */
int f2fs_truncate(struct inode *inode, bool lock)
{
	int ret;

	/* only regular files, directories, and symlinks carry data blocks */
	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = truncate_blocks(inode, i_size_read(inode), lock);
	if (ret)
		return ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}

617
/*
 * getattr entry point: fill generic attributes, then convert the block
 * count from f2fs 4KB blocks to the 512-byte sectors kstat expects
 * (hence the <<= 3).
 */
int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
/*
 * Copy validated iattr fields into the inode, like setattr_copy(), but
 * stage the mode in the f2fs inode info so the ACL code in f2fs_setattr()
 * can reconcile it (FI_ACL_MODE) instead of writing i_mode directly.
 */
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		/* strip setgid unless the caller is in the group or privileged */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

/*
 * setattr entry point: handle size changes (truncate down or extend
 * i_size), copy the remaining attributes, and rerun ACL-based mode
 * fixups when the mode changed. Returns 0 or a negative errno.
 */
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		/* truncating encrypted data requires the key to be loaded */
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			/* truncation may have freed enough to need rebalancing */
			f2fs_balance_fs(F2FS_I_SB(inode));
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		/* on failure, fall back to the mode staged by __setattr_copy() */
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

/* Inode operations for f2fs regular files (attrs, ACLs, xattrs, fiemap). */
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

Chao Yu's avatar
Chao Yu committed
715
/*
 * Zero @len bytes starting at byte @start inside data page @index,
 * allocating the page (and its block reservation) if needed, and mark it
 * dirty. A zero @len is a no-op. Returns 0 or a negative errno.
 */
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi);

	/* lock_op only around the allocation; zeroing happens after */
	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

/*
 * Punch a hole over pages [pg_start, pg_end): release the on-disk block
 * behind every allocated page in the range. Pages whose dnode is absent
 * are already holes and are skipped. Returns 0 or a negative errno.
 */
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t pg;

	for (pg = pg_start; pg < pg_end; pg++) {
		struct dnode_of_data dn;
		int ret;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, pg, LOOKUP_NODE);
		if (ret == -ENOENT)
			continue;	/* already a hole */
		if (ret)
			return ret;

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}

763
/*
 * FALLOC_FL_PUNCH_HOLE implementation: zero the partial pages at either
 * end of [offset, offset+len) and free the blocks behind the whole pages
 * in between. Returns 0 or a negative errno.
 */
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	/* inline data cannot represent holes; spill it to a real block first */
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		/* the hole lives entirely inside one page: just zero it */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			/* drop cached pages before freeing their blocks */
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

820 821 822 823 824 825 826 827 828 829
/*
 * Core of FALLOC_FL_COLLAPSE_RANGE: for every page from @end to the last
 * page of the file, move its block down to the corresponding @start slot,
 * shifting the file contents left by (end - start) pages. Each iteration
 * runs under its own f2fs_lock_op() window. Returns 0 or a negative errno.
 */
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		/* detach the source block from its old slot at @end */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			new_addr = NULL_ADDR;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		if (new_addr == NULL_ADDR) {
			/* source is a hole: make the destination a hole too */
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
			if (ret && ret != -ENOENT) {
				goto out;
			} else if (ret == -ENOENT) {
				f2fs_unlock_op(sbi);
				continue;
			}

			if (dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_unlock_op(sbi);
				continue;
			} else {
				truncate_data_blocks_range(&dn, 1);
			}

			f2fs_put_dnode(&dn);
		} else {
			struct page *ipage;

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				goto out;
			}

			/* make sure the destination slot at @start exists */
			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, start);
			if (ret)
				goto out;

			old_addr = dn.data_blkaddr;
			if (old_addr != NEW_ADDR && new_addr == NEW_ADDR) {
				/* source was allocated-but-unwritten: just
				 * drop the old block and record NEW_ADDR */
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
				invalidate_blocks(sbi, old_addr);

				dn.data_blkaddr = new_addr;
				set_data_blkaddr(&dn);
			} else if (new_addr != NEW_ADDR) {
				struct node_info ni;

				get_node_info(sbi, dn.nid, &ni);
				/* rewire SIT/SSA metadata to the moved block */
				f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
			}

			f2fs_put_dnode(&dn);
		}
		f2fs_unlock_op(sbi);
	}
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}

/*
 * FALLOC_FL_COLLAPSE_RANGE: remove [offset, offset+len) from the file and
 * shift the remainder left, shrinking i_size by @len. Both offset and len
 * must be block-aligned and the range must end before EOF.
 */
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	/* all cached pages past offset are stale once blocks move */
	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	new_size = i_size_read(inode) - len;

	/* drop the now-duplicated tail blocks, then shrink i_size */
	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		i_size_write(inode, new_size);

	return ret;
}

Chao Yu's avatar
Chao Yu committed
947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981
/*
 * FALLOC_FL_ZERO_RANGE: make [offset, offset+len) read back as zeroes.
 * Partial edge pages are zeroed in the page cache; whole pages in between
 * get their blocks invalidated and re-marked NEW_ADDR (unwritten) under
 * lock_op. Unless FALLOC_FL_KEEP_SIZE is set, i_size grows to cover the
 * zeroed range. Returns 0 or a negative errno.
 *
 * Fix: the single-page branch carried a redundant
 *   if (offset + len > new_size) new_size = offset + len;
 * immediately followed by the equivalent max_t() assignment; only the
 * max_t() form is kept (identical semantics).
 */
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	/* cached copies of the range are about to become stale */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		/* whole range fits inside one page: zero it in the cache */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_CACHE_SHIFT);
		}

		/* full pages: drop the old block, leave a NEW_ADDR slot */
		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);

				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);

				/* drop any extent cache entry for this block */
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}

Chao Yu's avatar
Chao Yu committed
1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(sbi);

1076 1077 1078 1079 1080 1081
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

Chao Yu's avatar
Chao Yu committed
1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 <