file.c 52.6 KB
Newer Older
1
/*
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2 3 4 5 6 7 8 9 10 11 12 13 14 15
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
16
#include <linux/blkdev.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
17 18
#include <linux/falloc.h>
#include <linux/types.h>
19
#include <linux/compat.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
20 21
#include <linux/uaccess.h>
#include <linux/mount.h>
22
#include <linux/pagevec.h>
23
#include <linux/uuid.h>
24
#include <linux/file.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
25 26 27 28 29 30

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
31
#include "gc.h"
Jaegeuk Kim's avatar
Jaegeuk Kim committed
32
#include "trace.h"
33
#include <trace/events/f2fs.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
34 35 36 37 38

/*
 * Page-fault handler for writes to an mmap()ed f2fs file.
 *
 * Reserves a block for the faulting page under f2fs_lock_op(), then
 * revalidates the page under its lock and dirties it.  Returns a VM_FAULT_*
 * code via block_page_mkwrite_return(err).
 */
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	/* inline data must have been converted before mmap'ed writes */
	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	/* rebalance only if the reservation dirtied a node page */
	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vma->vm_file);
	lock_page(page);
	/* recheck under page lock: the page may have been truncated/reclaimed */
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		/* zero the tail beyond i_size so stale data never hits disk */
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}

/* VMA operations installed by f2fs_file_mmap(); only the write-fault
 * path needs an f2fs-specific handler, reads use generic filemap code. */
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

111 112 113 114 115 116 117 118 119 120
/*
 * Look up the parent inode number of @inode via any cached dentry alias.
 *
 * Returns 1 and stores the parent ino in *pino on success; returns 0 when
 * no alias exists or the dentry's inode entry cannot be updated.
 */
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/* pin the inode while probing the dcache for an alias */
	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

131 132
static inline bool need_do_checkpoint(struct inode *inode)
{
133
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
134 135 136 137
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
138 139
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
140 141 142 143 144 145 146 147
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
148 149
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
150 151
	else if (sbi->active_logs == 2)
		need_cp = true;
152 153 154 155

	return need_cp;
}

156 157 158 159 160 161 162 163 164 165 166
/*
 * Report whether inode @ino still has pending inode-page updates:
 * either its node page is dirty in the node mapping, or an inode
 * block update is queued.
 */
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *node_page = find_get_page(NODE_MAPPING(sbi), ino);
	bool pending;

	/* But we need to avoid that there are some inode updates */
	pending = (node_page && PageDirty(node_page)) ||
				need_inode_block_update(sbi, ino);
	f2fs_put_page(node_page, 0);
	return pending;
}

167 168 169 170 171 172 173 174 175
/*
 * After a checkpoint, repair a stale parent-ino hint on @inode so later
 * fsyncs can rely on it.  Also resets xattr_ver under the same i_sem
 * write lock.
 */
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	/* only fixable for singly-linked inodes whose parent can be found */
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

182 183
/*
 * Core fsync/fdatasync implementation.
 *
 * Writes back data pages in [start, end], then either (a) skips recovery
 * info when nothing was appended/updated, (b) issues a full checkpoint
 * when need_do_checkpoint() says so, or (c) flushes node pages so the
 * inode can be recovered by roll-forward after sudden power-off.
 * @atomic is passed through to fsync_node_pages().
 */
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync && !f2fs_skip_inode_update(inode)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	/* retry until the inode block itself is stable on the node log */
	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	/* make sure device caches are flushed before returning success */
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

290 291 292 293 294
/* VFS ->fsync entry point: non-atomic variant of f2fs_do_sync_file(). */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

295 296 297 298 299 300 301 302 303 304 305
/*
 * For SEEK_DATA, return the index of the first dirty page at or after
 * @pgofs (dirty pages count as data even before writeback allocates a
 * block).  Returns ULONG_MAX when none is dirty, and 0 for other whence
 * values (the result is then ignored by __found_offset()).
 */
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

/*
 * Check whether block address @blkaddr at page offset @pgofs satisfies
 * a SEEK_DATA or SEEK_HOLE probe.  @dirty is the first dirty page index
 * from __get_first_dirty_index(); a NEW_ADDR block only counts as data
 * when its page is dirty.
 */
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	if (whence == SEEK_DATA)
		return (blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR);

	if (whence == SEEK_HOLE)
		return blkaddr == NULL_ADDR;

	return false;
}

330 331 332 333 334
/*
 * Implement SEEK_DATA / SEEK_HOLE by walking direct-node blocks from
 * @offset toward i_size and testing each block address with
 * __found_offset().  Holds the inode lock for the whole scan.
 * Returns the new file position or -ENXIO when nothing matches.
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case: everything lives in the inode block */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist: a whole-node hole */
			if (whence == SEEK_DATA) {
				/* skip past the missing node in one step */
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	/* SEEK_HOLE never returns past EOF */
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

/*
 * VFS ->llseek: delegate the classic whence values to the generic
 * helper and route SEEK_DATA/SEEK_HOLE to the f2fs block scanner.
 */
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (whence == SEEK_SET || whence == SEEK_CUR || whence == SEEK_END)
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
422 423
/*
 * VFS ->mmap: ensure encryption info is loaded and inline data is
 * converted to a regular block layout, then install f2fs_file_vm_ops.
 */
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		/*
		 * NOTE(review): a failed fscrypt_get_encryption_info() is
		 * reported as success (0) here — presumably deliberate so the
		 * later page fault fails instead; confirm against fscrypt docs.
		 */
		err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!f2fs_encrypted_inode(inode))
			return -ENOKEY;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

445 446 447
/*
 * VFS ->open: generic open checks, plus fscrypt key loading for
 * encrypted inodes and an encryption-context policy check against
 * the parent directory.
 */
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);
	struct dentry *dir;

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		/* NOTE(review): the real error code is masked as -EACCES here */
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	/* deny open when the parent's encryption policy doesn't permit it */
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return ret;
}

467
/*
 * Invalidate up to @count block addresses starting at dn->ofs_in_node in
 * the current node page, updating the extent cache and valid-block count.
 * dn->ofs_in_node is restored before returning.
 *
 * Returns the number of blocks actually freed.
 */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		/* clear the on-disk address before invalidating the block */
		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	/* restore the caller's node offset */
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

/* Invalidate every data block addressed by the current node page. */
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

514
/*
 * Zero the tail of the page containing byte offset @from.
 *
 * With @cache_only set, only an already-cached, uptodate page is zeroed
 * (and left clean for encrypted regular files); otherwise the page is
 * read in via get_lock_data_page() and dirtied.  Returns 0 when there is
 * nothing to do or the page cannot be obtained.
 */
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	/* page-aligned offset with nothing cached: no partial page to fix */
	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) ||
					!S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

546
/*
 * Free all blocks of @inode beyond byte offset @from.
 *
 * Handles inline inodes, the partially-used direct node containing
 * @from, and all deeper node blocks via truncate_inode_blocks().
 * @lock selects whether f2fs_lock_op() must be taken here.
 */
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	/* first block index wholly beyond @from (round up) */
	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	/* nothing allocated past the file-size limit; only the tail page */
	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		/* tail zeroing must go through the cached page only */
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	/* free the tail of the node block that straddles @from */
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

613
/*
 * Truncate @inode to its current i_size, converting oversized inline
 * data first.  No-op for inode types other than regular files,
 * directories and symlinks.  Updates mtime/ctime and marks the inode
 * dirty on success.
 */
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	f2fs_mark_inode_dirty_sync(inode);
	return 0;
}

639
int f2fs_getattr(struct vfsmount *mnt,
Jaegeuk Kim's avatar
Jaegeuk Kim committed
640 641
			 struct dentry *dentry, struct kstat *stat)
{
642
	struct inode *inode = d_inode(dentry);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
671
		set_acl_inode(inode, mode);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
672 673 674 675 676 677 678 679
	}
}
#else
#define __setattr_copy setattr_copy
#endif

/*
 * VFS ->setattr: handle size changes (shrink via f2fs_truncate, grow by
 * extending i_size and converting inline data if needed), then copy the
 * remaining attributes and apply any POSIX ACL mode change.
 */
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		/* truncating encrypted data requires the key to be available */
		if (f2fs_encrypted_inode(inode) &&
				fscrypt_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode), true);
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		/* on failure or pending ACL mode, restore the saved mode */
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	f2fs_mark_inode_dirty_sync(inode);
	return err;
}

/* inode operations for regular f2fs files */
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

Chao Yu's avatar
Chao Yu committed
743
/*
 * Zero @len bytes starting at @start within page @index of @inode,
 * allocating the page under f2fs_lock_op() if needed and dirtying it.
 * Returns 0 on success or the PTR_ERR of the failed page allocation.
 */
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

/*
 * Free all data blocks in the page range [pg_start, pg_end) of @inode,
 * one direct-node block at a time.  Missing direct nodes (-ENOENT) are
 * already holes and are simply skipped.
 */
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				/* no node here: already a hole, move on */
				pg_start++;
				continue;
			}
			return err;
		}

		/* clamp the batch to the end of this node block or the range */
		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

799
/*
 * Punch a hole of @len bytes at @offset: zero the partial pages at both
 * edges with fill_zero(), then drop the fully-covered pages from the
 * page cache and free their blocks via truncate_hole().
 */
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	/* hole punching operates on block-mapped data only */
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		/* the whole hole fits inside one page */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			/* drop cached pages before freeing the on-disk blocks */
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

854 855
/*
 * Read out @len block addresses of @inode starting at page offset @off
 * into @blkaddr.  Non-checkpointed blocks have their on-disk address
 * detached (set to NULL_ADDR without invalidation) and are flagged in
 * @do_replace so __clone_blkaddrs() can move them by address.
 * Returns -ENOTSUPP under LFS mount, where in-place moves are not allowed.
 */
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		/* missing direct node: the whole node-worth is a hole */
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
899

900 901 902 903 904 905
/*
 * Undo __read_out_blkaddrs(): re-attach each detached block address in
 * @blkaddr[0..len) back to @inode at page offset @off + i.  If the dnode
 * cannot be found any more, the saved block is invalidated instead and
 * the inode's block count corrected.
 */
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			/* dnode gone: release the orphaned block instead */
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

/*
 * Move @len blocks described by @blkaddr/@do_replace from @src_inode
 * (page offset @src) to @dst_inode (page offset @dst).
 *
 * Detached blocks (do_replace set) and holes are transplanted by address
 * via f2fs_replace_block(); checkpointed blocks are copied page-by-page
 * and then freed from the source.  With @full, holes are materialized in
 * the destination as well.  Grows dst i_size as blocks land past EOF.
 */
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			/* skip holes unless the caller wants them allocated */
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			/* batch as far as this destination node block allows */
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.node_page,
								dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					/* transfer block ownership src -> dst */
					f2fs_i_blocks_write(src_inode,
								1, false);
					f2fs_i_blocks_write(dst_inode,
								1, true);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while ((do_replace[i] || blkaddr[i] == NULL_ADDR) && --ilen);

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			/* checkpointed data: copy through the page cache */
			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
1001

1002 1003
/*
 * Exchange/move @len blocks from @src_inode:@src to @dst_inode:@dst,
 * processing the range in chunks of at most 4 direct-node blocks.
 *
 * Per chunk: snapshot the source block addresses (__read_out_blkaddrs),
 * then transplant or copy them (__clone_blkaddrs).  On failure, the
 * detached addresses of the current chunk are re-attached.
 *
 * Returns 0 on success or a negative errno.
 */
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	/*
	 * Only this chunk's arrays exist and only its addresses were read
	 * out, so roll back olen entries, not the remaining len: using len
	 * would read past the end of src_blkaddr[]/do_replace[].
	 */
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}