Commit 4fc29c1a authored by Linus Torvalds

Merge tag 'for-f2fs-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "The major change in this version is mitigating cpu overheads on write
  paths by replacing redundant inode page updates with mark_inode_dirty
  calls.  And we tried to reduce lock contentions as well to improve
  filesystem scalability.  Other feature is setting F2FS automatically
  when detecting host-managed SMR.

  Enhancements:
   - ioctl to move a range of data between files
   - inject orphan inode errors
   - avoid flush commands congestion
   - support lazytime

  Bug fixes:
   - return proper results for some dentry operations
   - fix deadlock in add_link failure
   - disable extent_cache for fcollapse/finsert"

* tag 'for-f2fs-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (68 commits)
  f2fs: clean up coding style and redundancy
  f2fs: get victim segment again after new cp
  f2fs: handle error case with f2fs_bug_on
  f2fs: avoid data race when deciding checkpoint in f2fs_sync_file
  f2fs: support an ioctl to move a range of data blocks
  f2fs: fix to report error number of f2fs_find_entry
  f2fs: avoid memory allocation failure due to a long length
  f2fs: reset default idle interval value
  f2fs: use blk_plug in all the possible paths
  f2fs: fix to avoid data update racing between GC and DIO
  f2fs: add maximum prefree segments
  f2fs: disable extent_cache for fcollapse/finsert inodes
  f2fs: refactor __exchange_data_block for speed up
  f2fs: fix ERR_PTR returned by bio
  f2fs: avoid mark_inode_dirty
  f2fs: move i_size_write in f2fs_write_end
  f2fs: fix to avoid redundant discard during fstrim
  f2fs: avoid mismatching block range for discard
  f2fs: fix incorrect f_bfree calculation in ->statfs
  f2fs: use percpu_rw_semaphore
  ...
parents 0e6acf02 5302fb00
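Most patches in this series share one theme, summarized in the message
above: instead of rewriting an inode's on-disk node page at every metadata
change (update_inode_page()), the inode is merely flagged dirty and the
node page is rewritten once, later, at writeback or checkpoint time.  A
minimal sketch of that pattern, assuming the f2fs helpers visible in the
diff below (illustrative only, not the exact f2fs implementation):

        /* Sketch only: defer the on-disk inode update. */
        static void f2fs_update_isize_sketch(struct inode *inode, loff_t size)
        {
                i_size_write(inode, size);
                /* old pattern: update_inode_page(inode); rewrote the
                 * node page synchronously on every change */
                mark_inode_dirty_sync(inode);   /* new: just mark dirty */
        }

The f2fs_i_size_write() helper introduced by this series wraps this kind
of i_size_write() plus mark-dirty sequence.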
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -109,7 +109,9 @@ background_gc=%s       Turn on/off cleaning operations, namely garbage
 disable_roll_forward   Disable the roll-forward recovery routine
 norecovery             Disable the roll-forward recovery routine, mounted read-
                        only (i.e., -o ro,disable_roll_forward)
-discard                Issue discard/TRIM commands when a segment is cleaned.
+discard/nodiscard      Enable/disable real-time discard in f2fs, if discard is
+                       enabled, f2fs will issue discard/TRIM commands when a
+                       segment is cleaned.
 no_heap                Disable heap-style segment allocation which finds free
                        segments for data from the beginning of main area, while
                        for node from the end of main area.
@@ -151,6 +153,9 @@ noinline_data          Disable the inline data feature, inline data feature is
                        enabled by default.
 data_flush             Enable data flushing before checkpoint in order to
                        persist data of regular and symlink.
+mode=%s                Control block allocation mode which supports "adaptive"
+                       and "lfs". In "lfs" mode, there should be no random
+                       writes towards main area.
 ================================================================================
 DEBUGFS ENTRIES
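For illustration, the new options combine at mount time in the usual way;
the device and mount point below are placeholders:

        # mount -t f2fs -o discard,mode=lfs /dev/sdb1 /mnt/f2fs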
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -201,7 +201,6 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
 static int __f2fs_set_acl(struct inode *inode, int type,
                        struct posix_acl *acl, struct page *ipage)
 {
-        struct f2fs_inode_info *fi = F2FS_I(inode);
         int name_index;
         void *value = NULL;
         size_t size = 0;
@@ -214,7 +213,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
                 error = posix_acl_equiv_mode(acl, &inode->i_mode);
                 if (error < 0)
                         return error;
-                set_acl_inode(fi, inode->i_mode);
+                set_acl_inode(inode, inode->i_mode);
                 if (error == 0)
                         acl = NULL;
         }
@@ -233,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
         if (acl) {
                 value = f2fs_acl_to_disk(acl, &size);
                 if (IS_ERR(value)) {
-                        clear_inode_flag(fi, FI_ACL_MODE);
+                        clear_inode_flag(inode, FI_ACL_MODE);
                         return (int)PTR_ERR(value);
                 }
         }
@@ -244,7 +243,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
         if (!error)
                 set_cached_acl(inode, type, acl);

-        clear_inode_flag(fi, FI_ACL_MODE);
+        clear_inode_flag(inode, FI_ACL_MODE);
         return error;
 }
@@ -385,6 +384,8 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
         if (error)
                 return error;

+        f2fs_mark_inode_dirty_sync(inode);
+
         if (default_acl) {
                 error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
                                                                 ipage);
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -37,7 +37,7 @@ struct f2fs_acl_header {
 #ifdef CONFIG_F2FS_FS_POSIX_ACL

 extern struct posix_acl *f2fs_get_acl(struct inode *, int);
-extern int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
 extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
                                                         struct page *);
 #else
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -48,7 +48,8 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
                 goto repeat;
         }
         f2fs_wait_on_page_writeback(page, META, true);
-        SetPageUptodate(page);
+        if (!PageUptodate(page))
+                SetPageUptodate(page);
         return page;
 }
@@ -266,6 +267,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
                                 struct writeback_control *wbc)
 {
         struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+        struct blk_plug plug;
         long diff, written;

         /* collect a number of dirty meta pages and write together */
@@ -278,7 +280,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
         /* if mounting is failed, skip writing node pages */
         mutex_lock(&sbi->cp_mutex);
         diff = nr_pages_to_write(sbi, META, wbc);
+        blk_start_plug(&plug);
         written = sync_meta_pages(sbi, META, wbc->nr_to_write);
+        blk_finish_plug(&plug);
         mutex_unlock(&sbi->cp_mutex);
         wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
         return 0;
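The blk_plug added above batches the bios issued by sync_meta_pages() so
the block layer can merge adjacent requests before they are dispatched to
the device.  Reduced to a sketch (the wrapper function is a hypothetical
stand-in for the real writeback path):

        #include <linux/blkdev.h>

        static void write_meta_batch(struct f2fs_sb_info *sbi,
                                        struct writeback_control *wbc)
        {
                struct blk_plug plug;

                blk_start_plug(&plug);  /* hold bios on a per-task list */
                sync_meta_pages(sbi, META, wbc->nr_to_write);
                blk_finish_plug(&plug); /* unplug: dispatch merged I/O */
        }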
@@ -366,9 +370,10 @@ static int f2fs_set_meta_page_dirty(struct page *page)
 {
         trace_f2fs_set_page_dirty(page, META);

-        SetPageUptodate(page);
+        if (!PageUptodate(page))
+                SetPageUptodate(page);
         if (!PageDirty(page)) {
-                __set_page_dirty_nobuffers(page);
+                f2fs_set_page_dirty_nobuffers(page);
                 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
                 SetPagePrivate(page);
                 f2fs_trace_pid(page);
@@ -510,10 +515,11 @@ void release_orphan_inode(struct f2fs_sb_info *sbi)
         spin_unlock(&im->ino_lock);
 }

-void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+void add_orphan_inode(struct inode *inode)
 {
         /* add new orphan ino entry into list */
-        __add_ino_entry(sbi, ino, ORPHAN_INO);
+        __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+        update_inode_page(inode);
 }

 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
@@ -761,28 +767,25 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
 static void __add_dirty_inode(struct inode *inode, enum inode_type type)
 {
         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-        struct f2fs_inode_info *fi = F2FS_I(inode);
         int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

-        if (is_inode_flag_set(fi, flag))
+        if (is_inode_flag_set(inode, flag))
                 return;

-        set_inode_flag(fi, flag);
-        list_add_tail(&fi->dirty_list, &sbi->inode_list[type]);
+        set_inode_flag(inode, flag);
+        list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
         stat_inc_dirty_inode(sbi, type);
 }

 static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
 {
-        struct f2fs_inode_info *fi = F2FS_I(inode);
         int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

-        if (get_dirty_pages(inode) ||
-                        !is_inode_flag_set(F2FS_I(inode), flag))
+        if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
                 return;

-        list_del_init(&fi->dirty_list);
-        clear_inode_flag(fi, flag);
+        list_del_init(&F2FS_I(inode)->dirty_list);
+        clear_inode_flag(inode, flag);
         stat_dec_dirty_inode(F2FS_I_SB(inode), type);
 }
@@ -795,13 +798,12 @@ void update_dirty_page(struct inode *inode, struct page *page)
                         !S_ISLNK(inode->i_mode))
                 return;

-        if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) {
-                spin_lock(&sbi->inode_lock[type]);
+        spin_lock(&sbi->inode_lock[type]);
+        if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
                 __add_dirty_inode(inode, type);
-                spin_unlock(&sbi->inode_lock[type]);
-        }
-
-        inode_inc_dirty_pages(inode);
+        inode_inc_dirty_pages(inode);
+        spin_unlock(&sbi->inode_lock[type]);

         SetPagePrivate(page);
         f2fs_trace_pid(page);
 }
@@ -864,6 +866,34 @@ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
         goto retry;
 }

+int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
+{
+        struct list_head *head = &sbi->inode_list[DIRTY_META];
+        struct inode *inode;
+        struct f2fs_inode_info *fi;
+        s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
+
+        while (total--) {
+                if (unlikely(f2fs_cp_error(sbi)))
+                        return -EIO;
+
+                spin_lock(&sbi->inode_lock[DIRTY_META]);
+                if (list_empty(head)) {
+                        spin_unlock(&sbi->inode_lock[DIRTY_META]);
+                        return 0;
+                }
+                fi = list_entry(head->next, struct f2fs_inode_info,
+                                                        gdirty_list);
+                inode = igrab(&fi->vfs_inode);
+                spin_unlock(&sbi->inode_lock[DIRTY_META]);
+                if (inode) {
+                        update_inode_page(inode);
+                        iput(inode);
+                }
+        };
+        return 0;
+}
+
 /*
  * Freeze all the FS-operations for checkpoint.
  */
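f2fs_sync_inode_meta() above uses the standard trick for walking an inode
list protected only by a spinlock when the per-inode work may sleep: pin
the inode with igrab() while the lock is held, drop the lock, do the work,
then iput().  A reduced sketch of one iteration (the names mirror the code
above; the wrapper itself is illustrative):

        static void sync_one_inode(struct f2fs_sb_info *sbi)
        {
                struct f2fs_inode_info *fi;
                struct inode *inode = NULL;

                spin_lock(&sbi->inode_lock[DIRTY_META]);
                if (!list_empty(&sbi->inode_list[DIRTY_META])) {
                        fi = list_first_entry(&sbi->inode_list[DIRTY_META],
                                        struct f2fs_inode_info, gdirty_list);
                        /* may fail if the inode is being evicted */
                        inode = igrab(&fi->vfs_inode);
                }
                spin_unlock(&sbi->inode_lock[DIRTY_META]);

                if (inode) {
                        update_inode_page(inode);       /* may sleep */
                        iput(inode);                    /* drop igrab() ref */
                }
        }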
@@ -890,6 +920,14 @@ static int block_operations(struct f2fs_sb_info *sbi)
                 goto retry_flush_dents;
         }

+        if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+                f2fs_unlock_all(sbi);
+                err = f2fs_sync_inode_meta(sbi);
+                if (err)
+                        goto out;
+                goto retry_flush_dents;
+        }
+
         /*
          * POR: we should ensure that there are no dirty node pages
          * until finishing nat/sit flush.
@@ -914,6 +952,8 @@ static int block_operations(struct f2fs_sb_info *sbi)
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
         up_write(&sbi->node_write);
+
+        build_free_nids(sbi);
         f2fs_unlock_all(sbi);
 }
@@ -954,7 +994,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
          * This avoids to conduct wrong roll-forward operations and uses
          * metapages, so should be called prior to sync_meta_pages below.
          */
-        if (discard_next_dnode(sbi, discard_blk))
+        if (!test_opt(sbi, LFS) && discard_next_dnode(sbi, discard_blk))
                 invalidate = true;

         /* Flush all the NAT/SIT pages */
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,6 +19,8 @@
 #include <linux/bio.h>
 #include <linux/prefetch.h>
 #include <linux/uio.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
 #include <linux/cleancache.h>

 #include "f2fs.h"
@@ -45,7 +47,8 @@ static void f2fs_read_end_io(struct bio *bio)
                 struct page *page = bvec->bv_page;

                 if (!bio->bi_error) {
-                        SetPageUptodate(page);
+                        if (!PageUptodate(page))
+                                SetPageUptodate(page);
                 } else {
                         ClearPageUptodate(page);
                         SetPageError(page);
@@ -97,10 +100,15 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
         return bio;
 }

-static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio)
+static inline void __submit_bio(struct f2fs_sb_info *sbi,
+                                struct bio *bio, enum page_type type)
 {
-        if (!is_read_io(bio_op(bio)))
+        if (!is_read_io(bio_op(bio))) {
                 atomic_inc(&sbi->nr_wb_bios);
+                if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
+                                current->plug && (type == DATA || type == NODE))
+                        blk_finish_plug(current->plug);
+        }
         submit_bio(bio);
 }
@@ -118,7 +126,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
         bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

-        __submit_bio(io->sbi, io->bio);
+        __submit_bio(io->sbi, io->bio, fio->type);
         io->bio = NULL;
 }
@@ -240,7 +248,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
-        bio->bi_rw = fio->op_flags;
+        bio_set_op_attrs(bio, fio->op, fio->op_flags);

-        __submit_bio(fio->sbi, bio);
+        __submit_bio(fio->sbi, bio, fio->type);
         return 0;
 }
@@ -326,7 +334,7 @@ int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
         if (!count)
                 return 0;

-        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                 return -EPERM;
         if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
                 return -ENOSPC;
@@ -348,9 +356,6 @@ int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
         if (set_page_dirty(dn->node_page))
                 dn->node_changed = true;
-
-        mark_inode_dirty(dn->inode);
-        sync_inode_page(dn);
         return 0;
 }
@@ -446,7 +451,8 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
          */
         if (dn.data_blkaddr == NEW_ADDR) {
                 zero_user_segment(page, 0, PAGE_SIZE);
-                SetPageUptodate(page);
+                if (!PageUptodate(page))
+                        SetPageUptodate(page);
                 unlock_page(page);
                 return page;
         }
@@ -505,14 +511,14 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
         /* wait for read completion */
         lock_page(page);
-        if (unlikely(!PageUptodate(page))) {
-                f2fs_put_page(page, 1);
-                return ERR_PTR(-EIO);
-        }
         if (unlikely(page->mapping != mapping)) {
                 f2fs_put_page(page, 1);
                 goto repeat;
         }
+        if (unlikely(!PageUptodate(page))) {
+                f2fs_put_page(page, 1);
+                return ERR_PTR(-EIO);
+        }
         return page;
 }
@@ -557,7 +563,8 @@ struct page *get_new_data_page(struct inode *inode,
         if (dn.data_blkaddr == NEW_ADDR) {
                 zero_user_segment(page, 0, PAGE_SIZE);
-                SetPageUptodate(page);
+                if (!PageUptodate(page))
+                        SetPageUptodate(page);
         } else {
                 f2fs_put_page(page, 1);
@@ -569,11 +576,8 @@ struct page *get_new_data_page(struct inode *inode,
         }
 got_it:
         if (new_i_size && i_size_read(inode) <
-                        ((loff_t)(index + 1) << PAGE_SHIFT)) {
-                i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
-                /* Only the directory inode sets new_i_size */
-                set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
-        }
+                        ((loff_t)(index + 1) << PAGE_SHIFT))
+                f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
         return page;
 }
@@ -586,7 +590,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
         pgoff_t fofs;
         blkcnt_t count = 1;

-        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                 return -EPERM;

         dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
@@ -611,7 +615,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
         fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
                                                         dn->ofs_in_node;
         if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
-                i_size_write(dn->inode,
+                f2fs_i_size_write(dn->inode,
                                 ((loff_t)(fofs + 1) << PAGE_SHIFT));
         return 0;
 }
@@ -660,7 +664,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
         unsigned int maxblocks = map->m_len;
         struct dnode_of_data dn;
         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-        int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
+        int mode = create ? ALLOC_NODE : LOOKUP_NODE;
         pgoff_t pgofs, end_offset, end;
         int err = 0, ofs = 1;
         unsigned int ofs_in_node, last_ofs_in_node;
@@ -723,8 +727,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                         } else {
                                 err = __allocate_data_block(&dn);
                                 if (!err) {
-                                        set_inode_flag(F2FS_I(inode),
-                                                FI_APPEND_WRITE);
+                                        set_inode_flag(inode, FI_APPEND_WRITE);
                                         allocated = true;
                                 }
                         }
@@ -795,8 +798,6 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
         else if (dn.ofs_in_node < end_offset)
                 goto next_block;

-        if (allocated)
-                sync_inode_page(&dn);
         f2fs_put_dnode(&dn);

         if (create) {
@@ -807,8 +808,6 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
         goto next_dnode;

 sync_out:
-        if (allocated)
-                sync_inode_page(&dn);
         f2fs_put_dnode(&dn);
 unlock_out:
         if (create) {
@@ -968,6 +967,37 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
         return ret;
 }

+struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+                                                        unsigned nr_pages)
+{
+        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+        struct fscrypt_ctx *ctx = NULL;
+        struct block_device *bdev = sbi->sb->s_bdev;
+        struct bio *bio;
+
+        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+                ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+                if (IS_ERR(ctx))
+                        return ERR_CAST(ctx);
+
+                /* wait the page to be moved by cleaning */
+                f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+        }
+
+        bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+        if (!bio) {
+                if (ctx)
+                        fscrypt_release_ctx(ctx);
+                return ERR_PTR(-ENOMEM);
+        }
+        bio->bi_bdev = bdev;
+        bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
+        bio->bi_end_io = f2fs_read_end_io;
+        bio->bi_private = ctx;
+
+        return bio;
+}
+
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
@@ -986,7 +1016,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
         sector_t last_block;
         sector_t last_block_in_file;
         sector_t block_nr;
-        struct block_device *bdev = inode->i_sb->s_bdev;
         struct f2fs_map_blocks map;

         map.m_pblk = 0;
@@ -1047,7 +1076,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
                         }
                 } else {
                         zero_user_segment(page, 0, PAGE_SIZE);
-                        SetPageUptodate(page);
+                        if (!PageUptodate(page))
+                                SetPageUptodate(page);
                         unlock_page(page);
                         goto next_page;
                 }
@@ -1058,35 +1088,15 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
                  */
                 if (bio && (last_block_in_bio != block_nr - 1)) {
 submit_and_realloc:
-                        __submit_bio(F2FS_I_SB(inode), bio);
+                        __submit_bio(F2FS_I_SB(inode), bio, DATA);
                         bio = NULL;
                 }
                 if (bio == NULL) {
-                        struct fscrypt_ctx *ctx = NULL;
-
-                        if (f2fs_encrypted_inode(inode) &&
-                                        S_ISREG(inode->i_mode)) {
-                                ctx = fscrypt_get_ctx(inode, GFP_NOFS);
-                                if (IS_ERR(ctx))
-                                        goto set_error_page;
-
-                                /* wait the page to be moved by cleaning */
-                                f2fs_wait_on_encrypted_page_writeback(
-                                                F2FS_I_SB(inode), block_nr);
-                        }
-
-                        bio = bio_alloc(GFP_KERNEL,
-                                min_t(int, nr_pages, BIO_MAX_PAGES));
-                        if (!bio) {
-                                if (ctx)
-                                        fscrypt_release_ctx(ctx);
+                        bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+                        if (IS_ERR(bio)) {
+                                bio = NULL;
                                 goto set_error_page;
                         }
-                        bio->bi_bdev = bdev;
-                        bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
-                        bio->bi_end_io = f2fs_read_end_io;
-                        bio->bi_private = ctx;
                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
                 }
@@ -1102,7 +1112,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
                 goto next_page;
 confused:
                 if (bio) {
-                        __submit_bio(F2FS_I_SB(inode), bio);
+                        __submit_bio(F2FS_I_SB(inode), bio, DATA);
                         bio = NULL;
                 }
                 unlock_page(page);
@@ -1112,7 +1122,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
         }
         BUG_ON(pages && !list_empty(pages));
         if (bio)
-                __submit_bio(F2FS_I_SB(inode), bio);
+                __submit_bio(F2FS_I_SB(inode), bio, DATA);
         return 0;
 }
@@ -1201,14 +1211,14 @@ int do_write_data_page(struct f2fs_io_info *fio)
                         !IS_ATOMIC_WRITTEN_PAGE(page) &&
                         need_inplace_update(inode))) {
                 rewrite_data_page(fio);
-                set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+                set_inode_flag(inode, FI_UPDATE_WRITE);
                 trace_f2fs_do_write_data_page(page, IPU);
         } else {
                 write_data_page(&dn, fio);
                 trace_f2fs_do_write_data_page(page, OPU);
-                set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+                set_inode_flag(inode, FI_APPEND_WRITE);
                 if (page->index == 0)
-                        set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+                        set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
         }
 out_writepage:
         f2fs_put_dnode(&dn);
@@ -1223,6 +1233,7 @@ static int f2fs_write_data_page(struct page *page,
         loff_t i_size = i_size_read(inode);
         const pgoff_t end_index = ((unsigned long long) i_size)
                                                         >> PAGE_SHIFT;
+        loff_t psize = (page->index + 1) << PAGE_SHIFT;
         unsigned offset = 0;
         bool need_balance_fs = false;
         int err = 0;
@@ -1260,20 +1271,18 @@ static int f2fs_write_data_page(struct page *page,
                         available_free_memory(sbi, BASE_CHECK))))
                 goto redirty_out;

+        /* we should bypass data pages to proceed the kworkder jobs */
+        if (unlikely(f2fs_cp_error(sbi))) {
+                mapping_set_error(page->mapping, -EIO);
+                goto out;
+        }
+
         /* Dentry blocks are controlled by checkpoint */
         if (S_ISDIR(inode->i_mode)) {
-                if (unlikely(f2fs_cp_error(sbi)))
-                        goto redirty_out;
                 err = do_write_data_page(&fio);
                 goto done;
         }

-        /* we should bypass data pages to proceed the kworkder jobs */
-        if (unlikely(f2fs_cp_error(sbi))) {
-                SetPageError(page);
-                goto out;
-        }
-
         if (!wbc->for_reclaim)
                 need_balance_fs = true;
         else if (has_not_enough_free_secs(sbi, 0))
@@ -1285,6 +1294,8 @@ static int f2fs_write_data_page(struct page *page,
                 err = f2fs_write_inline_data(inode, page);
                 if (err == -EAGAIN)
                         err = do_write_data_page(&fio);
+                if (F2FS_I(inode)->last_disk_size < psize)
+                        F2FS_I(inode)->last_disk_size = psize;
                 f2fs_unlock_op(sbi);
 done:
         if (err && err != -ENOENT)
@@ -1311,16 +1322,8 @@ static int f2fs_write_data_page(struct page *page,
 redirty_out:
         redirty_page_for_writepage(wbc, page);
-        return AOP_WRITEPAGE_ACTIVATE;
-}
-
-static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
-                                                        void *data)
-{
-        struct address_space *mapping = data;
-        int ret = mapping->a_ops->writepage(page, wbc);
-        mapping_set_error(mapping, ret);
-        return ret;
+        unlock_page(page);
+        return err;
 }

 /*
@@ -1329,8 +1332,7 @@ static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
  * warm/hot data page.
  */
 static int f2fs_write_cache_pages(struct address_space *mapping,
-                                        struct writeback_control *wbc, writepage_t writepage,
-                                        void *data)
+                                        struct writeback_control *wbc)
 {
         int ret = 0;
         int done = 0;
@@ -1343,10 +1345,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
         int cycled;
         int range_whole = 0;
         int tag;
-        int step = 0;

         pagevec_init(&pvec, 0);
-next: