Commit ea1754a0 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage

Mostly direct substitution with occasional adjustment or removing
outdated comments.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 09cbfeaf
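For orientation: every rename in the hunks below is a 1:1 substitution, because the old PAGE_CACHE_* names were plain aliases of the base page macros. A minimal sketch of those aliases, roughly as they appeared in include/linux/pagemap.h before the parent commit (09cbfeaf) removed them — illustrative only, not part of this diff, and the exact historical definitions may differ slightly:

/*
 * Historical compatibility aliases (illustrative sketch).  The page cache
 * has always been sized in units of the base page, so each old name maps
 * directly onto its PAGE_* counterpart and each helper onto the plain
 * page reference functions:
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

This is why the remaining comment and documentation fixups below can replace PAGE_CACHE_SIZE with PAGE_SIZE and page_cache_release() with put_page() without any change in behaviour.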
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
 which the timestamp reverts to 1970, i.e. moves backwards in time.
 Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
 == 4096. At least the latter of these is a bug, but it hasn't been
 decided what the best fix is. For the moment if you have larger pages
 you can just change the #define in mkcramfs.c, so long as you don't
......
@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
 default is half of your physical RAM without swap. If you
 oversize your tmpfs instances the machine will deadlock
 since the OOM handler will not be able to free that memory.
-nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
+nr_blocks: The same as size, but in blocks of PAGE_SIZE.
 nr_inodes: The maximum number of inodes for this instance. The default
 is half of the number of your physical RAM pages, or (on a
 machine with highmem) the number of lowmem RAM pages,
......
@@ -708,9 +708,9 @@ struct address_space_operations {
 from the address space. This generally corresponds to either a
 truncation, punch hole or a complete invalidation of the address
 space (in the latter case 'offset' will always be 0 and 'length'
-will be PAGE_CACHE_SIZE). Any private data associated with the page
+will be PAGE_SIZE). Any private data associated with the page
 should be updated to reflect this truncation. If offset is 0 and
-length is PAGE_CACHE_SIZE, then the private data should be released,
+length is PAGE_SIZE, then the private data should be released,
 because the page must be able to be completely discarded. This may
 be done by calling the ->releasepage function, but in this case the
 release MUST succeed.
......
@@ -22,7 +22,7 @@
 #include <linux/swap.h>
 #include <linux/unistd.h>
 #include <linux/nodemask.h> /* for node_online_map */
-#include <linux/pagemap.h> /* for release_pages and page_cache_release */
+#include <linux/pagemap.h> /* for release_pages */
 #include <linux/compat.h>
 #include <asm/pgalloc.h>
......
@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on. It will run one page_cache_release() against each page and will
- * run one bio_put() against the BIO.
+ * here on. It will run one put_page() against each page and will run one
+ * bio_put() against the BIO.
 */
 static void bio_dirty_fn(struct work_struct *work);
......
@@ -1327,8 +1327,8 @@ struct bm_extent {
 #endif
 #endif
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit us to a platform agnostic constant here for now.
 * A followup commit may allow even bigger BIO sizes,
......
@@ -514,7 +514,7 @@ typedef struct {
 /**
 * Starting offset of the fragment within the page. Note that the
 * end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ * kiov_len + kiov_offset <= PAGE_SIZE.
 */
 unsigned int kiov_offset;
 } lnet_kiov_t;
......
@@ -390,8 +390,8 @@ typedef struct sfw_test_instance {
 } tsi_u;
 } sfw_test_instance_t;
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
 #define SFW_MAX_CONCUR LST_MAX_CONCUR
 #define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
 #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
......
@@ -1118,7 +1118,7 @@ struct lu_context_key {
 { \
 type *value; \
 \
-CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
+CLASSERT(PAGE_SIZE >= sizeof (*value)); \
 \
 value = kzalloc(sizeof(*value), GFP_NOFS); \
 if (!value) \
......
@@ -1022,10 +1022,10 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
 */
 #define LU_PAGE_SHIFT 12
 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
......
@@ -112,8 +112,8 @@
 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
 # endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
 # endif
 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
 # error "PTLRPC_MAX_BRW_SIZE too big"
......
@@ -272,7 +272,7 @@ struct client_obd {
 int cl_grant_shrink_interval; /* seconds */
 /* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+ * the extent size. A chunk is max(PAGE_SIZE, OST block size)
 */
 int cl_chunkbits;
 int cl_chunk;
......
@@ -134,9 +134,8 @@
 * a header lu_dirpage which describes the start/end hash, and whether this
 * page is empty (contains no dir entry) or hash collide with next page.
 * After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
 *
 */
......
@@ -521,7 +521,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
 * striped over, rather than having a constant value for all files here.
 */
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
 * by default, this should be adjusted corresponding with max_read_ahead_mb
 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
......
@@ -512,7 +512,7 @@ static int vvp_io_read_start(const struct lu_env *env,
 vio->cui_ra_window_set = 1;
 bead->lrr_start = cl_index(obj, pos);
 /*
- * XXX: explicit PAGE_CACHE_SIZE
+ * XXX: explicit PAGE_SIZE
 */
 bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
 ll_ra_read_in(file, bead);
......
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
 * |s|e|f|p|ent| 0 | ... | 0 |
 * '----------------- -----'
 *
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
 * larger than LU_PAGE_SIZE, a single host page may contain multiple
 * lu_dirpages. After reading the lu_dirpages from the MDS, the
 * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
 * to the first entry of the next lu_dirpage.
 */
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
 static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 {
 int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 }
 #else
 #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 struct lmv_obd *lmv = &obd->u.lmv;
 __u64 offset = op_data->op_offset;
 int rc;
-int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+int ncfspgs; /* pages read in PAGE_SIZE */
 int nlupgs; /* pages read in LU_PAGE_SIZE */
 struct lmv_tgt_desc *tgt;
......
@@ -47,7 +47,6 @@
 #include "../../include/lustre/lustre_idl.h"
 #include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
 void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 {
......
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
 * used, we should return these grants to OST. There're two cases where grants
 * can be lost:
 * 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
 * written. In this case OST may use less chunks to serve this partial
 * write. OSTs don't actually know the page size on the client side. so
 * clients have to calculate lost grant by the blocksize on the OST.
......
@@ -3039,13 +3039,13 @@ int btrfsic_mount(struct btrfs_root *root,
 if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
 printk(KERN_INFO
-"btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+"btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
 root->nodesize, PAGE_SIZE);
 return -1;
 }
 if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
 printk(KERN_INFO
-"btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+"btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
 root->sectorsize, PAGE_SIZE);
 return -1;
 }
......
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
 goto done;
 }
 /*
- * delalloc_end is already one less than the total
- * length, so we don't subtract one from
- * PAGE_CACHE_SIZE
+ * delalloc_end is already one less than the total length, so
+ * we don't subtract one from PAGE_SIZE
 */
 delalloc_to_write += (delalloc_end - delalloc_start +
-PAGE_SIZE) >>
-PAGE_SHIFT;
+PAGE_SIZE) >> PAGE_SHIFT;
 delalloc_start = delalloc_end + 1;
 }
 if (wbc->nr_to_write < delalloc_to_write) {
......
@@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
 \
 if (token && token->kaddr && token->offset <= offset && \
 token->eb == eb && \
-(token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+(token->offset + PAGE_SIZE >= offset + size)) { \
 kaddr = token->kaddr; \
 p = kaddr + part_offset - token->offset; \
 res = get_unaligned_le##bits(p + off); \
@@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
 \
 if (token && token->kaddr && token->offset <= offset && \
 token->eb == eb && \
-(token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+(token->offset + PAGE_SIZE >= offset + size)) { \
 kaddr = token->kaddr; \
 p = kaddr + part_offset - token->offset; \
 put_unaligned_le##bits(val, p + off); \
......
@@ -239,7 +239,7 @@ static int test_find_delalloc(void)
 end = 0;
 /*
 * Currently if we fail to find dirty pages in the delalloc range we
- * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If
+ * will adjust max_bytes down to PAGE_SIZE and then re-search. If
 * this changes at any point in the future we will need to fix this
 * tests expected behavior.
 */
......
@@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
 *
 * Note that this might make for "interesting" allocation problems during
 * writeback however as we have to allocate an array of pointers for the
- * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
 *
 * For reads, there is a similar problem as we need to allocate an array
 * of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
 /*
 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
 * a single wsize request with a single call.
 */
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
......
@@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
 * find_get_pages_tag seems to return a max of 256 on each
 * iteration, so we must call it several times in order to
 * fill the array or the wsize is effectively limited to
- * 256 * PAGE_CACHE_SIZE.
+ * 256 * PAGE_SIZE.
 */
 *found_pages = 0;
 pages = wdata->pages;
......
@@ -86,26 +86,26 @@ Block Size
 (Block size in cramfs refers to the size of input data that is
 compressed at a time. It's intended to be somewhere around
-PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_readpage's convenience.)
 The superblock ought to indicate the block size that the fs was
 written for, since comments in <linux/pagemap.h> indicate that
-PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+PAGE_SIZE may grow in future (if I interpret the comment
 correctly).
-Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
-for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
 turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
 This discrepancy is a bug, though it's not clear which should be
 changed.
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+One option is to change mkcramfs to take its PAGE_SIZE from
 <asm/page.h>. Personally I don't like this option, but it does
 require the least amount of change: just change `#define
-PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
+PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
 is that the generated cramfs cannot always be shared between different
 kernels, not even necessarily kernels of the same architecture if
-PAGE_CACHE_SIZE is subject to change between kernel versions
+PAGE_SIZE is subject to change between kernel versions
 (currently possible with arm and ia64).
 The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@ size. The options are:
 1. Always 4096 bytes.
 2. Writer chooses blocksize; kernel adapts but rejects blocksize >
-PAGE_CACHE_SIZE.
+PAGE_SIZE.
 3. Writer chooses blocksize; kernel adapts even to blocksize >
-PAGE_CACHE_SIZE.
+PAGE_SIZE.
 It's easy enough to change the kernel to use a smaller value than
-PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_readpage read multiple blocks.
-The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+The cost of option 1 is that kernels with a larger PAGE_SIZE
 value don't get as good compression as they can.
 The cost of option 2 relative to option 1 is that the code uses
 variables instead of #define'd constants. The gain is that people
-with kernels having larger PAGE_CACHE_SIZE can make use of that if
+with kernels having larger PAGE_SIZE can make use of that if
 they don't mind their cramfs being inaccessible to kernels with
-smaller PAGE_CACHE_SIZE values.
+smaller PAGE_SIZE values.
 Option 3 is easy to implement if we don't mind being CPU-inefficient:
 e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
......
@@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
- * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+ * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 */
......
@@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks. Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks. Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
......
@@ -763,8 +763,8 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
 } else { /* ia->ia_size < i_size_read(inode) */
 /* We're chopping off all the pages down to the page
 * in which ia->ia_size is located. Fill in the end of
- * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
- * PAGE_CACHE_SIZE with zeros. */
+ * that page from (ia->ia_size & ~PAGE_MASK) to
+ * PAGE_SIZE with zeros. */
 size_t num_zeros = (PAGE_SIZE
 - (ia->ia_size & ~PAGE_MASK));
......
@@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
 unsigned len = le16_to_cpu(dlen);
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 if (len == EXT2_MAX_REC_LEN)
 return 1 << 16;
 #endif
@@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 if (len == (1 << 16))
 return cpu_to_le16(EXT2_MAX_REC_LEN);
 else
......
@@ -1961,7 +1961,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
 {
 unsigned len = le16_to_cpu(dlen);
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 if (len == EXT4_MAX_REC_LEN || len == 0)
 return blocksize;
 return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1974,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
 {
 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
 BUG();
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 if (len < 65536)
 return cpu_to_le16(len);
 if (len == blocksize) {
......
@@ -4894,7 +4894,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
 offset = inode->i_size & (PAGE_SIZE - 1);
 /*
 * All buffers in the last page remain valid? Then there's nothing to
- * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+ * do. We do the check mainly to optimize the common PAGE_SIZE ==
 * blocksize case
 */
 if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
......
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
 *
 *
 * one block each for bitmap and buddy information. So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
+ * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2
 *
@@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
+ * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
......
@@ -23,7 +23,7 @@
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */
......
@@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
 /*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. Its *very* similar to do_generic_mapping_read(), we can't use that
- * since it has PAGE_CACHE_SIZE assumptions.
+ * since it has PAGE_SIZE assumptions.
 */
 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
......
@@ -331,7 +331,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,