Commit 09cbfeaf authored by Kirill A. Shutemov, committed by Linus Torvalds

mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros

PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the
page cache with bigger chunks than PAGE_SIZE.

This promise never materialized, and it is unlikely that it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE.  And it's a constant source of confusion whether a PAGE_CACHE_*
or PAGE_* constant should be used in a particular case, especially on the
border between fs and mm.
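
For reference, the pre-patch definitions in include/linux/pagemap.h are
plain aliases of their PAGE_* counterparts (paraphrased here, not copied
verbatim):

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK

	#define page_cache_get(page)		get_page(page)
	#define page_cache_release(page)	put_page(page)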

Switching globally to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in page cache are special.  They are
not.

The changes are pretty straightforward (a before/after sketch follows the list):

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();
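
As an illustration only (this helper is invented for the example and is
not part of the patch; it assumes <linux/pagemap.h>), the conversion in a
typical filesystem-style helper looks like this:

	/* Before: page-cache spelling */
	static void drop_cached_page(struct address_space *mapping, loff_t pos)
	{
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		struct page *page = find_get_page(mapping, index);

		if (page)
			page_cache_release(page);
	}

	/* After: identical behaviour, PAGE_* spelling */
	static void drop_cached_page(struct address_space *mapping, loff_t pos)
	{
		pgoff_t index = pos >> PAGE_SHIFT;
		struct page *page = find_get_page(mapping, index);

		if (page)
			put_page(page);
	}

Since the old names are defined as aliases of the new ones (see above),
both versions compile to the same object code; only the spelling changes.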

This patch contains automated changes generated with coccinelle using the
script below.  For some reason, coccinelle doesn't patch header files, so
I've run spatch on them manually.

The only adjustment after coccinelle is reverting the change to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach.  I'll
fix them manually in a separate patch.  Comments and documentation will
also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c05c2ec9
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
 	/* kernel reading from page with U-mapping */
 	phys_addr_t paddr = (unsigned long)page_address(page);
-	unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+	unsigned long vaddr = page->index << PAGE_SHIFT;
 	if (addr_not_cache_congruent(paddr, vaddr))
 		__flush_dcache_page(paddr, vaddr);
...
@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 */
 	if (mapping && cache_is_vipt_aliasing())
 		flush_pfn_alias(page_to_pfn(page),
-				page->index << PAGE_CACHE_SHIFT);
+				page->index << PAGE_SHIFT);
 }
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	 * data in the current VM view associated with this page.
 	 * - aliasing VIPT: we only need to find one mapping of this page.
 	 */
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page->index;
 	flush_dcache_mmap_lock(mapping);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
...
@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
 	if (!mapping)
 		return;
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page->index;
 	/* We have carefully arranged in arch_get_unmapped_area() that
 	 * *any* mappings of a file are always congruently mapped (whether
...
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = SPUFS_MAGIC;
 	sb->s_op = &s_ops;
 	sb->s_fs_info = info;
...
@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->uid = current_uid();
 	sbi->gid = current_gid();
 	sb->s_fs_info = sbi;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = HYPFS_MAGIC;
 	sb->s_op = &hypfs_s_ops;
 	if (hypfs_parse_options(data, sb))
...
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		 * release the pages we didn't map into the bio, if any
 		 */
 		while (j < page_limit)
-			page_cache_release(pages[j++]);
+			put_page(pages[j++]);
 	}
 	kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	for (j = 0; j < nr_pages; j++) {
 		if (!pages[j])
 			break;
-		page_cache_release(pages[j]);
+		put_page(pages[j]);
 	}
 out:
 	kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
 		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
-		page_cache_release(bvec->bv_page);
+		put_page(bvec->bv_page);
 	}
 	bio_put(bio);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
 		struct page *page = bvec->bv_page;
 		if (PageDirty(page) || PageCompound(page)) {
-			page_cache_release(page);
+			put_page(page);
 			bvec->bv_page = NULL;
 		} else {
 			nr_clean_pages++;
...
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 		goto fail_id;
 	q->backing_dev_info.ra_pages =
-			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
 	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->backing_dev_info.name = "block";
 	q->node = node_id;
...
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 	struct queue_limits *limits = &q->limits;
 	unsigned int max_sectors;
-	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
 		       __func__, max_hw_sectors);
 	}
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
 **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-	if (max_size < PAGE_CACHE_SIZE) {
-		max_size = PAGE_CACHE_SIZE;
+	if (max_size < PAGE_SIZE) {
+		max_size = PAGE_SIZE;
 		printk(KERN_INFO "%s: set to minimum %d\n",
 		       __func__, max_size);
 	}
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 **/
 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
-	if (mask < PAGE_CACHE_SIZE - 1) {
-		mask = PAGE_CACHE_SIZE - 1;
+	if (mask < PAGE_SIZE - 1) {
+		mask = PAGE_SIZE - 1;
 		printk(KERN_INFO "%s: set to minimum %lx\n",
 		       __func__, mask);
 	}
...
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
 	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
-					(PAGE_CACHE_SHIFT - 10);
+					(PAGE_SHIFT - 10);
 	return queue_var_show(ra_kb, (page));
 }
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 	return ret;
 }
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
-	return queue_var_show(PAGE_CACHE_SIZE, (page));
+	return queue_var_show(PAGE_SIZE, (page));
 }
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long max_sectors_kb,
 		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+			page_kb = 1 << (PAGE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 	if (ret < 0)
...
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * idle timer unplug to continue working.
 	 */
 	if (cfq_cfqq_wait_request(cfqq)) {
-		if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+		if (blk_rq_bytes(rq) > PAGE_SIZE ||
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
...
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 			return -EINVAL;
 		bdi = blk_get_backing_dev_info(bdev);
 		return compat_put_long(arg,
-			       (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+			       (bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKROGET: /* compatible */
 		return compat_put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKGETSIZE:
 		size = i_size_read(bdev->bd_inode);
...
@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		if (!arg)
 			return -EINVAL;
 		bdi = blk_get_backing_dev_info(bdev);
-		return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+		return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKROGET:
 		return put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		if(!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKBSZSET:
 		return blkdev_bszset(bdev, mode, argp);
...
@@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
-	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
				 NULL);
 }
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 		if (PageError(page))
 			goto fail;
 		p->v = page;
-		return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+		return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
 fail:
-		page_cache_release(page);
+		put_page(page);
 	}
 	p->v = NULL;
 	return NULL;
...
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->flags & DEVFL_UP);
 	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
 	q->backing_dev_info.name = "aoe";
-	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
 	d->bufpool = mp;
 	d->blkq = gd->queue = q;
 	q->queuedata = d;
...
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
-	int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
 	page_endio(page, rw & WRITE, err);
 	return err;
 }
...
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 	blk_queue_max_hw_sectors(q, max_hw_sectors);
 	/* This is the workaround for "bio would need to, but cannot, be split" */
 	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
-	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+	blk_queue_segment_boundary(q, PAGE_SIZE-1);
 	if (b) {
 		struct drbd_connection *connection = first_peer_device(device)->connection;
...
@@ -616,7 +616,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
			set_page_dirty(page);
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	sg_free_table(ttm->sg);
...
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 release:
 	for_each_sg(sgt->sgl, sg, num, i)
-		page_cache_release(sg_page(sg));
+		put_page(sg_page(sg));
 free_table:
 	sg_free_table(sgt);
 free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
 	if (dobj->obj.filp) {
 		struct scatterlist *sg;
 		for_each_sg(sgt->sgl, sg, sgt->nents, i)
-			page_cache_release(sg_page(sg));
+			put_page(sg_page(sg));
 	}
 	sg_free_table(sgt);
...
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 fail:
 	while (i--)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	drm_free_large(pages);
 	return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
			mark_page_accessed(pages[i]);
 		/* Undo the reference we took when populating the table */
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	drm_free_large(pages);
...
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 		kunmap_atomic(src);
-		page_cache_release(page);
+		put_page(page);
 		vaddr += PAGE_SIZE;
 	}
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 		set_page_dirty(page);
 		if (obj->madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 		vaddr += PAGE_SIZE;
 	}
 	obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		if (obj->madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	obj->dirty = 0;
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_pages:
 	sg_mark_end(sg);
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		page_cache_release(sg_page_iter_page(&sg_iter));
+		put_page(sg_page_iter_page(&sg_iter));
 	sg_free_table(st);
 	kfree(st);
...
@@ -683,7 +683,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
			set_page_dirty(page);
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	obj->dirty = 0;
...
@@ -609,7 +609,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
			set_page_dirty(page);
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	sg_free_table(ttm->sg);
...
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
			goto out_err;
 		copy_highpage(to_page, from_page);
-		page_cache_release(from_page);
+		put_page(from_page);
 	}
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		copy_highpage(to_page, from_page);
 		set_page_dirty(to_page);