Commit 11bac800 authored by Dave Jiang, committed by Linus Torvalds

mm, fs: reduce fault, page_mkwrite, and pfn_mkwrite to take only vmf

->fault(), ->page_mkwrite(), and ->pfn_mkwrite() calls do not need to
take both a vma and a vmf parameter when the vma already resides in vmf.

Remove the vma parameter to simplify things.
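The conversion is mechanical: each handler drops its vma argument and reads
the VMA from vmf->vma instead. As a minimal sketch of the resulting handler
shape (foo_vm_fault and struct foo_data are hypothetical, for illustration
only; they are not part of this commit):

	/* The hooks affected by this change now share one prototype:
	 *   int (*fault)(struct vm_fault *vmf);
	 *   int (*page_mkwrite)(struct vm_fault *vmf);
	 *   int (*pfn_mkwrite)(struct vm_fault *vmf);
	 */
	static int foo_vm_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;	/* VMA now comes from vmf */
		struct foo_data *priv = vma->vm_private_data;
		struct page *page;

		if (vmf->pgoff >= priv->num_pages)
			return VM_FAULT_SIGBUS;

		page = priv->pages[vmf->pgoff];
		get_page(page);
		vmf->page = page;
		return 0;
	}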

[arnd@arndb.de: fix ARM build]
  Link: http://lkml.kernel.org/r/20170125223558.1451224-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/148521301778.19116.10840599906674778980.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 374ad05a
@@ -102,9 +102,9 @@ static void release_spapr_tce_table(struct rcu_head *head)
 	kfree(stt);
 }
 
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int kvm_spapr_tce_fault(struct vm_fault *vmf)
 {
-	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
 	struct page *page;
 
 	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
@@ -233,8 +233,9 @@ spufs_mem_write(struct file *file, const char __user *buffer,
 }
 
 static int
-spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mem_mmap_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct spu_context *ctx = vma->vm_file->private_data;
 	unsigned long pfn, offset;
@@ -311,12 +312,11 @@ static const struct file_operations spufs_mem_fops = {
 	.mmap = spufs_mem_mmap,
 };
 
-static int spufs_ps_fault(struct vm_area_struct *vma,
-			  struct vm_fault *vmf,
+static int spufs_ps_fault(struct vm_fault *vmf,
 			  unsigned long ps_offs,
 			  unsigned long ps_size)
 {
-	struct spu_context *ctx = vma->vm_file->private_data;
+	struct spu_context *ctx = vmf->vma->vm_file->private_data;
 	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
 	int ret = 0;
@@ -354,7 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 		down_read(&current->mm->mmap_sem);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
-		vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
+		vm_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
 	}
@@ -367,10 +367,9 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 }
 
 #if SPUFS_MMAP_4K
-static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
-				 struct vm_fault *vmf)
+static int spufs_cntl_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -1067,15 +1066,15 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
 }
 
 static int
-spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal1_mmap_fault(struct vm_fault *vmf)
 {
 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
-	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
 	 * signal 1 and 2 area
 	 */
-	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
@@ -1205,15 +1204,15 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
 #if SPUFS_MMAP_4K
 static int
-spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal2_mmap_fault(struct vm_fault *vmf)
 {
 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
-	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
 	 * signal 1 and 2 area
 	 */
-	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
@@ -1334,9 +1333,9 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
 #if SPUFS_MMAP_4K
 static int
-spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mss_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -1396,9 +1395,9 @@ static const struct file_operations spufs_mss_fops = {
 };
 
 static int
-spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_psmap_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
@@ -1456,9 +1455,9 @@ static const struct file_operations spufs_psmap_fops = {
 #if SPUFS_MMAP_4K
 static int
-spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mfc_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
@@ -3342,7 +3342,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
-static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int binder_vm_fault(struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
@@ -11,15 +11,14 @@
 #include "agp.h"
 
-static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
-				   struct vm_fault *vmf)
+static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
 {
 	alpha_agp_info *agp = agp_bridge->dev_private_data;
 	dma_addr_t dma_addr;
 	unsigned long pa;
 	struct page *page;
 
-	dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
+	dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
 	pa = agp->ops->translate(agp, dma_addr);
 
 	if (pa == (unsigned long)-EINVAL)
@@ -191,12 +191,12 @@ mspec_close(struct vm_area_struct *vma)
  * Creates a mspec page and maps it to user space.
  */
 static int
-mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+mspec_fault(struct vm_fault *vmf)
 {
 	unsigned long paddr, maddr;
 	unsigned long pfn;
 	pgoff_t index = vmf->pgoff;
-	struct vma_data *vdata = vma->vm_private_data;
+	struct vma_data *vdata = vmf->vma->vm_private_data;
 
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	if (maddr == 0) {
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * be because another thread has installed the pte first, so it
 	 * is no problem.
 	 */
-	vm_insert_pfn(vma, vmf->address, pfn);
+	vm_insert_pfn(vmf->vma, vmf->address, pfn);
 
 	return VM_FAULT_NOPAGE;
 }
@@ -419,8 +419,7 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
 	return -1;
 }
 
-static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
-			struct vm_fault *vmf)
+static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 {
 	struct device *dev = &dax_dev->dev;
 	struct dax_region *dax_region;
@@ -428,7 +427,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 	phys_addr_t phys;
 	pfn_t pfn;
 
-	if (check_vma(dax_dev, vma, __func__))
+	if (check_vma(dax_dev, vmf->vma, __func__))
 		return VM_FAULT_SIGBUS;
 
 	dax_region = dax_dev->region;
@@ -446,7 +445,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	rc = vm_insert_mixed(vma, vmf->address, pfn);
+	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
 
 	if (rc == -ENOMEM)
 		return VM_FAULT_OOM;
@@ -456,8 +455,9 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
-static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int dax_dev_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	int rc;
 	struct file *filp = vma->vm_file;
 	struct dax_dev *dax_dev = filp->private_data;
@@ -466,7 +466,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		current->comm, (vmf->flags & FAULT_FLAG_WRITE)
 			? "write" : "read", vma->vm_start, vma->vm_end);
 
 	rcu_read_lock();
-	rc = __dax_dev_fault(dax_dev, vma, vmf);
+	rc = __dax_dev_fault(dax_dev, vmf);
 	rcu_read_unlock();
 
 	return rc;
@@ -14,14 +14,15 @@
 #include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 
-static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int armada_gem_vm_fault(struct vm_fault *vmf)
 {
-	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
+	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
 	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 	int ret;
 
-	pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-	ret = vm_insert_pfn(vma, vmf->address, pfn);
+	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
 
 	switch (ret) {
 	case 0:
@@ -96,8 +96,9 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if IS_ENABLED(CONFIG_AGP)
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_local_map *map = NULL;
@@ -168,7 +169,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;	/* Disallow mremap */
 }
 #else
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_fault(struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
@@ -184,8 +185,9 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
-static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_shm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_local_map *map = vma->vm_private_data;
 	unsigned long offset;
 	unsigned long i;
@@ -280,14 +282,14 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 /**
  * \c fault method for DMA virtual memory.
  *
- * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
-static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_dma_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_device_dma *dma = dev->dma;
@@ -315,14 +317,14 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 /**
  * \c fault method for scatter-gather virtual memory.
  *
- * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
-static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_sg_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_local_map *map = vma->vm_private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -347,24 +349,24 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return 0;
 }
 
-static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_fault(vma, vmf);
+	return drm_do_vm_fault(vmf);
 }
 
-static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_shm_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_shm_fault(vma, vmf);
+	return drm_do_vm_shm_fault(vmf);
 }
 
-static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_dma_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_dma_fault(vma, vmf);
+	return drm_do_vm_dma_fault(vmf);
 }
 
-static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_sg_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_sg_fault(vma, vmf);
+	return drm_do_vm_sg_fault(vmf);
 }
 
 /** AGP virtual memory operations */
@@ -73,7 +73,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int etnaviv_gem_fault(struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
@@ -175,8 +175,9 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return obj->ops->mmap(obj, vma);
 }
 
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int etnaviv_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct page **pages, *page;
@@ -447,8 +447,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 	return ret;
 }
 
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int exynos_drm_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
 	unsigned long pfn;
@@ -116,7 +116,7 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 				   uint64_t *offset);
 
 /* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int exynos_drm_gem_fault(struct vm_fault *vmf);
 
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -111,8 +111,9 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 	return 0;
 }
 
-static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int psbfb_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct psb_framebuffer *psbfb = vma->vm_private_data;
 	struct drm_device *dev = psbfb->base.dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
@@ -164,8 +164,9 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 *  vma->vm_private_data points to the GEM object that is backing this
 *  mapping.
 */
-int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int psb_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj;
 	struct gtt_range *r;
 	int ret;
@@ -752,7 +752,7 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			       struct drm_mode_create_dumb *args);
 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 				uint32_t handle, uint64_t *offset);
-extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_fault(struct vm_fault *vmf);
 
 /* psb_device.c */
 extern const struct psb_ops psb_chip_ops;
@@ -3352,7 +3352,7 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 					unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 unsigned int flags,
 			 long timeout,
@@ -1772,7 +1772,6 @@ compute_partial_view(struct drm_i915_gem_object *obj,
 /**
  * i915_gem_fault - fault a page into the GTT
- * @area: CPU VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -1789,9 +1788,10 @@ compute_partial_view(struct drm_i915_gem_object *obj,
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
-int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+	struct vm_area_struct *area = vmf->vma;
 	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -206,7 +206,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 			struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 		uint64_t *iova);
@@ -191,8 +191,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int msm_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -188,7 +188,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int omap_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma);
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_fault(struct vm_fault *vmf);
 int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
@@ -518,7 +518,6 @@ static int fault_2d(struct drm_gem_object *obj,
 /**
  * omap_gem_fault - pagefault handler for GEM objects
- * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
@@ -529,8 +528,9 @@ static int fault_2d(struct drm_gem_object *obj,
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int omap_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	struct drm_device *dev = obj->dev;
@@ -105,15 +105,15 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
 static struct vm_operations_struct qxl_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
-static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int qxl_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	return r;
 }
@@ -979,19 +979,19 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 static struct vm_operations_struct radeon_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int radeon_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	struct radeon_device *rdev;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL) {
 		return VM_FAULT_NOPAGE;
 	}
 	rdev = radeon_get_rdev(bo->bdev);
 	down_read(&rdev->pm.mclk_lock);
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	up_read(&rdev->pm.mclk_lock);
 	return r;
 }
@@ -441,8 +441,9 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
 	return 0;
 }
 
-static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int tegra_bo_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *gem = vma->vm_private_data;
 	struct tegra_bo *bo = to_tegra_bo(gem);
 	struct page *page;
@@ -43,7 +43,6 @@
 #define TTM_BO_VM_NUM_PREFAULT 16
 
 static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
-				struct vm_area_struct *vma,
 				struct vm_fault *vmf)
 {
 	int ret = 0;
@@ -67,7 +66,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		ttm_bo_reference(bo);
-		up_read(&vma->vm_mm->mmap_sem);
+		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
 		ttm_bo_unreserve(bo);
 		ttm_bo_unref(&bo);
@@ -92,8 +91,9 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	return ret;
 }
 
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ttm_bo_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 	    vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -124,7 +124,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 			ttm_bo_reference(bo);
-			up_read(&vma->vm_mm->mmap_sem);
+			up_read(&vmf->vma->vm_mm->mmap_sem);
 			(void) ttm_bo_wait_unreserved(bo);
 			ttm_bo_unref(&bo);
 		}
@@ -168,7 +168,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+	ret = ttm_bo_vm_fault_idle(bo, vmf);
 	if (unlikely(ret != 0)) {
 		retval = ret;
@@ -134,7 +134,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int udl_gem_fault(struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height);
@@ -100,8 +100,9 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ret;
 }
 
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int udl_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
 	struct page *page;
 	unsigned int page_offset;
@@ -50,8 +50,9 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 	kfree(vgem_obj);
 }
 
-static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int vgem_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
 	/* We don't use vmf->pgoff since that has the fake offset */
 	unsigned long vaddr = vmf->address;
@@ -114,18 +114,17 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
 static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
-static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
-				struct vm_fault *vmf)
+static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	struct virtio_gpu_device *vgdev;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
 	vgdev = virtio_gpu_get_vgdev(bo->bdev);
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	return r;
 }
 #endif
@@ -1098,9 +1098,9 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
 	kfree(hi);
 }
 
-static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cs_char_vma_fault(struct vm_fault *vmf)
 {
-	struct cs_char *csdata = vma->vm_private_data;
+	struct cs_char *csdata = vmf->vma->vm_private_data;
 	struct page *page;
 
 	page = virt_to_page(csdata->mmap_base);
@@ -1188,9 +1188,9 @@ static void msc_mmap_close(struct vm_area_struct *vma)
 	mutex_unlock(&msc->buf_mutex);
 }