Commit 79d79c32 authored by Stephen Rothwell

Merge remote-tracking branch 'drm-intel/for-linux-next'

parents 0557c732 1b4bd5c4
......@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/regmap.h>
#include <acpi/acpi_lpat.h>
#include "intel_pmic.h"
......@@ -36,6 +37,8 @@ struct intel_pmic_opregion {
struct intel_pmic_regs_handler_ctx ctx;
};
static struct intel_pmic_opregion *intel_pmic_opregion;
static int pmic_get_reg_bit(int address, struct pmic_table *table,
int count, int *reg, int *bit)
{
......@@ -304,6 +307,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
}
opregion->data = d;
intel_pmic_opregion = opregion;
return 0;
out_remove_thermal_handler:
......@@ -319,3 +323,60 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
return ret;
}
EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
/**
* intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence
* @i2c_address: I2C client address for the PMIC
* @reg_address: PMIC register address
* @value: New value for the register bits to change
* @mask: Mask indicating which register bits to change
*
* DSI LCD panels describe an initialization sequence in the i915 VBT (Video
* BIOS Tables) using so-called MIPI sequences. One possible element in these
* sequences is a PMIC-specific element of 15 bytes.
*
* This function executes these PMIC-specific elements, sending the embedded
* commands to the PMIC.
*
* Return: 0 on success, < 0 on failure.
*/
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
u32 value, u32 mask)
{
struct intel_pmic_opregion_data *d;
int ret;
if (!intel_pmic_opregion) {
pr_warn("%s: No PMIC registered\n", __func__);
return -ENXIO;
}
d = intel_pmic_opregion->data;
mutex_lock(&intel_pmic_opregion->lock);
if (d->exec_mipi_pmic_seq_element) {
ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap,
i2c_address, reg_address,
value, mask);
} else if (d->pmic_i2c_address) {
if (i2c_address == d->pmic_i2c_address) {
ret = regmap_update_bits(intel_pmic_opregion->regmap,
reg_address, mask, value);
} else {
pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
__func__, i2c_address, reg_address, value, mask);
ret = -ENXIO;
}
} else {
pr_warn("%s: Not implemented\n", __func__);
pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n",
__func__, i2c_address, reg_address, value, mask);
ret = -EOPNOTSUPP;
}
mutex_unlock(&intel_pmic_opregion->lock);
return ret;
}
EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element);
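The handler resolves in three steps: a PMIC-driver-specific
exec_mipi_pmic_seq_element callback wins if present; otherwise, if the driver
declares a pmic_i2c_address and the element targets that address, the write
falls through to a plain regmap_update_bits(); anything else is rejected.
A minimal caller sketch, assuming a hypothetical VBT sequence parser that has
already unpacked the 15-byte PMIC element into its fields (the helper name
below is illustrative, not part of this diff):

	/* Hypothetical caller, not part of this diff. */
	static int exec_pmic_element(u16 i2c_addr, u32 reg, u32 val, u32 mask)
	{
		int ret;

		ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg,
								val, mask);
		if (ret)
			DRM_ERROR("PMIC element failed: %d\n", ret);

		return ret;
	}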
......@@ -15,10 +15,14 @@ struct intel_pmic_opregion_data {
int (*update_aux)(struct regmap *r, int reg, int raw_temp);
int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
u32 reg_address, u32 value, u32 mask);
struct pmic_table *power_table;
int power_table_count;
struct pmic_table *thermal_table;
int thermal_table_count;
/* For generic exec_mipi_pmic_seq_element handling */
int pmic_i2c_address;
};
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
......
......@@ -231,6 +231,24 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0);
}
static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
u16 i2c_client_address,
u32 reg_address,
u32 value, u32 mask)
{
u32 address;
if (i2c_client_address > 0xff || reg_address > 0xff) {
pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
__func__, i2c_client_address, reg_address);
return -ERANGE;
}
address = (i2c_client_address << 8) | reg_address;
return regmap_update_bits(regmap, address, mask, value);
}
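The address packing suggests the Whiskey Cove regmap spans several I2C
clients behind one 16-bit address space: the client address goes in the high
byte and the register offset in the low byte. A worked example with
illustrative values (not taken from this diff):

	/* Illustrative values, not from this diff. */
	u32 address = (0x6e << 8) | 0x5e;	/* yields 0x6e5e */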
/*
* The thermal table and ops are empty; we do not support the Thermal opregion
* (DPTF) due to a lack of documentation.
......@@ -238,6 +256,7 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
.get_power = intel_cht_wc_pmic_get_power,
.update_power = intel_cht_wc_pmic_update_power,
.exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
.power_table = power_table,
.power_table_count = ARRAY_SIZE(power_table),
};
......
......@@ -265,6 +265,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
.power_table_count = ARRAY_SIZE(power_table),
.thermal_table = thermal_table,
.thermal_table_count = ARRAY_SIZE(thermal_table),
.pmic_i2c_address = 0x34,
};
static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
......
......@@ -21,11 +21,11 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
select STACKDEPOT
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
select DRM_DEBUG_SELFTEST
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
......@@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
select STACKDEPOT
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
......
......@@ -40,9 +40,10 @@ i915-y := i915_drv.o \
i915_mm.o \
i915_params.o \
i915_pci.o \
i915_suspend.o \
i915_syncmap.o \
i915_reset.o \
i915_suspend.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_sysfs.o \
intel_csr.o \
intel_device_info.o \
......
......@@ -24,7 +24,6 @@
#define _INTEL_DVO_H
#include <linux/i2c.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
......
......@@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
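These GVT call sites switch to intel_runtime_pm_put_unchecked(), the variant
this series introduces for code that did not keep the wakeref cookie returned
by the matching get. A rough sketch of the two prototypes, as implied by the
call sites in this diff (exact signatures assumed):

	/* Assumed prototypes, inferred from the call sites. */
	void intel_runtime_pm_put(struct drm_i915_private *i915,
				  intel_wakeref_t wref);
	void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);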
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
......@@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
......@@ -219,7 +219,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
return -ENOSPC;
}
......@@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
/**
......
......@@ -597,7 +597,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
/**
......
......@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}
......@@ -1017,7 +1017,7 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
......@@ -1471,7 +1471,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {
......
......@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
if (IS_GEN8(gvt->dev_priv))
if (IS_GEN(gvt->dev_priv, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name);
else if (IS_GEN9(gvt->dev_priv))
else if (IS_GEN(gvt->dev_priv, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
......
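The fixed IS_GEN8()/IS_GEN9() helpers give way to a parameterized
IS_GEN(dev_priv, n), used throughout the rest of this merge. A plausible
mask-based definition, assuming a gen_mask field as in i915 of this era (the
macro itself is not part of these hunks):

	/* Assumed definition; the real macro lives in i915_drv.h. */
	#define IS_GEN(dev_priv, n) \
		(!!(INTEL_INFO(dev_priv)->gen_mask & BIT((n) - 1)))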
......@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
if (!IS_GEN7(engine->i915))
if (!IS_GEN(engine->i915, 7))
return;
switch (engine->id) {
......
......@@ -86,7 +86,6 @@
*/
#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
......@@ -311,7 +310,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(i915))
if (IS_GEN(i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
/* TODO: WaDisableLiteRestore when we start using semaphore
......
......@@ -27,7 +27,6 @@
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include "i915_drv.h"
......
......@@ -26,7 +26,6 @@
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......
......@@ -31,7 +31,6 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
......@@ -1380,7 +1379,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN6(eb->i915)) {
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
PIN_GLOBAL);
if (WARN_ONCE(err,
......@@ -1896,7 +1895,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
......@@ -2203,6 +2202,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
intel_wakeref_t wakeref;
int out_fence_fd = -1;
int err;
......@@ -2273,7 +2273,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
intel_runtime_pm_get(eb.i915);
wakeref = intel_runtime_pm_get(eb.i915);
err = i915_mutex_lock_interruptible(dev);
if (err)
......@@ -2425,7 +2425,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb_release_vmas(&eb);
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915);
intel_runtime_pm_put(eb.i915, wakeref);
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);
......
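The execbuffer conversion shows the new runtime-PM pattern in full: the get
now returns an intel_wakeref_t cookie that the caller stores and hands back
to the put, which lets the debug build (see the DRM_I915_DEBUG_RUNTIME_PM
Kconfig hunk above, which selects STACKDEPOT) track where a reference was
taken and leaked. A condensed sketch, using the function names from this
diff:

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);
	/* ... do work that requires the device awake ... */
	intel_runtime_pm_put(i915, wakeref);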
......@@ -21,7 +21,6 @@
* IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
* and explicitly managed for internal users.
*/
if (IS_GEN2(fence->i915))
if (IS_GEN(fence->i915, 2))
i830_write_fence_reg(fence, vma);
else if (IS_GEN3(fence->i915))
else if (IS_GEN(fence->i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
......@@ -210,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
int ret;
if (vma) {
......@@ -257,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*/
if (intel_runtime_pm_get_if_in_use(fence->i915)) {
wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
if (wakeref) {
fence_write(fence, vma);
intel_runtime_pm_put(fence->i915);
intel_runtime_pm_put(fence->i915, wakeref);
}
if (vma) {
......@@ -554,8 +555,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
void
i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
......@@ -578,7 +579,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
uint32_t dimm_c0, dimm_c1;
u32 dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
......@@ -596,13 +597,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (IS_GEN5(dev_priv)) {
} else if (IS_GEN(dev_priv, 5)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev_priv)) {
} else if (IS_GEN(dev_priv, 2)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
......@@ -610,7 +611,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev_priv) ||
IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
uint32_t dcc;
u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
......@@ -647,7 +648,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
/* check for L-shaped memory aka modified enhanced addressing */
if (IS_GEN4(dev_priv) &&
if (IS_GEN(dev_priv, 4) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
......
......@@ -288,6 +288,8 @@ struct i915_address_space {
bool closed;
struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
u64 scratch_pte;
struct i915_page_dma scratch_page;
......@@ -413,8 +415,6 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
struct gen6_hw_ppgtt {
......
......@@ -22,7 +22,6 @@
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......
......@@ -29,7 +29,8 @@
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>
#include <drm/i915_drm.h>
......@@ -56,6 +57,7 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
......@@ -386,6 +388,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
......
......@@ -30,30 +30,27 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
static bool shrinker_lock(struct drm_i915_private *i915,
unsigned int flags,
bool *unlock)
{
switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
struct mutex *m = &i915->drm.struct_mutex;
switch (mutex_trylock_recursive(m)) {
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
case MUTEX_TRYLOCK_FAILED:
*unlock = false;
preempt_disable();
do {
cpu_relax();
if (mutex_trylock(&i915->drm.struct_mutex)) {
*unlock = true;
break;
}
} while (!need_resched());
preempt_enable();
if (flags & I915_SHRINK_ACTIVE &&
mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
*unlock = true;
return *unlock;
case MUTEX_TRYLOCK_SUCCESS:
......@@ -156,11 +153,12 @@ i915_gem_shrink(struct drm_i915_private *i915,
{ &i915->mm.bound_list, I915_SHRINK_BOUND },
{ NULL, 0 },
}, *phase;
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
bool unlock;
if (!shrinker_lock(i915, &unlock))
if (!shrinker_lock(i915, flags, &unlock))
return 0;
/*
......@@ -185,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
if ((flags & I915_SHRINK_BOUND) &&
!intel_runtime_pm_get_if_in_use(i915))
flags &= ~I915_SHRINK_BOUND;
if (flags & I915_SHRINK_BOUND) {
wakeref = intel_runtime_pm_get_if_in_use(i915);
if (!wakeref)
flags &= ~I915_SHRINK_BOUND;
}
/*
* As we may completely rewrite the (un)bound list whilst unbinding
......@@ -268,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
}
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
i915_retire_requests(i915);
......@@ -295,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
unsigned long freed;
intel_runtime_pm_get(i915);
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
intel_runtime_pm_put(i915);
intel_wakeref_t wakeref;
unsigned long freed = 0;
with_intel_runtime_pm(i915, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
}
return freed;
}
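with_intel_runtime_pm() scopes the get/put pair around a block so the cookie
cannot be forgotten. A plausible expansion, assuming a for-loop based
implementation (the definition itself is not part of this hunk):

	/* Assumed expansion of with_intel_runtime_pm(). */
	#define with_intel_runtime_pm(i915, wf) \
		for ((wf) = intel_runtime_pm_get(i915); (wf); \
		     intel_runtime_pm_put((i915), (wf)), (wf) = 0)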
......@@ -357,7 +358,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_scanned = 0;
if (!shrinker_lock(i915, &unlock))
if (!shrinker_lock(i915, 0, &unlock))
return SHRINK_STOP;
freed = i915_gem_shrink(i915,
......@@ -373,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(i915);
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
intel_runtime_pm_put(i915);
intel_wakeref_t wakeref;
with_intel_runtime_pm(i915, wakeref) {
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
}
}
shrinker_unlock(i915, unlock);
......@@ -388,31 +391,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return sc->nr_scanned ? freed : SHRINK_STOP;
}
static bool
shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
int timeout_ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
if (i915_gem_wait_for_idle(i915,
0, MAX_SCHEDULE_TIMEOUT) == 0 &&
shrinker_lock(i915, unlock))
break;
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
if (time_after(jiffies, timeout)) {
pr_err("Unable to lock GPU to purge memory.\n");
return false;