Commit 01758cca authored by Guido Günther

Merge branch 'drm-updates' into 'imx8-4.18-wip'

Drm updates

See merge request !13
parents dbabc795 cef52804
Pipeline #4073 passed in 29 minutes and 17 seconds
@@ -106,17 +106,6 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
 	return base + nelem * elem_size;
 }
 
-/* returns true if fence a comes after fence b */
-static inline bool fence_after(u32 a, u32 b)
-{
-	return (s32)(a - b) > 0;
-}
-
-static inline bool fence_after_eq(u32 a, u32 b)
-{
-	return (s32)(a - b) >= 0;
-}
-
 /*
  * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
  * We need to calculate the timeout in terms of number of jiffies
......
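Note: the fence_after()/fence_after_eq() helpers removed above compare 32-bit seqnos through a signed subtraction so the ordering stays correct when the counter wraps; fence_after() is re-added as a private helper in etnaviv_gpu.c below, and etnaviv_fence_signaled() now open-codes the >= variant. A minimal standalone sketch of why the cast works (userspace C, using uint32_t/int32_t in place of the kernel's u32/s32; illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* same idea as the kernel helper: true if seqno a was issued after b */
static int fence_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", fence_after(2, 1));           /* 1: no wrap */
	printf("%d\n", fence_after(1, 0xffffffffu)); /* 1: counter wrapped, but a - b == 2 */
	printf("%d\n", 1 > 0xffffffffu);             /* 0: a plain unsigned compare misorders it */
	return 0;
}

The trick only holds while the two seqnos are less than 2^31 apart, which is the usual assumption for ring-style sequence counters.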
@@ -438,9 +438,9 @@ static void submit_cleanup(struct kref *kref)
 		dma_fence_put(submit->in_fence);
 	if (submit->out_fence) {
 		/* first remove from IDR, so fence can not be found anymore */
-		mutex_lock(&submit->gpu->fence_idr_lock);
+		mutex_lock(&submit->gpu->fence_lock);
 		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-		mutex_unlock(&submit->gpu->fence_idr_lock);
+		mutex_unlock(&submit->gpu->fence_lock);
 		dma_fence_put(submit->out_fence);
 	}
 	kfree(submit->pmrs);
......
@@ -994,7 +994,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 		complete(&gpu->event_free);
 	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
 	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-	gpu->completed_fence = gpu->active_fence;
 
 	etnaviv_gpu_hw_init(gpu);
 	gpu->lastctx = NULL;
@@ -1037,7 +1036,7 @@ static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
-	return fence_completed(f->gpu, f->base.seqno);
+	return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
 }
 
 static void etnaviv_fence_release(struct dma_fence *fence)
@@ -1078,6 +1077,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 	return &f->base;
 }
 
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
+	return (s32)(a - b) > 0;
+}
+
 /*
  * event management:
  */
@@ -1313,8 +1318,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 		goto out_unlock;
 	}
 
-	gpu->active_fence = gpu_fence->seqno;
-
 	if (submit->nr_pmrs) {
 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
 		kref_get(&submit->refcount);
@@ -1734,7 +1737,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
-	mutex_init(&gpu->fence_idr_lock);
+	mutex_init(&gpu->fence_lock);
 
 	/* Map registers: */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1813,8 +1816,8 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	u32 idle, mask;
 
-	/* If we have outstanding fences, we're not idle */
-	if (gpu->completed_fence != gpu->active_fence)
+	/* If there are any jobs in the HW queue, we're not idle */
+	if (atomic_read(&gpu->sched.hw_rq_count))
 		return -EBUSY;
 
 	/* Check whether the hardware (except FE) is idle */
......
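Note: with active_fence gone (see the etnaviv_gpu.h hunk below), the runtime-suspend check above asks the DRM scheduler whether anything is still in flight instead of comparing driver-private fence counters. sched.hw_rq_count tracks jobs the scheduler has handed to the hardware whose fences have not signalled yet, so a non-zero value means the GPU cannot be idle.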
@@ -118,10 +118,9 @@ struct etnaviv_gpu {
 	u32 idle_mask;
 
 	/* Fencing support */
-	struct mutex fence_idr_lock;
+	struct mutex fence_lock;
 	struct idr fence_idr;
 	u32 next_fence;
-	u32 active_fence;
 	u32 completed_fence;
 	wait_queue_head_t fence_event;
 	u64 fence_context;
@@ -161,11 +160,6 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
 	return readl(gpu->mmio + reg);
 }
 
-static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
-{
-	return fence_after_eq(gpu->completed_fence, fence);
-}
-
 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
......
@@ -119,8 +119,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
 static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 {
-	u32 *p;
-	int ret, i;
+	int ret;
 
 	/* allocate scratch page */
 	etnaviv_domain->base.bad_page_cpu =
@@ -131,9 +130,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
-	p = etnaviv_domain->base.bad_page_cpu;
-	for (i = 0; i < SZ_4K / 4; i++)
-		*p++ = 0xdead55aa;
+	memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
+		 SZ_4K / sizeof(u32));
 
 	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
 					       SZ_4K, &etnaviv_domain->pta_dma,
......
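Note: memset32() fills SZ_4K / sizeof(u32) = 1024 32-bit words with the 0xdead55aa bad-page marker, a drop-in replacement for the removed open-coded loop, which is also why the p and i locals disappear in the first hunk of this file.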
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	 * If the GPU managed to complete this jobs fence, the timout is
 	 * spurious. Bail out.
 	 */
-	if (fence_completed(gpu, submit->out_fence->seqno))
+	if (dma_fence_is_signaled(submit->out_fence))
 		return;
 
 	/*
@@ -140,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 			   struct etnaviv_gem_submit *submit)
 {
-	int ret;
+	int ret = 0;
+
+	/*
+	 * Hold the fence lock across the whole operation to avoid jobs being
+	 * pushed out of order with regard to their sched fence seqnos as
+	 * allocated in drm_sched_job_init.
+	 */
+	mutex_lock(&submit->gpu->fence_lock);
 
 	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
 				 sched_entity, submit->cmdbuf.ctx);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-	mutex_lock(&submit->gpu->fence_idr_lock);
 	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
-	mutex_unlock(&submit->gpu->fence_idr_lock);
-	if (submit->out_fence_id < 0)
-		return -ENOMEM;
+	if (submit->out_fence_id < 0) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	/* the scheduler holds on to the job now */
 	kref_get(&submit->refcount);
 
 	drm_sched_entity_push_job(&submit->sched_job, sched_entity);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&submit->gpu->fence_lock);
+
+	return ret;
 }
 
 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
......
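Note: the push-job rework closes an ordering race. Previously a task could be preempted between drm_sched_job_init(), which allocates the scheduler fence seqno, and drm_sched_entity_push_job(), letting another submitter grab the next seqno but push its job first. Holding gpu->fence_lock from job init through the push, as the new comment in the hunk explains, keeps seqno allocation, fence-IDR insertion and queue order consistent; the single out_unlock label also ensures every exit path drops the lock exactly once.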