Commit 273e8478 authored by Stephen Rothwell

Merge remote-tracking branch 'slave-dma/next'

parents a0e91c4a 8160320b
......@@ -23,8 +23,6 @@ Deprecated properties:
Optional properties:
- is_private: The device channels should be marked as private and not for use by the
general purpose DMA channel allocator. False if not passed.
- multi-block: Multi block transfers supported by hardware. Array property with
one cell per channel. 0: not supported, 1 (default): supported.
- snps,dma-protection-control: AHB HPROT[3:1] protection setting.
......
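For context, a minimal sketch of how a consumer could read the optional properties above with the generic OF helpers; the function and variable names are illustrative, and this is not the dw_dmac driver's actual parsing code (that lives in drivers/dma/dw/platform.c, touched later in this diff):

#include <linux/of.h>

static void example_read_dw_props(struct device_node *np, u32 nr_channels,
				  u32 *multi_block, u32 *protctl)
{
	u32 tmp;

	/* "multi-block": one cell per channel, defaults kept if absent */
	of_property_read_u32_array(np, "multi-block", multi_block,
				   nr_channels);

	/* "snps,dma-protection-control": single AHB HPROT[3:1] value */
	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp))
		*protctl = tmp;
}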
......@@ -37,10 +37,11 @@ Required properties:
Required properties for VDMA:
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
Optional properties:
- xlnx,include-sg: Tells whether the hardware is configured for Scatter-Gather mode.
Optional properties for AXI DMA:
- xlnx,sg-length-width: Should be set to the width in bits of the length
register as configured in h/w. Takes values {8...26}. If the property
is missing or invalid then the default value 23 is used. This is the
maximum value that is supported by all IP versions.
- xlnx,mcdma: Tells whether the hardware is configured for multi-channel mode.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
......
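A minimal sketch of the fallback rule described for xlnx,sg-length-width, where a missing or out-of-range value falls back to 23; the helper name is illustrative, not the xilinx_dma driver's own:

#include <linux/of.h>

static u32 example_xlnx_sg_length_width(struct device_node *np)
{
	u32 width;

	/* documented range is 8..26; anything else means "use 23" */
	if (of_property_read_u32(np, "xlnx,sg-length-width", &width) ||
	    width < 8 || width > 26)
		width = 23;

	return width;
}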
......@@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
struct at_desc *ret = NULL;
unsigned long flags;
unsigned int i = 0;
LIST_HEAD(tmp_list);
spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
......@@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "%s\n", __func__);
spin_lock_irqsave(&atchan->lock, flags);
......@@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "%s\n", __func__);
if (!atc_chan_is_paused(atchan))
......
......@@ -312,8 +312,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
return NULL;
/* allocate and setup the descriptor. */
d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
gfp);
d = kzalloc(struct_size(d, cb_list, frames), gfp);
if (!d)
return NULL;
......
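Several hunks in this merge (bcm2835 above, jz4780 immediately below, and bam_dma near the end) replace open-coded "header plus trailing array" allocation sizes with struct_size() from <linux/overflow.h>. The macro evaluates to sizeof(*p) + count * sizeof(p->member[0]) but saturates to SIZE_MAX when the multiplication would overflow, so kzalloc() fails cleanly instead of handing back an undersized buffer. A minimal sketch of the pattern, using an illustrative struct:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_desc {
	unsigned int frames;
	struct { u32 info, length; } cb_list[];	/* flexible array member */
};

static struct example_desc *example_desc_alloc(unsigned int frames, gfp_t gfp)
{
	struct example_desc *d;

	/* same as sizeof(*d) + frames * sizeof(d->cb_list[0]), minus the
	 * silent integer overflow */
	d = kzalloc(struct_size(d, cb_list, frames), gfp);
	if (d)
		d->frames = frames;
	return d;
}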
......@@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
if (!soc_data)
return -EINVAL;
jzdma = devm_kzalloc(dev, sizeof(*jzdma)
+ sizeof(*jzdma->chan) * soc_data->nb_channels,
GFP_KERNEL);
jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
soc_data->nb_channels), GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
......
......@@ -75,7 +75,7 @@ struct __packed axi_dma_lli {
__le32 sstat;
__le32 dstat;
__le32 status_lo;
__le32 ststus_hi;
__le32 status_hi;
__le32 reserved_lo;
__le32 reserved_hi;
};
......
# SPDX-License-Identifier: GPL-2.0
#
# DMA engine configuration for dw
#
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
dw_dmac_core-objs := core.o
dw_dmac_core-objs := core.o dw.o idma32.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
dw_dmac-objs := platform.o
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2007-2008 Atmel Corporation
// Copyright (C) 2010-2011 ST Microelectronics
// Copyright (C) 2013,2018 Intel Corporation
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "internal.h"
static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
bool hs_polarity = dwc->dws.hs_polarity;
cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
/* Set polarity of handshake interface */
cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
}
static void dw_dma_suspend_chan(struct dw_dma_chan *dwc, bool drain)
{
u32 cfglo = channel_readl(dwc, CFG_LO);
channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
}
static void dw_dma_resume_chan(struct dw_dma_chan *dwc, bool drain)
{
u32 cfglo = channel_readl(dwc, CFG_LO);
channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
}
static u32 dw_dma_bytes2block(struct dw_dma_chan *dwc,
size_t bytes, unsigned int width, size_t *len)
{
u32 block;
if ((bytes >> width) > dwc->block_size) {
block = dwc->block_size;
*len = dwc->block_size << width;
} else {
block = bytes >> width;
*len = bytes;
}
return block;
}
static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
{
return DWC_CTLH_BLOCK_TS(block) << width;
}
static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
{
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
bool is_slave = is_slave_direction(dwc->direction);
u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16;
u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16;
u8 p_master = dwc->dws.p_master;
u8 m_master = dwc->dws.m_master;
u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master;
return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) |
DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms);
}
static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
{
/*
* Fix burst size according to dw_dmac. We need to convert them as:
* 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
*/
*maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0;
}
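/*
 * Editorial sketch (not part of dw.c): the mapping in the comment above is
 * just fls() shifted by two -- fls(4) = 3, fls(8) = 4, fls(16) = 5, so
 * fls(x) - 2 yields 1, 2, 3, while maxburst == 1 takes the explicit 0
 * branch.  The helper name is made up; it only rechecks that arithmetic
 * using the headers this file already includes.
 */
static bool example_check_dw_maxburst_encoding(void)
{
	static const u32 burst[]  = { 1, 4, 8, 16 };
	static const u32 expect[] = { 0, 1, 2,  3 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		if ((burst[i] > 1 ? fls(burst[i]) - 2 : 0) != expect[i])
			return false;
	return true;	/* holds for all four documented burst sizes */
}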
static void dw_dma_set_device_name(struct dw_dma *dw, int id)
{
snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id);
}
static void dw_dma_disable(struct dw_dma *dw)
{
do_dw_dma_off(dw);
}
static void dw_dma_enable(struct dw_dma *dw)
{
do_dw_dma_on(dw);
}
int dw_dma_probe(struct dw_dma_chip *chip)
{
struct dw_dma *dw;
dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
if (!dw)
return -ENOMEM;
/* Channel operations */
dw->initialize_chan = dw_dma_initialize_chan;
dw->suspend_chan = dw_dma_suspend_chan;
dw->resume_chan = dw_dma_resume_chan;
dw->prepare_ctllo = dw_dma_prepare_ctllo;
dw->encode_maxburst = dw_dma_encode_maxburst;
dw->bytes2block = dw_dma_bytes2block;
dw->block2bytes = dw_dma_block2bytes;
/* Device operations */
dw->set_device_name = dw_dma_set_device_name;
dw->disable = dw_dma_disable;
dw->enable = dw_dma_enable;
chip->dw = dw;
return do_dma_probe(chip);
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
int dw_dma_remove(struct dw_dma_chip *chip)
{
return do_dma_remove(chip);
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013,2018 Intel Corporation
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "internal.h"
static void idma32_initialize_chan(struct dw_dma_chan *dwc)
{
u32 cfghi = 0;
u32 cfglo = 0;
/* Set default burst alignment */
cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
/* Low 4 bits of the request lines */
cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
/* Request line extension (2 bits) */
cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
}
static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain)
{
u32 cfglo = channel_readl(dwc, CFG_LO);
if (drain)
cfglo |= IDMA32C_CFGL_CH_DRAIN;
channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
}
static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain)
{
u32 cfglo = channel_readl(dwc, CFG_LO);
if (drain)
cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
}
static u32 idma32_bytes2block(struct dw_dma_chan *dwc,
size_t bytes, unsigned int width, size_t *len)
{
u32 block;
if (bytes > dwc->block_size) {
block = dwc->block_size;
*len = dwc->block_size;
} else {
block = bytes;
*len = bytes;
}
return block;
}
static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
{
return IDMA32C_CTLH_BLOCK_TS(block);
}
static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
{
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
bool is_slave = is_slave_direction(dwc->direction);
u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8;
u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8;
return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
}
static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
{
*maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
}
static void idma32_set_device_name(struct dw_dma *dw, int id)
{
snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
}
/*
* Program FIFO size of channels.
*
* By default full FIFO (512 bytes) is assigned to channel 0. Here we
* slice FIFO on equal parts between channels.
*/
static void idma32_fifo_partition(struct dw_dma *dw)
{
u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
IDMA32C_FP_UPDATE;
u64 fifo_partition = 0;
/* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
fifo_partition |= value << 0;
/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
fifo_partition |= value << 32;
/* Program FIFO Partition registers - 64 bytes per channel */
idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
}
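/*
 * Editorial sketch (not part of idma32.c): where the 64 above comes from.
 * The comment says the whole 512-byte FIFO is sliced evenly between the
 * channels, and this controller exposes 8 of them (see
 * idma32_pdata.nr_channels in the PCI glue further down), so:
 */
#define EXAMPLE_IDMA32_FIFO_BYTES	512
#define EXAMPLE_IDMA32_NR_CHANNELS	8
#define EXAMPLE_IDMA32_PSIZE \
	(EXAMPLE_IDMA32_FIFO_BYTES / EXAMPLE_IDMA32_NR_CHANNELS)	/* = 64 */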
static void idma32_disable(struct dw_dma *dw)
{
do_dw_dma_off(dw);
idma32_fifo_partition(dw);
}
static void idma32_enable(struct dw_dma *dw)
{
idma32_fifo_partition(dw);
do_dw_dma_on(dw);
}
int idma32_dma_probe(struct dw_dma_chip *chip)
{
struct dw_dma *dw;
dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
if (!dw)
return -ENOMEM;
/* Channel operations */
dw->initialize_chan = idma32_initialize_chan;
dw->suspend_chan = idma32_suspend_chan;
dw->resume_chan = idma32_resume_chan;
dw->prepare_ctllo = idma32_prepare_ctllo;
dw->encode_maxburst = idma32_encode_maxburst;
dw->bytes2block = idma32_bytes2block;
dw->block2bytes = idma32_block2bytes;
/* Device operations */
dw->set_device_name = idma32_set_device_name;
dw->disable = idma32_disable;
dw->enable = idma32_enable;
chip->dw = dw;
return do_dma_probe(chip);
}
EXPORT_SYMBOL_GPL(idma32_dma_probe);
int idma32_dma_remove(struct dw_dma_chip *chip)
{
return do_dma_remove(chip);
}
EXPORT_SYMBOL_GPL(idma32_dma_remove);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _DMA_DW_INTERNAL_H
......@@ -15,8 +12,14 @@
#include "regs.h"
int dw_dma_disable(struct dw_dma_chip *chip);
int dw_dma_enable(struct dw_dma_chip *chip);
int do_dma_probe(struct dw_dma_chip *chip);
int do_dma_remove(struct dw_dma_chip *chip);
void do_dw_dma_on(struct dw_dma *dw);
void do_dw_dma_off(struct dw_dma *dw);
int do_dw_dma_disable(struct dw_dma_chip *chip);
int do_dw_dma_enable(struct dw_dma_chip *chip);
extern bool dw_dma_filter(struct dma_chan *chan, void *param);
......
// SPDX-License-Identifier: GPL-2.0
/*
* PCI driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2013 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
......@@ -15,21 +12,33 @@
#include "internal.h"
static struct dw_dma_platform_data mrfld_pdata = {
struct dw_dma_pci_data {
const struct dw_dma_platform_data *pdata;
int (*probe)(struct dw_dma_chip *chip);
};
static const struct dw_dma_pci_data dw_pci_data = {
.probe = dw_dma_probe,
};
static const struct dw_dma_platform_data idma32_pdata = {
.nr_channels = 8,
.is_private = true,
.is_memcpy = true,
.is_idma32 = true,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 131071,
.nr_masters = 1,
.data_width = {4},
.multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
};
static const struct dw_dma_pci_data idma32_pci_data = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
};
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
const struct dw_dma_pci_data *data = (void *)pid->driver_data;
struct dw_dma_chip *chip;
int ret;
......@@ -62,9 +71,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
chip->id = pdev->devfn;
chip->regs = pcim_iomap_table(pdev)[0];
chip->irq = pdev->irq;
chip->pdata = pdata;
chip->pdata = data->pdata;
ret = dw_dma_probe(chip);
ret = data->probe(chip);
if (ret)
return ret;
......@@ -90,7 +99,7 @@ static int dw_pci_suspend_late(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
return dw_dma_disable(chip);
return do_dw_dma_disable(chip);
};
static int dw_pci_resume_early(struct device *dev)
......@@ -98,7 +107,7 @@ static int dw_pci_resume_early(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
return dw_dma_enable(chip);
return do_dw_dma_enable(chip);
};
#endif /* CONFIG_PM_SLEEP */
......@@ -109,24 +118,24 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield (GPDMA) */
{ PCI_VDEVICE(INTEL, 0x0827) },
{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data },
/* BayTrail */
{ PCI_VDEVICE(INTEL, 0x0f06) },
{ PCI_VDEVICE(INTEL, 0x0f40) },
{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data },
/* Merrifield iDMA 32-bit (GPDMA) */
{ PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata },
/* Merrifield */
{ PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data },
/* Braswell */
{ PCI_VDEVICE(INTEL, 0x2286) },
{ PCI_VDEVICE(INTEL, 0x22c0) },
{ PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60) },
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
/* Broadwell */
{ PCI_VDEVICE(INTEL, 0x9ce0) },
{ PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data },
{ }
};
......
// SPDX-License-Identifier: GPL-2.0
/*
* Platform driver for the Synopsys DesignWare DMA Controller
*
......@@ -6,10 +7,6 @@
* Copyright (C) 2013 Intel Corporation
*
* Some parts of this driver are derived from the original dw_dmac.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
......@@ -128,15 +125,6 @@ dw_dma_parse_dt(struct platform_device *pdev)
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
if (of_property_read_bool(np, "is_private"))
pdata->is_private = true;
/*
* All known devices, which use DT for configuration, support
* memory-to-memory transfers. So enable it by default.
*/
pdata->is_memcpy = true;
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
......@@ -264,7 +252,7 @@ static void dw_shutdown(struct platform_device *pdev)
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
/*
* We have to call dw_dma_disable() to stop any ongoing transfer. On
* We have to call do_dw_dma_disable() to stop any ongoing transfer. On
* some platforms we can't do that since DMA device is powered off.
* Moreover we have no possibility to check if the platform is affected
* or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
......@@ -273,7 +261,7 @@ static void dw_shutdown(struct platform_device *pdev)
* used by the driver.
*/
pm_runtime_get_sync(chip->dev);
dw_dma_disable(chip);
do_dw_dma_disable(chip);
pm_runtime_put_sync_suspend(chip->dev);
clk_disable_unprepare(chip->clk);
......@@ -303,7 +291,7 @@ static int dw_suspend_late(struct device *dev)
{
struct dw_dma_chip *chip = dev_get_drvdata(dev);
dw_dma_disable(chip);
do_dw_dma_disable(chip);
clk_disable_unprepare(chip->clk);
return 0;
......@@ -318,7 +306,7 @@ static int dw_resume_early(struct device *dev)
if (ret)
return ret;
return dw_dma_enable(chip);
return do_dw_dma_enable(chip);
}
#endif /* CONFIG_PM_SLEEP */
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare AHB DMA Controller
*
* Copyright (C) 2005-2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2016 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
......@@ -222,6 +219,16 @@ enum dw_dma_msize {
/* iDMA 32-bit support */
/* bursts size */
enum idma32_msize {
IDMA32_MSIZE_1,
IDMA32_MSIZE_2,
IDMA32_MSIZE_4,
IDMA32_MSIZE_8,
IDMA32_MSIZE_16,
IDMA32_MSIZE_32,
};
/* Bitfields in CTL_HI */
#define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0)
#define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK)
......@@ -312,6 +319,21 @@ struct dw_dma {
u8 all_chan_mask;
u8 in_use;
/* Channel operations */
void (*initialize_chan)(struct dw_dma_chan *dwc);
void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
void (*resume_chan)(struct dw_dma_chan *dwc, bool drain);
u32 (*prepare_ctllo)(struct dw_dma_chan *dwc);
void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
unsigned int width, size_t *len);
size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
/* Device operations */
void (*set_device_name)(struct dw_dma *dw, int id);
void (*disable)(struct dw_dma *dw);
void (*enable)(struct dw_dma *dw);
/* platform data */
struct dw_dma_platform_data *pdata;
};
......
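With these callbacks collected in struct dw_dma, the shared core can drive both the DesignWare and the iDMA 32-bit flavours through one code path: dw_dma_probe() and idma32_dma_probe() earlier in this diff install their own implementations, and the core calls through the pointers instead of branching on a pdata flag such as the old is_idma32. A hedged sketch of such a call site; dwc_example_base_ctllo() is a made-up name, and the real call sites are in drivers/dma/dw/core.c, whose large diff is not reproduced here:

#include "regs.h"	/* struct dw_dma, struct dw_dma_chan (this driver's header) */

/* Illustrative only: ask the variant-specific ops for the channel setup. */
static u32 dwc_example_base_ctllo(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dw->initialize_chan(dwc);	/* programs CFG_LO/CFG_HI for this variant */
	return dw->prepare_ctllo(dwc);	/* base CTL_LO bits for this variant */
}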
......@@ -377,6 +377,7 @@ struct sdma_channel {
unsigned long watermark_level;
u32 shp_addr, per_addr;
enum dma_status status;
bool context_loaded;
struct imx_dma_data data;
struct work_struct terminate_worker;
};
......@@ -970,6 +971,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int ret;
unsigned long flags;
if (sdmac->context_loaded)
return 0;
if (sdmac->direction == DMA_DEV_TO_MEM)
load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV)
......@@ -1012,6 +1016,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
sdmac->context_loaded = true;
return ret;
}
......@@ -1051,6 +1057,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
sdmac->context_loaded = false;
}
static int sdma_disable_channel_async(struct dma_chan *chan)
......
......@@ -1153,7 +1153,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_free_irq;
return mv_chan;
err_free_irq:
......
......@@ -2267,7 +2267,6 @@ static int pl330_terminate_all(struct dma_chan *chan)
struct dma_pl330_desc *desc;
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list);
bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
......
......@@ -636,8 +636,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
/* allocate enough room to accommodate the number of entries */
async_desc = kzalloc(sizeof(*async_desc) +
(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
GFP_NOWAIT);
if (!async_desc)
goto err_out;
......
......@@ -423,9 +423,8 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
platform_driver_register(&hidma_mgmt_driver);