dmaengine fixes-2 for v5.11

Some late fixes for dmaengine:
 - Core: fix channel device_node deletion
 - Driver fixes for:
   - dw: revert of runtime pm enabling
   - idxd: device state fix, interrupt completion and list corruption
   - ti: resource leak

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmAjrkQACgkQfBQHDyUj
g0foZA/+Iqpi6fU0Dth4bdoJa5HO63a62G5nrhofF/vH681GMaazNj46byol3vuA
Gc0/EZ2UtIkEY29ix0XaHkksQrsqn/Q5E4QK+5u9x32DHf3jvtbOblOSIBCdr//3
i+uc/K90ot4ERtNvwiPxQGWjS7rF+6BvHItRDxOaiele0Uvf18/VGn2x7fH5vNeK
GqtZK47E11y5UhqpJAiwcgNAhKXC6I6s/tP0pidyWuXWeqVm+usr6Pun9YExMJQm
N+kiR8eJoh5F0N9KAg3rOppxf4iEblvgh2vfMgcNC63GdeWB2x1OMgizAXjE136K
HAvcp/3rQf76tUhjZkr/YZaNB7wCqzCRRcgQ/xyhSJt24yswfv9NFGVHd2ltkfx9
Yp+rl8ZC0dSvdGR3ECF9z98MzRbBPgu+TCW/50/Hh42Va0FJZbXyY45hpfR9qPe2
hiXwQkJ8IKH7C8BpDKA8vMlJc4xhbNsYW0GaSyoAUzhaStwTHcKNB4+5Xeia55e3
RR2OPJXl+y3jywcO15fmFdNIRsSvRVGYioFH0NzneaVVIlbQk5hRqADNMWelnwiA
DJc21v7yurHeCh3lefn5Aml10n986S1b7XNPA7Ls+2FMmJeIt2vrKqvmKLhHVANY
bvSKEXda2pAvb3zw2fCcCuPq6KUdJAfDrB0oorIlRBM3IkY+B5Y=
=j/zV
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix2-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "Some late fixes for dmaengine:

  Core:
   - fix channel device_node deletion

  Driver fixes:
   - dw: revert of runtime pm enabling
   - idxd: device state fix, interrupt completion and list corruption
   - ti: resource leak"

* tag 'dmaengine-fix2-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine dw: Revert "dmaengine: dw: Enable runtime PM"
  dmaengine: idxd: check device state before issue command
  dmaengine: ti: k3-udma: Fix a resource leak in an error handling path
  dmaengine: move channel device_node deletion to driver
  dmaengine: idxd: fix misc interrupt completion
  dmaengine: idxd: Fix list corruption in description completion
commit 708c2e4181
@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
 		  "%s called while %d clients hold a reference\n",
 		  __func__, chan->client_count);
 	mutex_lock(&dma_list_mutex);
-	list_del(&chan->device_node);
 	device->chancnt--;
 	chan->dev->chan = NULL;
 	mutex_unlock(&dma_list_mutex);
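With list_del() gone from the core, a driver that keeps its channels on the device's channel list has to drop the node itself when it unregisters the channel. The idxd driver does exactly that later in this series (see the idxd_unregister_dma_channel hunk below); restated here with comments, the driver-side pairing looks like:

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct dma_chan *chan = &wq->dma_chan;

	/* Tell the dmaengine core the channel is going away ... */
	dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
	/* ... and remove it from the device's channel list ourselves,
	 * which the core no longer does on our behalf. */
	list_del(&chan->device_node);
}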
@@ -982,11 +982,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-	pm_runtime_get_sync(dw->dma.dev);
-
 	/* ASSERT: channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		pm_runtime_put_sync_suspend(dw->dma.dev);
 		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
 		return -EIO;
 	}
@@ -1003,7 +1000,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * We need controller-specific data to set up slave transfers.
 	 */
 	if (chan->private && !dw_dma_filter(chan, chan->private)) {
-		pm_runtime_put_sync_suspend(dw->dma.dev);
 		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
 		return -EINVAL;
 	}
@@ -1047,8 +1043,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	if (!dw->in_use)
 		do_dw_dma_off(dw);
 
-	pm_runtime_put_sync_suspend(dw->dma.dev);
-
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
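For context, what the revert strips out is the usual runtime-PM bracketing of channel allocation and release: take a reference before touching the hardware and drop it on every exit path. A minimal sketch of that bracketing, with hypothetical setup/teardown helpers standing in for the dw_dmac-specific work (the hunks above show the exact call sites being removed):

static int example_alloc_chan(struct device *dev)
{
	int ret;

	pm_runtime_get_sync(dev);                 /* power the controller up first */

	ret = example_hw_setup(dev);              /* hypothetical setup helper */
	if (ret) {
		pm_runtime_put_sync_suspend(dev); /* balance the get on failure */
		return ret;
	}

	return 0;
}

static void example_free_chan(struct device *dev)
{
	example_hw_teardown(dev);                 /* hypothetical teardown helper */
	pm_runtime_put_sync_suspend(dev);         /* balance the get from allocation */
}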
@@ -398,17 +398,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
 	return false;
 }
 
+static inline bool idxd_device_is_halted(struct idxd_device *idxd)
+{
+	union gensts_reg gensts;
+
+	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+	return (gensts.state == IDXD_DEVICE_STATE_HALT);
+}
+
 /*
  * This is function is only used for reset during probe and will
  * poll for completion. Once the device is setup with interrupts,
  * all commands will be done via interrupt completion.
  */
-void idxd_device_init_reset(struct idxd_device *idxd)
+int idxd_device_init_reset(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
 	union idxd_command_reg cmd;
 	unsigned long flags;
 
+	if (idxd_device_is_halted(idxd)) {
+		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+		return -ENXIO;
+	}
+
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.cmd = IDXD_CMD_RESET_DEVICE;
 	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
@@ -419,6 +433,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
 	       IDXD_CMDSTS_ACTIVE)
 		cpu_relax();
 	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	return 0;
 }
 
 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
@@ -428,6 +443,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
 	DECLARE_COMPLETION_ONSTACK(done);
 	unsigned long flags;
 
+	if (idxd_device_is_halted(idxd)) {
+		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+		*status = IDXD_CMDSTS_HW_ERR;
+		return;
+	}
+
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.cmd = cmd_code;
 	cmd.operand = operand;
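Both command paths above now share the same guard: read GENSTS and refuse to issue anything while the device reports the HALT state, returning -ENXIO on the polled probe path and IDXD_CMDSTS_HW_ERR on the interrupt-driven path. Reduced to the guard itself (register and constant names come from the hunks; the caller is an illustrative stand-in):

static bool example_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	/* GENSTS reflects device state; HALT means no command will ever complete. */
	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	return gensts.state == IDXD_DEVICE_STATE_HALT;
}

static int example_issue_command(struct idxd_device *idxd)
{
	if (example_device_is_halted(idxd))
		return -ENXIO;          /* bail out instead of spinning forever */

	/* ... write the command register and wait for completion ... */
	return 0;
}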
@@ -205,5 +205,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
 
 void idxd_unregister_dma_channel(struct idxd_wq *wq)
 {
-	dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+	struct dma_chan *chan = &wq->dma_chan;
+
+	dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+	list_del(&chan->device_node);
 }
@@ -326,7 +326,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
-void idxd_device_init_reset(struct idxd_device *idxd);
+int idxd_device_init_reset(struct idxd_device *idxd);
 int idxd_device_enable(struct idxd_device *idxd);
 int idxd_device_disable(struct idxd_device *idxd);
 void idxd_device_reset(struct idxd_device *idxd);
@@ -335,7 +335,10 @@ static int idxd_probe(struct idxd_device *idxd)
 	int rc;
 
 	dev_dbg(dev, "%s entered and resetting device\n", __func__);
-	idxd_device_init_reset(idxd);
+	rc = idxd_device_init_reset(idxd);
+	if (rc < 0)
+		return rc;
+
 	dev_dbg(dev, "IDXD reset complete\n");
 
 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
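Together with the prototype change above, this turns a fire-and-forget reset into one whose failure aborts the probe. The pattern in isolation (only the reset call and its error handling come from the hunks; the rest of probe is elided):

static int example_probe(struct idxd_device *idxd)
{
	int rc;

	/* idxd_device_init_reset() now reports -ENXIO when the device is halted. */
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;              /* nothing acquired yet, just abort probe */

	/* ... remaining probe steps ... */
	return 0;
}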
@@ -111,19 +111,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-irqreturn_t idxd_misc_thread(int vec, void *data)
+static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 {
-	struct idxd_irq_entry *irq_entry = data;
-	struct idxd_device *idxd = irq_entry->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	union gensts_reg gensts;
-	u32 cause, val = 0;
+	u32 val = 0;
 	int i;
 	bool err = false;
 
-	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
 	if (cause & IDXD_INTC_ERR) {
 		spin_lock_bh(&idxd->dev_lock);
 		for (i = 0; i < 4; i++)
@@ -181,7 +176,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 			       val);
 
 	if (!err)
-		goto out;
+		return 0;
 
 	/*
 	 * This case should rarely happen and typically is due to software
@@ -211,37 +206,58 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
 				"FLR" : "system reset");
 			spin_unlock_bh(&idxd->dev_lock);
+			return -ENXIO;
 		}
 	}
 
- out:
+	return 0;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+	struct idxd_irq_entry *irq_entry = data;
+	struct idxd_device *idxd = irq_entry->idxd;
+	int rc;
+	u32 cause;
+
+	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+	if (cause)
+		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+	while (cause) {
+		rc = process_misc_interrupts(idxd, cause);
+		if (rc < 0)
+			break;
+		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+		if (cause)
+			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+	}
+
 	idxd_unmask_msix_vector(idxd, irq_entry->id);
 	return IRQ_HANDLED;
 }
 
-static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
 {
 	/*
 	 * Completion address can be bad as well. Check fault address match for descriptor
 	 * and completion address.
 	 */
-	if ((u64)desc->hw == fault_addr ||
-	    (u64)desc->completion == fault_addr) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+	if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
+		struct idxd_device *idxd = desc->wq->idxd;
+		struct device *dev = &idxd->pdev->dev;
+
+		dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
 		return true;
 	}
 
 	return false;
 }
 
-static bool complete_desc(struct idxd_desc *desc)
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
 {
-	if (desc->completion->status) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
-		return true;
-	}
-
-	return false;
+	idxd_dma_complete_txd(desc, reason);
+	idxd_free_desc(desc->wq, desc);
 }
 
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
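The rework above splits the cause handling into process_misc_interrupts() and turns the IRQ thread into an acknowledge-and-retry loop, so cause bits that land while earlier ones are being handled are not dropped. The new thread function from the hunk, restated with comments:

irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	/* Read the pending causes and acknowledge them immediately. */
	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;          /* device halted, nothing more to do */

		/* Pick up (and ack) causes that arrived while we were processing. */
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}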
@@ -251,25 +267,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
 	struct idxd_desc *desc, *t;
 	struct llist_node *head;
 	int queued = 0;
-	bool completed = false;
 	unsigned long flags;
+	enum idxd_complete_type reason;
 
 	*processed = 0;
 	head = llist_del_all(&irq_entry->pending_llist);
 	if (!head)
 		goto out;
 
-	llist_for_each_entry_safe(desc, t, head, llnode) {
-		if (wtype == IRQ_WORK_NORMAL)
-			completed = complete_desc(desc);
-		else if (wtype == IRQ_WORK_PROCESS_FAULT)
-			completed = process_fault(desc, data);
+	if (wtype == IRQ_WORK_NORMAL)
+		reason = IDXD_COMPLETE_NORMAL;
+	else
+		reason = IDXD_COMPLETE_DEV_FAIL;
 
-		if (completed) {
-			idxd_free_desc(desc->wq, desc);
+	llist_for_each_entry_safe(desc, t, head, llnode) {
+		if (desc->completion->status) {
+			if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+				match_fault(desc, data);
+			complete_desc(desc, reason);
 			(*processed)++;
-			if (wtype == IRQ_WORK_PROCESS_FAULT)
-				break;
 		} else {
 			spin_lock_irqsave(&irq_entry->list_lock, flags);
 			list_add_tail(&desc->list,
@@ -287,42 +303,46 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
 				 enum irq_work_type wtype,
 				 int *processed, u64 data)
 {
-	struct list_head *node, *next;
 	int queued = 0;
-	bool completed = false;
 	unsigned long flags;
+	LIST_HEAD(flist);
+	struct idxd_desc *desc, *n;
+	enum idxd_complete_type reason;
 
 	*processed = 0;
+	if (wtype == IRQ_WORK_NORMAL)
+		reason = IDXD_COMPLETE_NORMAL;
+	else
+		reason = IDXD_COMPLETE_DEV_FAIL;
+
+	/*
+	 * This lock protects list corruption from access of list outside of the irq handler
+	 * thread.
+	 */
 	spin_lock_irqsave(&irq_entry->list_lock, flags);
-	if (list_empty(&irq_entry->work_list))
-		goto out;
-
-	list_for_each_safe(node, next, &irq_entry->work_list) {
-		struct idxd_desc *desc =
-				container_of(node, struct idxd_desc, list);
-
+	if (list_empty(&irq_entry->work_list)) {
 		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
-		if (wtype == IRQ_WORK_NORMAL)
-			completed = complete_desc(desc);
-		else if (wtype == IRQ_WORK_PROCESS_FAULT)
-			completed = process_fault(desc, data);
+		return 0;
+	}
 
-		if (completed) {
-			spin_lock_irqsave(&irq_entry->list_lock, flags);
+	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
+		if (desc->completion->status) {
 			list_del(&desc->list);
-			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
-			idxd_free_desc(desc->wq, desc);
 			(*processed)++;
-			if (wtype == IRQ_WORK_PROCESS_FAULT)
-				return queued;
+			list_add_tail(&desc->list, &flist);
 		} else {
 			queued++;
 		}
-		spin_lock_irqsave(&irq_entry->list_lock, flags);
 	}
 
- out:
 	spin_unlock_irqrestore(&irq_entry->list_lock, flags);
 
+	list_for_each_entry(desc, &flist, list) {
+		if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+			match_fault(desc, data);
+		complete_desc(desc, reason);
+	}
+
 	return queued;
 }
 
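The list-corruption fix is a two-phase completion: while the list lock is held, finished descriptors are only moved from the shared work list onto a private list; the lock is dropped before any completion callback runs, so nothing can race with the walk of the shared list. A stripped-down sketch of the pattern, with stand-in types and helpers (example_*) in place of the idxd descriptor machinery:

struct example_desc {
	struct list_head list;
	bool done;
};

struct example_ring {
	spinlock_t list_lock;
	struct list_head work_list;
};

static void example_complete(struct example_desc *desc);   /* hypothetical callback */

static void example_reap_completed(struct example_ring *ring)
{
	struct example_desc *desc, *n;
	unsigned long flags;
	LIST_HEAD(flist);               /* private list: only this thread sees it */

	spin_lock_irqsave(&ring->list_lock, flags);
	list_for_each_entry_safe(desc, n, &ring->work_list, list) {
		if (desc->done) {
			list_del(&desc->list);                  /* unlink under the lock */
			list_add_tail(&desc->list, &flist);     /* park it for later */
		}
	}
	spin_unlock_irqrestore(&ring->list_lock, flags);

	/* Run completions without the shared-list lock held. */
	list_for_each_entry_safe(desc, n, &flist, list)
		example_complete(desc);   /* may free desc, hence the _safe iterator */
}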
@@ -2401,7 +2401,8 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
 			dev_err(ud->ddev.dev,
 				"Descriptor pool allocation failed\n");
 			uc->use_dma_pool = false;
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto err_res_free;
 		}
 
 		uc->use_dma_pool = true;
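The k3-udma fix replaces an early return with a jump to the function's existing unwind label so that resources claimed earlier in bcdma_alloc_chan_resources() are released on this failure path too. In general form (the label name err_res_free comes from the hunk; what it actually frees is not shown here, so the example_* helpers below are hypothetical stand-ins):

struct example_chan {
	void *rings;
	void *pool;
};

static int example_alloc_chan_resources(struct example_chan *uc)
{
	int ret;

	ret = example_claim_rings(uc);          /* hypothetical earlier step */
	if (ret)
		return ret;                     /* nothing to undo yet */

	uc->pool = example_create_pool(uc);     /* hypothetical descriptor pool */
	if (!uc->pool) {
		ret = -ENOMEM;
		goto err_res_free;              /* must undo the earlier claim */
	}

	return 0;

err_res_free:
	example_release_rings(uc);              /* mirror of example_claim_rings() */
	return ret;
}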