2019-06-04 16:11:33 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2007-07-12 02:04:50 +08:00
|
|
|
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
* Copyright (C) 2010 ST-Ericsson SA
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/moduleparam.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/device.h>
|
2014-03-17 20:56:32 +08:00
|
|
|
#include <linux/io.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/interrupt.h>
|
2011-01-31 05:06:53 +08:00
|
|
|
#include <linux/kernel.h>
|
2012-04-16 17:18:43 +08:00
|
|
|
#include <linux/slab.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/highmem.h>
|
2007-10-11 13:06:03 +08:00
|
|
|
#include <linux/log2.h>
|
2018-12-06 23:13:31 +08:00
|
|
|
#include <linux/mmc/mmc.h>
|
2013-01-07 22:35:06 +08:00
|
|
|
#include <linux/mmc/pm.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/mmc/host.h>
|
2010-10-19 19:43:58 +08:00
|
|
|
#include <linux/mmc/card.h>
|
2020-01-28 17:06:34 +08:00
|
|
|
#include <linux/mmc/sd.h>
|
2014-03-17 20:56:19 +08:00
|
|
|
#include <linux/mmc/slot-gpio.h>
|
2006-01-07 21:52:45 +08:00
|
|
|
#include <linux/amba/bus.h>
|
2006-01-08 00:15:52 +08:00
|
|
|
#include <linux/clk.h>
|
2007-10-24 15:01:09 +08:00
|
|
|
#include <linux/scatterlist.h>
|
2018-09-21 07:01:10 +08:00
|
|
|
#include <linux/of.h>
|
2009-09-22 21:41:40 +08:00
|
|
|
#include <linux/regulator/consumer.h>
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
#include <linux/dmaengine.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/amba/mmci.h>
|
2011-08-14 16:17:05 +08:00
|
|
|
#include <linux/pm_runtime.h>
|
2012-02-01 18:42:19 +08:00
|
|
|
#include <linux/types.h>
|
2012-10-29 21:39:30 +08:00
|
|
|
#include <linux/pinctrl/consumer.h>
|
2018-10-08 20:08:51 +08:00
|
|
|
#include <linux/reset.h>
|
2021-01-25 01:02:56 +08:00
|
|
|
#include <linux/gpio/consumer.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-01 19:02:59 +08:00
|
|
|
#include <asm/div64.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/io.h>
|
|
|
|
|
|
|
|
#include "mmci.h"
|
|
|
|
|
|
|
|
#define DRIVER_NAME "mmci-pl18x"
|
|
|
|
|
2019-03-06 22:04:56 +08:00
|
|
|
static void mmci_variant_init(struct mmci_host *host);
|
2019-10-08 17:56:03 +08:00
|
|
|
static void ux500_variant_init(struct mmci_host *host);
|
2019-03-27 17:05:29 +08:00
|
|
|
static void ux500v2_variant_init(struct mmci_host *host);
|
2018-10-08 20:08:33 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static unsigned int fmax = 515633;
|
|
|
|
|
2010-07-21 19:54:40 +08:00
|
|
|
static struct variant_data variant_arm = {
|
2010-08-09 19:57:30 +08:00
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2010-07-21 19:55:59 +08:00
|
|
|
.datalength_bits = 16,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2011-12-13 23:54:55 +08:00
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
2014-06-02 17:09:47 +08:00
|
|
|
.f_max = 100000000,
|
2014-06-13 19:21:38 +08:00
|
|
|
.reversed_irq_handling = true,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_ROD,
|
2018-10-08 20:08:33 +08:00
|
|
|
.init = mmci_variant_init,
|
2010-07-21 19:54:40 +08:00
|
|
|
};
|
|
|
|
|
2011-03-12 01:18:07 +08:00
|
|
|
static struct variant_data variant_arm_extended_fifo = {
|
|
|
|
.fifosize = 128 * 4,
|
|
|
|
.fifohalfsize = 64 * 4,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2011-03-12 01:18:07 +08:00
|
|
|
.datalength_bits = 16,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2011-12-13 23:54:55 +08:00
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
2014-06-02 17:09:47 +08:00
|
|
|
.f_max = 100000000,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_ROD,
|
2018-10-08 20:08:33 +08:00
|
|
|
.init = mmci_variant_init,
|
2011-03-12 01:18:07 +08:00
|
|
|
};
|
|
|
|
|
2013-01-24 21:12:45 +08:00
|
|
|
static struct variant_data variant_arm_extended_fifo_hwfc = {
|
|
|
|
.fifosize = 128 * 4,
|
|
|
|
.fifohalfsize = 64 * 4,
|
|
|
|
.clkreg_enable = MCI_ARM_HWFCEN,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2013-01-24 21:12:45 +08:00
|
|
|
.datalength_bits = 16,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2013-01-24 21:12:45 +08:00
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
2014-06-02 17:09:47 +08:00
|
|
|
.f_max = 100000000,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_ROD,
|
2018-10-08 20:08:33 +08:00
|
|
|
.init = mmci_variant_init,
|
2013-01-24 21:12:45 +08:00
|
|
|
};
|
|
|
|
|
2010-07-21 19:54:40 +08:00
|
|
|
static struct variant_data variant_u300 = {
|
2010-08-09 19:57:30 +08:00
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
2011-03-04 21:54:16 +08:00
|
|
|
.clkreg_enable = MCI_ST_U300_HWFCEN,
|
2014-06-02 17:09:23 +08:00
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2010-07-21 19:55:59 +08:00
|
|
|
.datalength_bits = 16,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2016-10-25 17:06:05 +08:00
|
|
|
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
2014-08-22 12:55:16 +08:00
|
|
|
.st_sdio = true,
|
2011-12-13 23:54:55 +08:00
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 17:09:47 +08:00
|
|
|
.f_max = 100000000,
|
2011-12-13 23:57:07 +08:00
|
|
|
.signal_direction = true,
|
2013-01-10 00:19:54 +08:00
|
|
|
.pwrreg_clkgate = true,
|
2013-09-04 16:05:17 +08:00
|
|
|
.pwrreg_nopower = true,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_OD,
|
2018-10-08 20:08:33 +08:00
|
|
|
.init = mmci_variant_init,
|
2010-07-21 19:54:40 +08:00
|
|
|
};
|
|
|
|
|
2012-04-11 00:43:59 +08:00
|
|
|
static struct variant_data variant_nomadik = {
|
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
2016-01-04 09:22:08 +08:00
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2012-04-11 00:43:59 +08:00
|
|
|
.datalength_bits = 24,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2016-10-25 17:06:05 +08:00
|
|
|
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
2014-08-22 12:55:16 +08:00
|
|
|
.st_sdio = true,
|
2012-04-11 00:43:59 +08:00
|
|
|
.st_clkdiv = true,
|
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 17:09:47 +08:00
|
|
|
.f_max = 100000000,
|
2012-04-11 00:43:59 +08:00
|
|
|
.signal_direction = true,
|
2013-01-10 00:19:54 +08:00
|
|
|
.pwrreg_clkgate = true,
|
2013-09-04 16:05:17 +08:00
|
|
|
.pwrreg_nopower = true,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_OD,
|
2018-10-08 20:08:33 +08:00
|
|
|
.init = mmci_variant_init,
|
2012-04-11 00:43:59 +08:00
|
|
|
};
|
|
|
|
|
2010-07-21 19:54:40 +08:00
|
|
|
static struct variant_data variant_ux500 = {
|
2010-08-09 19:57:30 +08:00
|
|
|
.fifosize = 30 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
2010-07-21 19:54:40 +08:00
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
2011-03-04 21:54:16 +08:00
|
|
|
.clkreg_enable = MCI_ST_UX500_HWFCEN,
|
2014-06-02 17:09:23 +08:00
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
2014-06-02 17:09:30 +08:00
|
|
|
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2010-07-21 19:55:59 +08:00
|
|
|
.datalength_bits = 24,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2019-12-17 22:39:52 +08:00
|
|
|
.datactrl_any_blocksz = true,
|
|
|
|
.dma_power_of_2 = true,
|
2016-10-25 17:06:05 +08:00
|
|
|
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
2014-08-22 12:55:16 +08:00
|
|
|
.st_sdio = true,
|
2010-12-06 16:24:14 +08:00
|
|
|
.st_clkdiv = true,
|
2011-12-13 23:54:55 +08:00
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 17:09:47 +08:00
|
|
|
.f_max = 100000000,
|
2011-12-13 23:57:07 +08:00
|
|
|
.signal_direction = true,
|
2013-01-10 00:19:54 +08:00
|
|
|
.pwrreg_clkgate = true,
|
2013-05-16 03:53:22 +08:00
|
|
|
.busy_detect = true,
|
2016-10-25 17:06:06 +08:00
|
|
|
.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
|
|
|
|
.busy_detect_flag = MCI_ST_CARDBUSY,
|
|
|
|
.busy_detect_mask = MCI_ST_BUSYENDMASK,
|
2013-09-04 16:05:17 +08:00
|
|
|
.pwrreg_nopower = true,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_OD,
|
2019-10-08 17:56:03 +08:00
|
|
|
.init = ux500_variant_init,
|
2010-07-21 19:54:40 +08:00
|
|
|
};
|
2010-12-06 16:24:14 +08:00
|
|
|
|
2011-03-25 15:51:52 +08:00
|
|
|
static struct variant_data variant_ux500v2 = {
|
|
|
|
.fifosize = 30 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
|
|
|
.clkreg_enable = MCI_ST_UX500_HWFCEN,
|
2014-06-02 17:09:23 +08:00
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
2014-06-02 17:09:30 +08:00
|
|
|
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2016-10-25 17:06:05 +08:00
|
|
|
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
|
2011-03-25 15:51:52 +08:00
|
|
|
.datalength_bits = 24,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2019-12-17 22:39:52 +08:00
|
|
|
.datactrl_any_blocksz = true,
|
|
|
|
.dma_power_of_2 = true,
|
2016-10-25 17:06:05 +08:00
|
|
|
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
2014-08-22 12:55:16 +08:00
|
|
|
.st_sdio = true,
|
2011-03-25 15:51:52 +08:00
|
|
|
.st_clkdiv = true,
|
2011-12-13 23:54:55 +08:00
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 17:09:47 +08:00
|
|
|
.f_max = 100000000,
|
2011-12-13 23:57:07 +08:00
|
|
|
.signal_direction = true,
|
2013-01-10 00:19:54 +08:00
|
|
|
.pwrreg_clkgate = true,
|
2013-05-16 03:53:22 +08:00
|
|
|
.busy_detect = true,
|
2016-10-25 17:06:06 +08:00
|
|
|
.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
|
|
|
|
.busy_detect_flag = MCI_ST_CARDBUSY,
|
|
|
|
.busy_detect_mask = MCI_ST_BUSYENDMASK,
|
2013-09-04 16:05:17 +08:00
|
|
|
.pwrreg_nopower = true,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_OD,
|
2019-03-27 17:05:29 +08:00
|
|
|
.init = ux500v2_variant_init,
|
2011-03-25 15:51:52 +08:00
|
|
|
};
|
|
|
|
|
2018-01-18 22:34:21 +08:00
|
|
|
static struct variant_data variant_stm32 = {
|
|
|
|
.fifosize = 32 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
|
|
|
.clkreg_enable = MCI_ST_UX500_HWFCEN,
|
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
|
|
|
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:21 +08:00
|
|
|
.datalength_bits = 24,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2018-01-18 22:34:21 +08:00
|
|
|
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
|
|
|
.st_sdio = true,
|
|
|
|
.st_clkdiv = true,
|
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
|
|
|
.f_max = 48000000,
|
|
|
|
.pwrreg_clkgate = true,
|
|
|
|
.pwrreg_nopower = true,
|
2018-10-08 20:08:33 +08:00
|
|
|
.init = mmci_variant_init,
|
2018-01-18 22:34:21 +08:00
|
|
|
};
|
|
|
|
|
2018-10-08 20:08:55 +08:00
|
|
|
static struct variant_data variant_stm32_sdmmc = {
|
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.f_max = 208000000,
|
|
|
|
.stm32_clkdiv = true,
|
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
|
2018-12-06 23:13:31 +08:00
|
|
|
.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
|
2018-10-08 20:08:55 +08:00
|
|
|
.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
|
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
|
|
|
|
.datactrl_first = true,
|
|
|
|
.datacnt_useless = true,
|
|
|
|
.datalength_bits = 25,
|
|
|
|
.datactrl_blocksz = 14,
|
2019-12-17 22:39:52 +08:00
|
|
|
.datactrl_any_blocksz = true,
|
2020-06-11 21:28:39 +08:00
|
|
|
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
2018-10-08 20:08:55 +08:00
|
|
|
.stm32_idmabsize_mask = GENMASK(12, 5),
|
2019-10-08 17:56:04 +08:00
|
|
|
.busy_timeout = true,
|
|
|
|
.busy_detect = true,
|
|
|
|
.busy_detect_flag = MCI_STM32_BUSYD0,
|
|
|
|
.busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
|
2018-10-08 20:08:55 +08:00
|
|
|
.init = sdmmc_variant_init,
|
|
|
|
};
|
|
|
|
|
2020-01-28 17:06:36 +08:00
|
|
|
static struct variant_data variant_stm32_sdmmcv2 = {
|
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
2021-12-15 22:17:25 +08:00
|
|
|
.f_max = 267000000,
|
2020-01-28 17:06:36 +08:00
|
|
|
.stm32_clkdiv = true,
|
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
|
|
|
|
.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
|
|
|
|
.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
|
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
|
|
|
|
.datactrl_first = true,
|
|
|
|
.datacnt_useless = true,
|
|
|
|
.datalength_bits = 25,
|
|
|
|
.datactrl_blocksz = 14,
|
|
|
|
.datactrl_any_blocksz = true,
|
2020-06-11 21:28:39 +08:00
|
|
|
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
2020-01-28 17:06:36 +08:00
|
|
|
.stm32_idmabsize_mask = GENMASK(16, 5),
|
|
|
|
.dma_lli = true,
|
|
|
|
.busy_timeout = true,
|
|
|
|
.busy_detect = true,
|
|
|
|
.busy_detect_flag = MCI_STM32_BUSYD0,
|
|
|
|
.busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
|
|
|
|
.init = sdmmc_variant_init,
|
|
|
|
};
|
|
|
|
|
2014-06-02 17:10:13 +08:00
|
|
|
static struct variant_data variant_qcom = {
|
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
|
|
|
.clkreg_enable = MCI_QCOM_CLK_FLOWENA |
|
|
|
|
MCI_QCOM_CLK_SELECT_IN_FBCLK,
|
|
|
|
.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
|
|
|
|
.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
|
2018-10-08 20:08:45 +08:00
|
|
|
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
|
|
|
|
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
|
|
|
|
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
|
|
|
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
2016-10-25 17:06:05 +08:00
|
|
|
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
|
2014-06-02 17:10:13 +08:00
|
|
|
.datalength_bits = 24,
|
2018-10-08 20:08:43 +08:00
|
|
|
.datactrl_blocksz = 11,
|
2019-12-17 22:39:52 +08:00
|
|
|
.datactrl_any_blocksz = true,
|
2014-06-02 17:10:13 +08:00
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
|
|
|
.f_max = 208000000,
|
|
|
|
.explicit_mclk_control = true,
|
|
|
|
.qcom_fifo = true,
|
2014-07-29 10:50:30 +08:00
|
|
|
.qcom_dml = true,
|
2018-01-18 22:34:17 +08:00
|
|
|
.mmcimask1 = true,
|
2018-10-08 20:08:47 +08:00
|
|
|
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
2018-01-18 22:34:18 +08:00
|
|
|
.start_err = MCI_STARTBITERR,
|
2018-01-18 22:34:19 +08:00
|
|
|
.opendrain = MCI_ROD,
|
2018-07-16 19:08:18 +08:00
|
|
|
.init = qcom_variant_init,
|
2014-06-02 17:10:13 +08:00
|
|
|
};
|
|
|
|
|
2016-10-25 17:06:06 +08:00
|
|
|
/* Busy detection for the ST Micro variant */
|
2013-05-16 03:53:22 +08:00
|
|
|
static int mmci_card_busy(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
unsigned long flags;
|
|
|
|
int busy = 0;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&host->lock, flags);
|
2016-10-25 17:06:06 +08:00
|
|
|
if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
|
2013-05-16 03:53:22 +08:00
|
|
|
busy = 1;
|
|
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
|
|
|
|
|
|
|
return busy;
|
|
|
|
}
|
|
|
|
|
2013-09-04 16:01:15 +08:00
|
|
|
static void mmci_reg_delay(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* According to the spec, at least three feedback clock cycles
|
|
|
|
* of max 52 MHz must pass between two writes to the MMCICLOCK reg.
|
|
|
|
* Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
|
|
|
|
* Worst delay time during card init is at 100 kHz => 30 us.
|
|
|
|
* Worst delay time when up and running is at 25 MHz => 120 ns.
|
|
|
|
*/
|
|
|
|
if (host->cclk < 25000000)
|
|
|
|
udelay(30);
|
|
|
|
else
|
|
|
|
ndelay(120);
|
|
|
|
}
|
|
|
|
|
2012-01-18 16:17:27 +08:00
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
2018-10-08 20:08:42 +08:00
|
|
|
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
|
2012-01-18 16:17:27 +08:00
|
|
|
{
|
|
|
|
if (host->clk_reg != clk) {
|
|
|
|
host->clk_reg = clk;
|
|
|
|
writel(clk, host->base + MMCICLOCK);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
2018-10-08 20:08:42 +08:00
|
|
|
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
|
2012-01-18 16:17:27 +08:00
|
|
|
{
|
|
|
|
if (host->pwr_reg != pwr) {
|
|
|
|
host->pwr_reg = pwr;
|
|
|
|
writel(pwr, host->base + MMCIPOWER);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-16 03:48:23 +08:00
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
|
|
|
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
|
|
|
|
{
|
2016-10-25 17:06:06 +08:00
|
|
|
/* Keep busy mode in DPSM if enabled */
|
|
|
|
datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
|
2013-05-16 03:53:22 +08:00
|
|
|
|
2013-05-16 03:48:23 +08:00
|
|
|
if (host->datactrl_reg != datactrl) {
|
|
|
|
host->datactrl_reg = datactrl;
|
|
|
|
writel(datactrl, host->base + MMCIDATACTRL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-14 19:56:14 +08:00
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
|
|
|
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
|
|
|
|
{
|
2010-07-21 19:54:40 +08:00
|
|
|
struct variant_data *variant = host->variant;
|
|
|
|
u32 clk = variant->clkreg;
|
2009-09-14 19:56:14 +08:00
|
|
|
|
2013-05-13 22:40:03 +08:00
|
|
|
/* Make sure cclk reflects the current calculated clock */
|
|
|
|
host->cclk = 0;
|
|
|
|
|
2009-09-14 19:56:14 +08:00
|
|
|
if (desired) {
|
2014-06-02 17:09:55 +08:00
|
|
|
if (variant->explicit_mclk_control) {
|
|
|
|
host->cclk = host->mclk;
|
|
|
|
} else if (desired >= host->mclk) {
|
2010-12-10 16:35:53 +08:00
|
|
|
clk = MCI_CLK_BYPASS;
|
2011-04-01 14:59:17 +08:00
|
|
|
if (variant->st_clkdiv)
|
|
|
|
clk |= MCI_ST_UX500_NEG_EDGE;
|
2009-09-14 19:56:14 +08:00
|
|
|
host->cclk = host->mclk;
|
2010-12-06 16:24:14 +08:00
|
|
|
} else if (variant->st_clkdiv) {
|
|
|
|
/*
|
|
|
|
* DB8500 TRM says f = mclk / (clkdiv + 2)
|
|
|
|
* => clkdiv = (mclk / f) - 2
|
|
|
|
* Round the divider up so we don't exceed the max
|
|
|
|
* frequency
|
|
|
|
*/
|
|
|
|
clk = DIV_ROUND_UP(host->mclk, desired) - 2;
|
|
|
|
if (clk >= 256)
|
|
|
|
clk = 255;
|
|
|
|
host->cclk = host->mclk / (clk + 2);
|
2009-09-14 19:56:14 +08:00
|
|
|
} else {
|
2010-12-06 16:24:14 +08:00
|
|
|
/*
|
|
|
|
* PL180 TRM says f = mclk / (2 * (clkdiv + 1))
|
|
|
|
* => clkdiv = mclk / (2 * f) - 1
|
|
|
|
*/
|
2009-09-14 19:56:14 +08:00
|
|
|
clk = host->mclk / (2 * desired) - 1;
|
|
|
|
if (clk >= 256)
|
|
|
|
clk = 255;
|
|
|
|
host->cclk = host->mclk / (2 * (clk + 1));
|
|
|
|
}
|
2010-07-21 19:55:18 +08:00
|
|
|
|
|
|
|
clk |= variant->clkreg_enable;
|
2009-09-14 19:56:14 +08:00
|
|
|
clk |= MCI_CLK_ENABLE;
|
|
|
|
/* This hasn't proven to be worthwhile */
|
|
|
|
/* clk |= MCI_CLK_PWRSAVE; */
|
|
|
|
}
|
|
|
|
|
2013-05-13 22:40:03 +08:00
|
|
|
/* Set actual clock for debug */
|
|
|
|
host->mmc->actual_clock = host->cclk;
|
|
|
|
|
2009-09-14 19:57:11 +08:00
|
|
|
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
|
2010-04-08 14:38:52 +08:00
|
|
|
clk |= MCI_4BIT_BUS;
|
|
|
|
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
|
2014-06-02 17:09:23 +08:00
|
|
|
clk |= variant->clkreg_8bit_bus_enable;
|
2009-09-14 19:57:11 +08:00
|
|
|
|
2014-03-14 20:12:13 +08:00
|
|
|
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
|
|
|
|
host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
|
2014-06-02 17:09:30 +08:00
|
|
|
clk |= variant->clkreg_neg_edge_enable;
|
2013-01-07 22:30:44 +08:00
|
|
|
|
2012-01-18 16:17:27 +08:00
|
|
|
mmci_write_clkreg(host, clk);
|
2009-09-14 19:56:14 +08:00
|
|
|
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
static void mmci_dma_release(struct mmci_host *host)
|
2018-10-08 20:08:33 +08:00
|
|
|
{
|
|
|
|
if (host->ops && host->ops->dma_release)
|
|
|
|
host->ops->dma_release(host);
|
|
|
|
|
|
|
|
host->use_dma = false;
|
|
|
|
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
static void mmci_dma_setup(struct mmci_host *host)
|
2018-10-08 20:08:33 +08:00
|
|
|
{
|
|
|
|
if (!host->ops || !host->ops->dma_setup)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (host->ops->dma_setup(host))
|
|
|
|
return;
|
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
/* initialize pre request cookie */
|
|
|
|
host->next_cookie = 1;
|
|
|
|
|
2018-10-08 20:08:33 +08:00
|
|
|
host->use_dma = true;
|
|
|
|
}
|
|
|
|
|
2018-10-08 20:08:41 +08:00
|
|
|
/*
|
|
|
|
* Validate mmc prerequisites
|
|
|
|
*/
|
|
|
|
static int mmci_validate_data(struct mmci_host *host,
|
|
|
|
struct mmc_data *data)
|
|
|
|
{
|
2019-12-17 22:39:52 +08:00
|
|
|
struct variant_data *variant = host->variant;
|
|
|
|
|
2018-10-08 20:08:41 +08:00
|
|
|
if (!data)
|
|
|
|
return 0;
|
2019-12-17 22:39:52 +08:00
|
|
|
if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
|
2018-10-08 20:08:41 +08:00
|
|
|
dev_err(mmc_dev(host->mmc),
|
|
|
|
"unsupported block size (%d bytes)\n", data->blksz);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (host->ops && host->ops->validate_data)
|
|
|
|
return host->ops->validate_data(host, data);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
/* Run the variant prep hook; assign a positive host cookie on pre-requests. */
static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int ret;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	ret = host->ops->prep_data(host, data, next);

	if (next && !ret) {
		/* Cookies must stay positive; restart at 1 on overflow. */
		if (++host->next_cookie < 0)
			data->host_cookie = 1;
		else
			data->host_cookie = host->next_cookie;
	}

	return ret;
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
|
2018-10-08 20:08:36 +08:00
|
|
|
int err)
|
|
|
|
{
|
|
|
|
if (host->ops && host->ops->unprep_data)
|
|
|
|
host->ops->unprep_data(host, data, err);
|
|
|
|
|
|
|
|
data->host_cookie = 0;
|
|
|
|
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
|
2018-10-08 20:08:37 +08:00
|
|
|
{
|
|
|
|
WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
|
|
|
|
|
|
|
|
if (host->ops && host->ops->get_next_data)
|
|
|
|
host->ops->get_next_data(host, data);
|
|
|
|
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
|
2018-10-08 20:08:38 +08:00
|
|
|
{
|
|
|
|
struct mmc_data *data = host->data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!host->use_dma)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
ret = mmci_prep_data(host, data, false);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (!host->ops || !host->ops->dma_start)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Okay, go for it. */
|
|
|
|
dev_vdbg(mmc_dev(host->mmc),
|
|
|
|
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
|
|
|
|
data->sg_len, data->blksz, data->blocks, data->flags);
|
|
|
|
|
2019-12-17 22:39:52 +08:00
|
|
|
ret = host->ops->dma_start(host, &datactrl);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-10-08 20:08:38 +08:00
|
|
|
|
|
|
|
/* Trigger the DMA transfer */
|
|
|
|
mmci_write_datactrlreg(host, datactrl);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Let the MMCI say when the data is ended and it's time
|
|
|
|
* to fire next DMA request. When that happens, MMCI will
|
|
|
|
* call mmci_data_end()
|
|
|
|
*/
|
|
|
|
writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
|
|
|
|
host->base + MMCIMASK0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
|
2018-10-08 20:08:39 +08:00
|
|
|
{
|
|
|
|
if (!host->use_dma)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (host->ops && host->ops->dma_finalize)
|
|
|
|
host->ops->dma_finalize(host, data);
|
|
|
|
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
static void mmci_dma_error(struct mmci_host *host)
|
2018-10-08 20:08:40 +08:00
|
|
|
{
|
|
|
|
if (!host->use_dma)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (host->ops && host->ops->dma_error)
|
|
|
|
host->ops->dma_error(host);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void
|
|
|
|
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
|
|
|
|
{
|
|
|
|
writel(0, host->base + MMCICOMMAND);
|
|
|
|
|
2007-01-09 00:42:51 +08:00
|
|
|
BUG_ON(host->data);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
host->mrq = NULL;
|
|
|
|
host->cmd = NULL;
|
|
|
|
|
|
|
|
mmc_request_done(host->mmc, mrq);
|
|
|
|
}
|
|
|
|
|
2010-10-19 19:39:48 +08:00
|
|
|
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
|
|
|
|
{
|
|
|
|
void __iomem *base = host->base;
|
2018-01-18 22:34:17 +08:00
|
|
|
struct variant_data *variant = host->variant;
|
2010-10-19 19:39:48 +08:00
|
|
|
|
|
|
|
if (host->singleirq) {
|
|
|
|
unsigned int mask0 = readl(base + MMCIMASK0);
|
|
|
|
|
2018-10-08 20:08:47 +08:00
|
|
|
mask0 &= ~variant->irq_pio_mask;
|
2010-10-19 19:39:48 +08:00
|
|
|
mask0 |= mask;
|
|
|
|
|
|
|
|
writel(mask0, base + MMCIMASK0);
|
|
|
|
}
|
|
|
|
|
2018-01-18 22:34:17 +08:00
|
|
|
if (variant->mmcimask1)
|
|
|
|
writel(mask, base + MMCIMASK1);
|
|
|
|
|
|
|
|
host->mask1_reg = mask;
|
2010-10-19 19:39:48 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Stop an ongoing data transfer: disable the data path, mask the
 * data-path (PIO) interrupts, and drop the driver's reference to the
 * mmc_data being processed.
 */
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
|
|
|
|
|
2010-07-21 19:44:58 +08:00
|
|
|
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
|
|
|
unsigned int flags = SG_MITER_ATOMIC;
|
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ)
|
|
|
|
flags |= SG_MITER_TO_SG;
|
|
|
|
else
|
|
|
|
flags |= SG_MITER_FROM_SG;
|
|
|
|
|
|
|
|
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
|
|
|
|
}
|
|
|
|
|
2019-03-27 17:05:29 +08:00
|
|
|
static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
|
|
|
|
}
|
|
|
|
|
2019-10-08 17:56:03 +08:00
|
|
|
/*
 * Ux500 busy-detection handling for DAT0 busy signaling.
 *
 * Returns true when command processing may continue, false while the
 * driver is still waiting for busy signaling on DAT0 to end. The
 * in-progress state is tracked in host->busy_status.
 */
static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
{
	void __iomem *base = host->base;

	/*
	 * Before unmasking for the busy end IRQ, confirm that the
	 * command was sent successfully. To keep track of having a
	 * command in-progress, waiting for busy signaling to end,
	 * store the status in host->busy_status.
	 *
	 * Note that, the card may need a couple of clock cycles before
	 * it starts signaling busy on DAT0, hence re-read the
	 * MMCISTATUS register here, to allow the busy bit to be set.
	 * Potentially we may even need to poll the register for a
	 * while, to allow it to be set, but tests indicates that it
	 * isn't needed.
	 */
	if (!host->busy_status && !(status & err_msk) &&
	    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
		/* Unmask the busy-detect IRQ so we see the busy end edge. */
		writel(readl(base + MMCIMASK0) |
		       host->variant->busy_detect_mask,
		       base + MMCIMASK0);

		host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
		return false;
	}

	/*
	 * If there is a command in-progress that has been successfully
	 * sent, then bail out if busy status is set and wait for the
	 * busy end IRQ.
	 *
	 * Note that, the HW triggers an IRQ on both edges while
	 * monitoring DAT0 for busy completion, but there is only one
	 * status bit in MMCISTATUS for the busy state. Therefore
	 * both the start and the end interrupts needs to be cleared,
	 * one after the other. So, clear the busy start IRQ here.
	 */
	if (host->busy_status &&
	    (status & host->variant->busy_detect_flag)) {
		writel(host->variant->busy_detect_mask, base + MMCICLEAR);
		return false;
	}

	/*
	 * If there is a command in-progress that has been successfully
	 * sent and the busy bit isn't set, it means we have received
	 * the busy end IRQ. Clear and mask the IRQ, then continue to
	 * process the command.
	 */
	if (host->busy_status) {
		writel(host->variant->busy_detect_mask, base + MMCICLEAR);

		writel(readl(base + MMCIMASK0) &
		       ~host->variant->busy_detect_mask, base + MMCIMASK0);
		host->busy_status = 0;
	}

	return true;
}
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
/*
|
|
|
|
* All the DMA operation mode stuff goes inside this ifdef.
|
|
|
|
* This assumes that you have a generic DMA device interface,
|
|
|
|
* no custom DMA interfaces are supported.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_DMA_ENGINE
|
2018-10-08 20:08:34 +08:00
|
|
|
/*
 * A DMA descriptor/channel pair prepared ahead of time.
 * NOTE(review): presumably used to pre-map the next request while the
 * current one is in flight — confirm against the pre/post-request hooks.
 */
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;	/* prepared descriptor */
	struct dma_chan *chan;			/* channel the descriptor was prepared on */
};
|
|
|
|
|
|
|
|
/*
 * Per-host private state for the dmaengine-based DMA implementation,
 * hung off host->dma_priv by mmci_dmae_setup().
 */
struct mmci_dmae_priv {
	struct dma_chan *cur;			/* NOTE(review): presumably the channel in use — verify */
	struct dma_chan *rx_channel;
	struct dma_chan *tx_channel;		/* may alias rx_channel (see mmci_dmae_setup()) */
	struct dma_async_tx_descriptor *desc_current;
	struct mmci_dmae_next next_data;	/* descriptor prepared for the next transfer */
};
|
|
|
|
|
2018-10-08 20:08:33 +08:00
|
|
|
int mmci_dmae_setup(struct mmci_host *host)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
{
|
|
|
|
const char *rxname, *txname;
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
|
|
|
|
if (!dmae)
|
|
|
|
return -ENOMEM;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
host->dma_priv = dmae;
|
|
|
|
|
2019-12-17 19:27:37 +08:00
|
|
|
dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
|
|
|
|
if (IS_ERR(dmae->rx_channel)) {
|
|
|
|
int ret = PTR_ERR(dmae->rx_channel);
|
|
|
|
dmae->rx_channel = NULL;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
|
|
|
|
if (IS_ERR(dmae->tx_channel)) {
|
|
|
|
if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
|
|
|
|
dev_warn(mmc_dev(host->mmc),
|
|
|
|
"Deferred probe for TX channel ignored\n");
|
|
|
|
dmae->tx_channel = NULL;
|
|
|
|
}
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2013-05-03 19:51:17 +08:00
|
|
|
/*
|
|
|
|
* If only an RX channel is specified, the driver will
|
2022-06-08 21:08:47 +08:00
|
|
|
* attempt to use it bidirectionally, however if it
|
2013-05-03 19:51:17 +08:00
|
|
|
* is specified but cannot be located, DMA will be disabled.
|
|
|
|
*/
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->rx_channel && !dmae->tx_channel)
|
|
|
|
dmae->tx_channel = dmae->rx_channel;
|
2013-05-03 19:51:17 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->rx_channel)
|
|
|
|
rxname = dma_chan_name(dmae->rx_channel);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
else
|
|
|
|
rxname = "none";
|
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->tx_channel)
|
|
|
|
txname = dma_chan_name(dmae->tx_channel);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
else
|
|
|
|
txname = "none";
|
|
|
|
|
|
|
|
dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
|
|
|
|
rxname, txname);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Limit the maximum segment size in any SG entry according to
|
|
|
|
* the parameters of the DMA engine device.
|
|
|
|
*/
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->tx_channel) {
|
|
|
|
struct device *dev = dmae->tx_channel->device->dev;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
unsigned int max_seg_size = dma_get_max_seg_size(dev);
|
|
|
|
|
|
|
|
if (max_seg_size < host->mmc->max_seg_size)
|
|
|
|
host->mmc->max_seg_size = max_seg_size;
|
|
|
|
}
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->rx_channel) {
|
|
|
|
struct device *dev = dmae->rx_channel->device->dev;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
unsigned int max_seg_size = dma_get_max_seg_size(dev);
|
|
|
|
|
|
|
|
if (max_seg_size < host->mmc->max_seg_size)
|
|
|
|
host->mmc->max_seg_size = max_seg_size;
|
|
|
|
}
|
2014-07-29 10:50:30 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
if (!dmae->tx_channel || !dmae->rx_channel) {
|
2018-10-08 20:08:33 +08:00
|
|
|
mmci_dmae_release(host);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Historically called from __devinit/__devexit paths, hence kept
 * self-contained so the code could be discarded when unused.
 */
|
2018-10-08 20:08:33 +08:00
|
|
|
void mmci_dmae_release(struct mmci_host *host)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
|
|
|
|
|
|
|
if (dmae->rx_channel)
|
|
|
|
dma_release_channel(dmae->rx_channel);
|
|
|
|
if (dmae->tx_channel)
|
|
|
|
dma_release_channel(dmae->tx_channel);
|
|
|
|
dmae->rx_channel = dmae->tx_channel = NULL;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
2013-01-22 04:29:34 +08:00
|
|
|
struct dma_chan *chan;
|
|
|
|
|
2017-03-27 02:45:56 +08:00
|
|
|
if (data->flags & MMC_DATA_READ)
|
2018-10-08 20:08:34 +08:00
|
|
|
chan = dmae->rx_channel;
|
2017-03-27 02:45:56 +08:00
|
|
|
else
|
2018-10-08 20:08:34 +08:00
|
|
|
chan = dmae->tx_channel;
|
2013-01-22 04:29:34 +08:00
|
|
|
|
2017-03-27 02:45:56 +08:00
|
|
|
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
|
|
|
|
mmc_get_dma_dir(data));
|
2013-01-22 04:29:34 +08:00
|
|
|
}
|
|
|
|
|
2018-10-08 20:08:40 +08:00
|
|
|
void mmci_dmae_error(struct mmci_host *host)
|
2018-09-21 17:45:55 +08:00
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
|
|
|
|
2018-10-08 20:08:40 +08:00
|
|
|
if (!dma_inprogress(host))
|
2018-09-21 17:45:56 +08:00
|
|
|
return;
|
|
|
|
|
2018-09-21 17:45:55 +08:00
|
|
|
dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
|
2018-10-08 20:08:34 +08:00
|
|
|
dmaengine_terminate_all(dmae->cur);
|
2018-09-21 17:45:55 +08:00
|
|
|
host->dma_in_progress = false;
|
2018-10-08 20:08:34 +08:00
|
|
|
dmae->cur = NULL;
|
|
|
|
dmae->desc_current = NULL;
|
2018-09-21 17:45:55 +08:00
|
|
|
host->data->host_cookie = 0;
|
|
|
|
|
|
|
|
mmci_dma_unmap(host, host->data);
|
|
|
|
}
|
|
|
|
|
2018-10-08 20:08:39 +08:00
|
|
|
void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
|
2013-01-22 04:29:34 +08:00
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
u32 status;
|
|
|
|
int i;
|
|
|
|
|
2018-10-08 20:08:39 +08:00
|
|
|
if (!dma_inprogress(host))
|
2018-09-21 17:45:56 +08:00
|
|
|
return;
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
/* Wait up to 1ms for the DMA to complete */
|
|
|
|
for (i = 0; ; i++) {
|
|
|
|
status = readl(host->base + MMCISTATUS);
|
|
|
|
if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
|
|
|
|
break;
|
|
|
|
udelay(10);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check to see whether we still have some data left in the FIFO -
|
|
|
|
* this catches DMA controllers which are unable to monitor the
|
|
|
|
* DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
|
|
|
|
* contiguous buffers. On TX, we'll get a FIFO underrun error.
|
|
|
|
*/
|
|
|
|
if (status & MCI_RXDATAAVLBLMASK) {
|
2018-10-08 20:08:40 +08:00
|
|
|
mmci_dma_error(host);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
if (!data->error)
|
|
|
|
data->error = -EIO;
|
2018-09-21 17:45:55 +08:00
|
|
|
} else if (!data->host_cookie) {
|
2013-01-22 04:29:34 +08:00
|
|
|
mmci_dma_unmap(host, data);
|
2018-09-21 17:45:55 +08:00
|
|
|
}
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use of DMA with scatter-gather is impossible.
|
|
|
|
* Give up with DMA and switch back to PIO mode.
|
|
|
|
*/
|
|
|
|
if (status & MCI_RXDATAAVLBLMASK) {
|
|
|
|
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
|
|
|
|
mmci_dma_release(host);
|
|
|
|
}
|
|
|
|
|
2017-01-27 22:04:54 +08:00
|
|
|
host->dma_in_progress = false;
|
2018-10-08 20:08:34 +08:00
|
|
|
dmae->cur = NULL;
|
|
|
|
dmae->desc_current = NULL;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
}
|
|
|
|
|
2013-01-22 04:29:34 +08:00
|
|
|
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
|
2018-10-08 20:08:36 +08:00
|
|
|
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
|
2013-01-22 04:29:34 +08:00
|
|
|
struct dma_chan **dma_chan,
|
|
|
|
struct dma_async_tx_descriptor **dma_desc)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
struct variant_data *variant = host->variant;
|
|
|
|
struct dma_slave_config conf = {
|
|
|
|
.src_addr = host->phybase + MMCIFIFO,
|
|
|
|
.dst_addr = host->phybase + MMCIFIFO,
|
|
|
|
.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
|
|
|
|
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
|
|
|
|
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
|
|
|
|
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
|
2012-02-01 18:42:19 +08:00
|
|
|
.device_fc = false,
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
};
|
|
|
|
struct dma_chan *chan;
|
|
|
|
struct dma_device *device;
|
|
|
|
struct dma_async_tx_descriptor *desc;
|
|
|
|
int nr_sg;
|
2014-07-29 10:50:30 +08:00
|
|
|
unsigned long flags = DMA_CTRL_ACK;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ) {
|
2011-10-14 13:15:11 +08:00
|
|
|
conf.direction = DMA_DEV_TO_MEM;
|
2018-10-08 20:08:34 +08:00
|
|
|
chan = dmae->rx_channel;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
} else {
|
2011-10-14 13:15:11 +08:00
|
|
|
conf.direction = DMA_MEM_TO_DEV;
|
2018-10-08 20:08:34 +08:00
|
|
|
chan = dmae->tx_channel;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If there's no DMA channel, fall back to PIO */
|
|
|
|
if (!chan)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* If less than or equal to the fifo size, don't bother with DMA */
|
2011-07-02 00:55:24 +08:00
|
|
|
if (data->blksz * data->blocks <= variant->fifosize)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2019-12-17 22:39:52 +08:00
|
|
|
/*
|
|
|
|
* This is necessary to get SDIO working on the Ux500. We do not yet
|
|
|
|
* know if this is a bug in:
|
|
|
|
* - The Ux500 DMA controller (DMA40)
|
|
|
|
* - The MMCI DMA interface on the Ux500
|
|
|
|
* some power of two blocks (such as 64 bytes) are sent regularly
|
|
|
|
* during SDIO traffic and those work fine so for these we enable DMA
|
|
|
|
* transfers.
|
|
|
|
*/
|
|
|
|
if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
|
|
|
|
return -EINVAL;
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
device = chan->device;
|
2017-03-27 02:45:56 +08:00
|
|
|
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
|
|
|
|
mmc_get_dma_dir(data));
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
if (nr_sg == 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-07-29 10:50:30 +08:00
|
|
|
if (host->variant->qcom_dml)
|
|
|
|
flags |= DMA_PREP_INTERRUPT;
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
dmaengine_slave_config(chan, &conf);
|
2012-03-09 05:11:18 +08:00
|
|
|
desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
|
2014-07-29 10:50:30 +08:00
|
|
|
conf.direction, flags);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
if (!desc)
|
|
|
|
goto unmap_exit;
|
|
|
|
|
2013-01-22 04:29:34 +08:00
|
|
|
*dma_chan = chan;
|
|
|
|
*dma_desc = desc;
|
2011-07-02 00:55:24 +08:00
|
|
|
|
|
|
|
return 0;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
2011-07-02 00:55:24 +08:00
|
|
|
unmap_exit:
|
2017-03-27 02:45:56 +08:00
|
|
|
dma_unmap_sg(device->dev, data->sg, data->sg_len,
|
|
|
|
mmc_get_dma_dir(data));
|
2011-07-02 00:55:24 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2018-10-08 20:08:36 +08:00
|
|
|
int mmci_dmae_prep_data(struct mmci_host *host,
|
|
|
|
struct mmc_data *data,
|
|
|
|
bool next)
|
2013-01-22 04:29:34 +08:00
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
2018-10-08 20:08:35 +08:00
|
|
|
struct mmci_dmae_next *nd = &dmae->next_data;
|
2018-10-08 20:08:34 +08:00
|
|
|
|
2018-10-08 20:08:36 +08:00
|
|
|
if (!host->use_dma)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-10-08 20:08:35 +08:00
|
|
|
if (next)
|
2018-10-08 20:08:36 +08:00
|
|
|
return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
|
2013-01-22 04:29:34 +08:00
|
|
|
/* Check if next job is already prepared. */
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->cur && dmae->desc_current)
|
2013-01-22 04:29:34 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* No job were prepared thus do it now. */
|
2018-10-08 20:08:36 +08:00
|
|
|
return _mmci_dmae_prep_data(host, data, &dmae->cur,
|
2018-10-08 20:08:34 +08:00
|
|
|
&dmae->desc_current);
|
2013-01-22 04:29:34 +08:00
|
|
|
}
|
|
|
|
|
2018-10-08 20:08:38 +08:00
|
|
|
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
|
2011-07-02 00:55:24 +08:00
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
2019-12-17 22:39:52 +08:00
|
|
|
int ret;
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2017-01-27 22:04:54 +08:00
|
|
|
host->dma_in_progress = true;
|
2019-12-17 22:39:52 +08:00
|
|
|
ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
|
|
|
|
if (ret < 0) {
|
|
|
|
host->dma_in_progress = false;
|
|
|
|
return ret;
|
|
|
|
}
|
2018-10-08 20:08:34 +08:00
|
|
|
dma_async_issue_pending(dmae->cur);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
2018-10-08 20:08:38 +08:00
|
|
|
*datactrl |= MCI_DPSM_DMAENABLE;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
|
|
|
return 0;
|
2011-07-02 00:55:24 +08:00
|
|
|
}
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
2018-10-08 20:08:37 +08:00
|
|
|
void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
|
2011-07-02 00:55:24 +08:00
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
|
|
|
struct mmci_dmae_next *next = &dmae->next_data;
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2018-10-08 20:08:33 +08:00
|
|
|
if (!host->use_dma)
|
|
|
|
return;
|
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
WARN_ON(!data->host_cookie && (next->desc || next->chan));
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
dmae->desc_current = next->desc;
|
|
|
|
dmae->cur = next->chan;
|
|
|
|
next->desc = NULL;
|
|
|
|
next->chan = NULL;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
}
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2018-10-08 20:08:36 +08:00
|
|
|
void mmci_dmae_unprep_data(struct mmci_host *host,
|
|
|
|
struct mmc_data *data, int err)
|
2011-07-02 00:55:24 +08:00
|
|
|
|
|
|
|
{
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_priv *dmae = host->dma_priv;
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2018-10-08 20:08:36 +08:00
|
|
|
if (!host->use_dma)
|
2011-07-02 00:55:24 +08:00
|
|
|
return;
|
|
|
|
|
2013-01-22 04:29:34 +08:00
|
|
|
mmci_dma_unmap(host, data);
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2013-01-22 04:29:34 +08:00
|
|
|
if (err) {
|
2018-10-08 20:08:34 +08:00
|
|
|
struct mmci_dmae_next *next = &dmae->next_data;
|
2013-01-22 04:29:34 +08:00
|
|
|
struct dma_chan *chan;
|
|
|
|
if (data->flags & MMC_DATA_READ)
|
2018-10-08 20:08:34 +08:00
|
|
|
chan = dmae->rx_channel;
|
2013-01-22 04:29:34 +08:00
|
|
|
else
|
2018-10-08 20:08:34 +08:00
|
|
|
chan = dmae->tx_channel;
|
2013-01-22 04:29:34 +08:00
|
|
|
dmaengine_terminate_all(chan);
|
2011-07-02 00:55:24 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->desc_current == next->desc)
|
|
|
|
dmae->desc_current = NULL;
|
2014-10-08 19:25:17 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
if (dmae->cur == next->chan) {
|
2017-01-27 22:04:54 +08:00
|
|
|
host->dma_in_progress = false;
|
2018-10-08 20:08:34 +08:00
|
|
|
dmae->cur = NULL;
|
2017-01-27 22:04:54 +08:00
|
|
|
}
|
2014-10-08 19:25:17 +08:00
|
|
|
|
2018-10-08 20:08:34 +08:00
|
|
|
next->desc = NULL;
|
|
|
|
next->chan = NULL;
|
2011-07-02 00:55:24 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-08 20:08:33 +08:00
|
|
|
/*
 * Host ops used when dmaengine support is compiled in: route the generic
 * mmci hooks to the dmaengine-based implementations above.
 * Deliberately non-const: the ux500 variant init functions patch
 * individual hooks at probe time.
 */
static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};
#else
/*
 * Without dmaengine support only the datactrl hook is needed.
 * NOTE(review): the matching #if is outside this chunk — presumably a
 * CONFIG_DMA_ENGINE conditional; confirm against the full file.
 */
static struct mmci_host_ops mmci_variant_ops = {
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif
|
2018-10-08 20:08:33 +08:00
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
/* Default variant setup: install the generic host operations. */
static void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}
|
2019-03-27 17:05:29 +08:00
|
|
|
|
2019-10-08 17:56:03 +08:00
|
|
|
/*
 * Ux500 variant setup: generic ops plus the ux500 busy-detect handler.
 * NOTE(review): this writes through host->ops into the shared static
 * mmci_variant_ops, so the hook is patched globally, not per host —
 * presumably all probed hosts use the same variant; confirm.
 */
static void ux500_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->busy_complete = ux500_busy_complete;
}
|
|
|
|
|
2019-09-25 19:42:31 +08:00
|
|
|
/*
 * Ux500v2 variant setup: like ux500 (busy-detect handler) plus a
 * v2-specific datactrl configuration hook.
 * NOTE(review): as with ux500_variant_init, this mutates the shared
 * static ops struct rather than a per-host copy.
 */
static void ux500v2_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->busy_complete = ux500_busy_complete;
	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
2018-10-08 20:08:36 +08:00
|
|
|
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
struct mmc_data *data = mrq->data;
|
|
|
|
|
|
|
|
if (!data)
|
|
|
|
return;
|
|
|
|
|
|
|
|
WARN_ON(data->host_cookie);
|
|
|
|
|
|
|
|
if (mmci_validate_data(host, data))
|
|
|
|
return;
|
|
|
|
|
|
|
|
mmci_prep_data(host, data, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
|
|
|
|
int err)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
struct mmc_data *data = mrq->data;
|
|
|
|
|
|
|
|
if (!data || !data->host_cookie)
|
|
|
|
return;
|
|
|
|
|
|
|
|
mmci_unprep_data(host, data, err);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
2010-08-09 19:57:30 +08:00
|
|
|
struct variant_data *variant = host->variant;
|
2005-04-17 06:20:36 +08:00
|
|
|
unsigned int datactrl, timeout, irqmask;
|
2005-07-01 19:02:59 +08:00
|
|
|
unsigned long long clks;
|
2005-04-17 06:20:36 +08:00
|
|
|
void __iomem *base;
|
|
|
|
|
2010-02-19 08:09:10 +08:00
|
|
|
dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
|
|
|
|
data->blksz, data->blocks, data->flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
host->data = data;
|
2010-07-21 19:49:49 +08:00
|
|
|
host->size = data->blksz * data->blocks;
|
2011-01-27 18:56:52 +08:00
|
|
|
data->bytes_xfered = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-01 19:02:59 +08:00
|
|
|
clks = (unsigned long long)data->timeout_ns * host->cclk;
|
2014-06-02 17:08:39 +08:00
|
|
|
do_div(clks, NSEC_PER_SEC);
|
2005-07-01 19:02:59 +08:00
|
|
|
|
|
|
|
timeout = data->timeout_clks + (unsigned int)clks;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
base = host->base;
|
|
|
|
writel(timeout, base + MMCIDATATIMER);
|
|
|
|
writel(host->size, base + MMCIDATALENGTH);
|
|
|
|
|
2019-03-27 17:05:32 +08:00
|
|
|
datactrl = host->ops->get_datactrl_cfg(host);
|
|
|
|
datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
|
2014-08-22 12:55:16 +08:00
|
|
|
if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
|
|
|
|
u32 clk;
|
2011-12-14 00:05:28 +08:00
|
|
|
|
2014-08-22 12:55:16 +08:00
|
|
|
datactrl |= variant->datactrl_mask_sdio;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The ST Micro variant for SDIO small write transfers
|
|
|
|
* needs to have clock H/W flow control disabled,
|
|
|
|
* otherwise the transfer will not start. The threshold
|
|
|
|
* depends on the rate of MCLK.
|
|
|
|
*/
|
|
|
|
if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
|
|
|
|
(host->size < 8 ||
|
|
|
|
(host->size <= 8 && host->mclk > 50000000)))
|
|
|
|
clk = host->clk_reg & ~variant->clkreg_enable;
|
|
|
|
else
|
|
|
|
clk = host->clk_reg | variant->clkreg_enable;
|
|
|
|
|
|
|
|
mmci_write_clkreg(host, clk);
|
|
|
|
}
|
2012-10-12 21:01:50 +08:00
|
|
|
|
2014-03-14 20:12:13 +08:00
|
|
|
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
|
|
|
|
host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
|
2014-06-02 17:09:15 +08:00
|
|
|
datactrl |= variant->datactrl_mask_ddrmode;
|
2013-01-07 22:30:44 +08:00
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
/*
|
|
|
|
* Attempt to use DMA operation mode, if this
|
|
|
|
* should fail, fall back to PIO mode
|
|
|
|
*/
|
2018-10-08 20:08:38 +08:00
|
|
|
if (!mmci_dma_start(host, datactrl))
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* IRQ mode, map the SG list for CPU reading/writing */
|
|
|
|
mmci_init_sg(host, data);
|
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ) {
|
2005-04-17 06:20:36 +08:00
|
|
|
irqmask = MCI_RXFIFOHALFFULLMASK;
|
2006-02-17 00:48:31 +08:00
|
|
|
|
|
|
|
/*
|
2011-01-27 17:50:13 +08:00
|
|
|
* If we have less than the fifo 'half-full' threshold to
|
|
|
|
* transfer, trigger a PIO interrupt as soon as any data
|
|
|
|
* is available.
|
2006-02-17 00:48:31 +08:00
|
|
|
*/
|
2011-01-27 17:50:13 +08:00
|
|
|
if (host->size < variant->fifohalfsize)
|
2006-02-17 00:48:31 +08:00
|
|
|
irqmask |= MCI_RXDATAAVLBLMASK;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* We don't actually need to include "FIFO empty" here
|
|
|
|
* since its implicit in "FIFO half empty".
|
|
|
|
*/
|
|
|
|
irqmask = MCI_TXFIFOHALFEMPTYMASK;
|
|
|
|
}
|
|
|
|
|
2013-05-16 03:48:23 +08:00
|
|
|
mmci_write_datactrlreg(host, datactrl);
|
2005-04-17 06:20:36 +08:00
|
|
|
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
|
2010-10-19 19:39:48 +08:00
|
|
|
mmci_set_mask1(host, irqmask);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
|
|
|
|
{
|
|
|
|
void __iomem *base = host->base;
|
2019-10-08 17:56:02 +08:00
|
|
|
unsigned long long clks;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-02-19 08:09:10 +08:00
|
|
|
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
|
2005-04-17 06:20:36 +08:00
|
|
|
cmd->opcode, cmd->arg, cmd->flags);
|
|
|
|
|
2018-10-08 20:08:45 +08:00
|
|
|
if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
|
2005-04-17 06:20:36 +08:00
|
|
|
writel(0, base + MMCICOMMAND);
|
2014-06-02 17:08:57 +08:00
|
|
|
mmci_reg_delay(host);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2018-12-06 23:13:31 +08:00
|
|
|
if (host->variant->cmdreg_stop &&
|
|
|
|
cmd->opcode == MMC_STOP_TRANSMISSION)
|
|
|
|
c |= host->variant->cmdreg_stop;
|
|
|
|
|
2018-10-08 20:08:45 +08:00
|
|
|
c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
|
2006-02-02 20:23:12 +08:00
|
|
|
if (cmd->flags & MMC_RSP_PRESENT) {
|
|
|
|
if (cmd->flags & MMC_RSP_136)
|
2018-10-08 20:08:45 +08:00
|
|
|
c |= host->variant->cmdreg_lrsp_crc;
|
|
|
|
else if (cmd->flags & MMC_RSP_CRC)
|
|
|
|
c |= host->variant->cmdreg_srsp_crc;
|
|
|
|
else
|
|
|
|
c |= host->variant->cmdreg_srsp;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2019-10-08 17:56:02 +08:00
|
|
|
|
|
|
|
if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
|
|
|
|
if (!cmd->busy_timeout)
|
|
|
|
cmd->busy_timeout = 10 * MSEC_PER_SEC;
|
|
|
|
|
mmc: mmci: Add MMC_CAP_NEED_RSP_BUSY for the stm32 variants
An issue has been observed on STM32MP157C-EV1 board, with an erase command
with secure erase argument, ending up waiting for ~4 hours before timeout.
The requested busy timeout from the mmc core ends up with 14784000ms (~4
hours), but the supported host->max_busy_timeout is 86767ms, which leads to
that the core switch to use an R1 response in favor of the R1B and polls
for busy with the host->card_busy() ops. In this case the polling doesn't
work as expected, as we never detects that the card stops signaling busy,
which leads to the following message:
mmc1: Card stuck being busy! __mmc_poll_for_busy
The problem boils done to that the stm32 variants can't use R1 responses in
favor of R1B responses, as it leads to an internal state machine in the
controller to get stuck. To continue to process requests, it would need to
be reset.
To fix this problem, let's set MMC_CAP_NEED_RSP_BUSY for the stm32 variant,
which prevent the mmc core from switching to R1 responses. Additionally,
let's cap the cmd->busy_timeout to the host->max_busy_timeout, thus rely on
86767ms to be sufficient (~66 seconds was need for this test case).
Fixes: 94fe2580a2f3 ("mmc: core: Enable erase/discard/trim support for all mmc hosts")
Signed-off-by: Yann Gautier <yann.gautier@foss.st.com>
Link: https://lore.kernel.org/r/20210225145454.12780-1-yann.gautier@foss.st.com
Cc: stable@vger.kernel.org
[Ulf: Simplified the code and extended the commit message]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2021-02-25 22:54:54 +08:00
|
|
|
if (cmd->busy_timeout > host->mmc->max_busy_timeout)
|
|
|
|
clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
|
|
|
|
else
|
|
|
|
clks = (unsigned long long)cmd->busy_timeout * host->cclk;
|
|
|
|
|
2019-10-08 17:56:02 +08:00
|
|
|
do_div(clks, MSEC_PER_SEC);
|
|
|
|
writel_relaxed(clks, host->base + MMCIDATATIMER);
|
|
|
|
}
|
|
|
|
|
2020-01-28 17:06:34 +08:00
|
|
|
if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
|
|
|
|
host->ops->pre_sig_volt_switch(host);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (/*interrupt*/0)
|
|
|
|
c |= MCI_CPSM_INTERRUPT;
|
|
|
|
|
2014-06-02 17:09:39 +08:00
|
|
|
if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
|
|
|
|
c |= host->variant->data_cmd_enable;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
host->cmd = cmd;
|
|
|
|
|
|
|
|
writel(cmd->arg, base + MMCIARGUMENT);
|
|
|
|
writel(c, base + MMCICOMMAND);
|
|
|
|
}
|
|
|
|
|
2019-01-29 22:35:56 +08:00
|
|
|
static void mmci_stop_command(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
host->stop_abort.error = 0;
|
|
|
|
mmci_start_command(host, &host->stop_abort, 0);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void
|
|
|
|
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
|
|
|
|
unsigned int status)
|
|
|
|
{
|
2018-10-08 20:08:44 +08:00
|
|
|
unsigned int status_err;
|
|
|
|
|
2014-06-12 20:42:23 +08:00
|
|
|
/* Make sure we have data to handle */
|
|
|
|
if (!data)
|
|
|
|
return;
|
|
|
|
|
2010-10-19 20:41:24 +08:00
|
|
|
/* First check for errors */
|
2018-10-08 20:08:44 +08:00
|
|
|
status_err = status & (host->variant->start_err |
|
|
|
|
MCI_DATACRCFAIL | MCI_DATATIMEOUT |
|
|
|
|
MCI_TXUNDERRUN | MCI_RXOVERRUN);
|
|
|
|
|
|
|
|
if (status_err) {
|
2011-01-24 22:22:13 +08:00
|
|
|
u32 remain, success;
|
2010-10-19 20:41:24 +08:00
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
/* Terminate the DMA transfer */
|
2018-10-08 20:08:40 +08:00
|
|
|
mmci_dma_error(host);
|
2006-01-05 00:24:05 +08:00
|
|
|
|
|
|
|
/*
|
2011-02-04 17:19:46 +08:00
|
|
|
* Calculate how far we are into the transfer. Note that
|
|
|
|
* the data counter gives the number of bytes transferred
|
|
|
|
* on the MMC bus, not on the host side. On reads, this
|
|
|
|
* can be as much as a FIFO-worth of data ahead. This
|
|
|
|
* matters for FIFO overruns only.
|
2006-01-05 00:24:05 +08:00
|
|
|
*/
|
2018-10-08 20:08:49 +08:00
|
|
|
if (!host->variant->datacnt_useless) {
|
|
|
|
remain = readl(host->base + MMCIDATACNT);
|
|
|
|
success = data->blksz * data->blocks - remain;
|
|
|
|
} else {
|
|
|
|
success = 0;
|
|
|
|
}
|
2011-01-24 22:22:13 +08:00
|
|
|
|
2011-02-04 17:19:46 +08:00
|
|
|
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
|
2018-10-08 20:08:44 +08:00
|
|
|
status_err, success);
|
|
|
|
if (status_err & MCI_DATACRCFAIL) {
|
2011-01-24 22:22:13 +08:00
|
|
|
/* Last block was not successful */
|
2011-02-04 17:19:46 +08:00
|
|
|
success -= 1;
|
2007-07-23 04:18:46 +08:00
|
|
|
data->error = -EILSEQ;
|
2018-10-08 20:08:44 +08:00
|
|
|
} else if (status_err & MCI_DATATIMEOUT) {
|
2007-07-23 04:18:46 +08:00
|
|
|
data->error = -ETIMEDOUT;
|
2018-10-08 20:08:44 +08:00
|
|
|
} else if (status_err & MCI_STARTBITERR) {
|
2011-06-30 22:10:21 +08:00
|
|
|
data->error = -ECOMM;
|
2018-10-08 20:08:44 +08:00
|
|
|
} else if (status_err & MCI_TXUNDERRUN) {
|
2011-02-04 17:19:46 +08:00
|
|
|
data->error = -EIO;
|
2018-10-08 20:08:44 +08:00
|
|
|
} else if (status_err & MCI_RXOVERRUN) {
|
2011-02-04 17:19:46 +08:00
|
|
|
if (success > host->variant->fifosize)
|
|
|
|
success -= host->variant->fifosize;
|
|
|
|
else
|
|
|
|
success = 0;
|
2007-07-23 04:18:46 +08:00
|
|
|
data->error = -EIO;
|
2010-07-21 19:44:58 +08:00
|
|
|
}
|
2011-01-27 18:56:52 +08:00
|
|
|
data->bytes_xfered = round_down(success, data->blksz);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2010-10-19 20:41:24 +08:00
|
|
|
|
2011-01-24 22:22:13 +08:00
|
|
|
if (status & MCI_DATABLOCKEND)
|
|
|
|
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
|
2010-10-19 20:41:24 +08:00
|
|
|
|
2011-01-31 05:03:50 +08:00
|
|
|
if (status & MCI_DATAEND || data->error) {
|
2018-09-21 17:45:56 +08:00
|
|
|
mmci_dma_finalize(host, data);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
mmci_stop_data(host);
|
|
|
|
|
2011-01-24 22:22:13 +08:00
|
|
|
if (!data->error)
|
|
|
|
/* The error clause is handled above, success! */
|
2011-01-27 18:56:52 +08:00
|
|
|
data->bytes_xfered = data->blksz * data->blocks;
|
2010-10-19 20:41:24 +08:00
|
|
|
|
2019-01-29 22:35:56 +08:00
|
|
|
if (!data->stop) {
|
|
|
|
if (host->variant->cmdreg_stop && data->error)
|
|
|
|
mmci_stop_command(host);
|
|
|
|
else
|
|
|
|
mmci_request_end(host, data->mrq);
|
|
|
|
} else if (host->mrq->sbc && !data->error) {
|
2005-04-17 06:20:36 +08:00
|
|
|
mmci_request_end(host, data->mrq);
|
2019-01-29 22:35:56 +08:00
|
|
|
} else {
|
2005-04-17 06:20:36 +08:00
|
|
|
mmci_start_command(host, data->stop, 0);
|
2019-01-29 22:35:56 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|
|
|
unsigned int status)
|
|
|
|
{
|
2019-10-08 17:56:02 +08:00
|
|
|
u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
|
2005-04-17 06:20:36 +08:00
|
|
|
void __iomem *base = host->base;
|
2019-04-26 15:46:34 +08:00
|
|
|
bool sbc, busy_resp;
|
2014-06-12 21:01:57 +08:00
|
|
|
|
|
|
|
if (!cmd)
|
|
|
|
return;
|
|
|
|
|
|
|
|
sbc = (cmd == host->mrq->sbc);
|
2019-04-26 15:46:34 +08:00
|
|
|
busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
|
2014-06-12 21:01:57 +08:00
|
|
|
|
2016-10-25 17:06:06 +08:00
|
|
|
/*
|
|
|
|
* We need to be one of these interrupts to be considered worth
|
|
|
|
* handling. Note that we tag on any latent IRQs postponed
|
|
|
|
* due to waiting for busy status.
|
|
|
|
*/
|
2019-10-08 17:56:02 +08:00
|
|
|
if (host->variant->busy_timeout && busy_resp)
|
|
|
|
err_msk |= MCI_DATATIMEOUT;
|
|
|
|
|
|
|
|
if (!((status | host->busy_status) &
|
|
|
|
(err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
|
2014-06-12 21:01:57 +08:00
|
|
|
return;
|
2014-01-13 23:49:31 +08:00
|
|
|
|
2019-07-23 20:28:09 +08:00
|
|
|
/* Handle busy detection on DAT0 if the variant supports it. */
|
2019-10-08 17:56:03 +08:00
|
|
|
if (busy_resp && host->variant->busy_detect)
|
|
|
|
if (!host->ops->busy_complete(host, status, err_msk))
|
2019-07-23 20:28:09 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
host->cmd = NULL;
|
|
|
|
|
|
|
|
if (status & MCI_CMDTIMEOUT) {
|
2007-07-23 04:18:46 +08:00
|
|
|
cmd->error = -ETIMEDOUT;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
|
2007-07-23 04:18:46 +08:00
|
|
|
cmd->error = -EILSEQ;
|
2019-10-08 17:56:02 +08:00
|
|
|
} else if (host->variant->busy_timeout && busy_resp &&
|
|
|
|
status & MCI_DATATIMEOUT) {
|
|
|
|
cmd->error = -ETIMEDOUT;
|
2021-09-21 22:33:59 +08:00
|
|
|
/*
|
|
|
|
* This will wake up mmci_irq_thread() which will issue
|
|
|
|
* a hardware reset of the MMCI block.
|
|
|
|
*/
|
2019-12-11 21:39:34 +08:00
|
|
|
host->irq_action = IRQ_WAKE_THREAD;
|
2011-01-12 00:35:56 +08:00
|
|
|
} else {
|
|
|
|
cmd->resp[0] = readl(base + MMCIRESPONSE0);
|
|
|
|
cmd->resp[1] = readl(base + MMCIRESPONSE1);
|
|
|
|
cmd->resp[2] = readl(base + MMCIRESPONSE2);
|
|
|
|
cmd->resp[3] = readl(base + MMCIRESPONSE3);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2013-05-13 22:40:56 +08:00
|
|
|
if ((!sbc && !cmd->data) || cmd->error) {
|
2011-12-13 23:58:43 +08:00
|
|
|
if (host->data) {
|
|
|
|
/* Terminate the DMA transfer */
|
2018-10-08 20:08:40 +08:00
|
|
|
mmci_dma_error(host);
|
2018-09-21 17:45:55 +08:00
|
|
|
|
2007-01-09 00:42:51 +08:00
|
|
|
mmci_stop_data(host);
|
2019-01-29 22:35:56 +08:00
|
|
|
if (host->variant->cmdreg_stop && cmd->error) {
|
|
|
|
mmci_stop_command(host);
|
|
|
|
return;
|
|
|
|
}
|
2011-12-13 23:58:43 +08:00
|
|
|
}
|
2019-12-11 21:39:34 +08:00
|
|
|
|
|
|
|
if (host->irq_action != IRQ_WAKE_THREAD)
|
|
|
|
mmci_request_end(host, host->mrq);
|
|
|
|
|
2013-05-13 22:40:56 +08:00
|
|
|
} else if (sbc) {
|
|
|
|
mmci_start_command(host, host->mrq->cmd, 0);
|
2018-10-08 20:08:48 +08:00
|
|
|
} else if (!host->variant->datactrl_first &&
|
|
|
|
!(cmd->data->flags & MMC_DATA_READ)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
mmci_start_data(host, cmd->data);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-02 17:10:04 +08:00
|
|
|
/*
 * Bytes currently available to read from the FIFO: MMCIFIFOCNT is in
 * 32-bit words, hence the << 2 conversion to bytes.
 */
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	u32 words_left = readl(host->base + MMCIFIFOCNT);

	return remain - (words_left << 2);
}
|
|
|
|
|
|
|
|
static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On qcom SDCC4 only 8 words are used in each burst, so only
	 * that part of the FIFO address range should be touched.
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
|
|
|
|
{
|
|
|
|
void __iomem *base = host->base;
|
|
|
|
char *ptr = buffer;
|
2014-06-02 17:10:04 +08:00
|
|
|
u32 status = readl(host->base + MMCISTATUS);
|
2008-04-27 06:39:44 +08:00
|
|
|
int host_remain = host->size;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
do {
|
2014-06-02 17:10:04 +08:00
|
|
|
int count = host->get_rx_fifocnt(host, status, host_remain);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (count > remain)
|
|
|
|
count = remain;
|
|
|
|
|
|
|
|
if (count <= 0)
|
|
|
|
break;
|
|
|
|
|
2011-12-14 00:08:04 +08:00
|
|
|
/*
|
|
|
|
* SDIO especially may want to send something that is
|
|
|
|
* not divisible by 4 (as opposed to card sectors
|
|
|
|
* etc). Therefore make sure to always read the last bytes
|
|
|
|
* while only doing full 32-bit reads towards the FIFO.
|
|
|
|
*/
|
|
|
|
if (unlikely(count & 0x3)) {
|
|
|
|
if (count < 4) {
|
|
|
|
unsigned char buf[4];
|
2012-12-10 21:47:21 +08:00
|
|
|
ioread32_rep(base + MMCIFIFO, buf, 1);
|
2011-12-14 00:08:04 +08:00
|
|
|
memcpy(ptr, buf, count);
|
|
|
|
} else {
|
2012-12-10 21:47:21 +08:00
|
|
|
ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
|
2011-12-14 00:08:04 +08:00
|
|
|
count &= ~0x3;
|
|
|
|
}
|
|
|
|
} else {
|
2012-12-10 21:47:21 +08:00
|
|
|
ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
|
2011-12-14 00:08:04 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
ptr += count;
|
|
|
|
remain -= count;
|
2008-04-27 06:39:44 +08:00
|
|
|
host_remain -= count;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (remain == 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
status = readl(base + MMCISTATUS);
|
|
|
|
} while (status & MCI_RXDATAAVLBL);
|
|
|
|
|
|
|
|
return ptr - buffer;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Feed the TX FIFO from @buffer by PIO.  Returns the number of bytes
 * consumed from the buffer.
 */
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *p = buffer;

	do {
		unsigned int chunk, room;

		/* A fully empty FIFO can take a whole FIFO's worth. */
		if (status & MCI_TXFIFOEMPTY)
			room = variant->fifosize;
		else
			room = variant->fifohalfsize;

		chunk = min(remain, room);

		/*
		 * SDIO may send lengths not divisible by 4 (unlike card
		 * sectors), yet the FIFO only accepts full 32-bit
		 * writes.  Rounding the word count up with +3 makes a
		 * single byte one 32-bit write, 7 bytes two writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, p, (chunk + 3) >> 2);

		p += chunk;
		remain -= chunk;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return p - buffer;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PIO data transfer IRQ handler.
|
|
|
|
*/
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 21:55:46 +08:00
|
|
|
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct mmci_host *host = dev_id;
|
2010-07-21 19:44:58 +08:00
|
|
|
struct sg_mapping_iter *sg_miter = &host->sg_miter;
|
2010-08-09 19:57:30 +08:00
|
|
|
struct variant_data *variant = host->variant;
|
2005-04-17 06:20:36 +08:00
|
|
|
void __iomem *base = host->base;
|
|
|
|
u32 status;
|
|
|
|
|
|
|
|
status = readl(base + MMCISTATUS);
|
|
|
|
|
2010-02-19 08:09:10 +08:00
|
|
|
dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
do {
|
|
|
|
unsigned int remain, len;
|
|
|
|
char *buffer;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For write, we only need to test the half-empty flag
|
|
|
|
* here - if the FIFO is completely empty, then by
|
|
|
|
* definition it is more than half empty.
|
|
|
|
*
|
|
|
|
* For read, check for data available.
|
|
|
|
*/
|
|
|
|
if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
|
|
|
|
break;
|
|
|
|
|
2010-07-21 19:44:58 +08:00
|
|
|
if (!sg_miter_next(sg_miter))
|
|
|
|
break;
|
|
|
|
|
|
|
|
buffer = sg_miter->addr;
|
|
|
|
remain = sg_miter->length;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
len = 0;
|
|
|
|
if (status & MCI_RXACTIVE)
|
|
|
|
len = mmci_pio_read(host, buffer, remain);
|
|
|
|
if (status & MCI_TXACTIVE)
|
|
|
|
len = mmci_pio_write(host, buffer, remain, status);
|
|
|
|
|
2010-07-21 19:44:58 +08:00
|
|
|
sg_miter->consumed = len;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
host->size -= len;
|
|
|
|
remain -= len;
|
|
|
|
|
|
|
|
if (remain)
|
|
|
|
break;
|
|
|
|
|
|
|
|
status = readl(base + MMCISTATUS);
|
|
|
|
} while (1);
|
|
|
|
|
2010-07-21 19:44:58 +08:00
|
|
|
sg_miter_stop(sg_miter);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2011-01-27 17:50:13 +08:00
|
|
|
* If we have less than the fifo 'half-full' threshold to transfer,
|
|
|
|
* trigger a PIO interrupt as soon as any data is available.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2011-01-27 17:50:13 +08:00
|
|
|
if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
|
2010-10-19 19:39:48 +08:00
|
|
|
mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we run out of data, disable the data IRQs; this
|
|
|
|
* prevents a race where the FIFO becomes empty before
|
|
|
|
* the chip itself has disabled the data path, and
|
|
|
|
* stops us racing with our data end IRQ.
|
|
|
|
*/
|
|
|
|
if (host->size == 0) {
|
2010-10-19 19:39:48 +08:00
|
|
|
mmci_set_mask1(host, 0);
|
2005-04-17 06:20:36 +08:00
|
|
|
writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
|
|
|
|
}
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle completion of command and data transfers.
|
|
|
|
*/
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 21:55:46 +08:00
|
|
|
/*
 * mmci_irq() - primary interrupt handler for command and data events.
 *
 * Loops on MMCISTATUS until no enabled event remains pending, dispatching
 * to mmci_cmd_irq()/mmci_data_irq() under host->lock. Returns
 * host->irq_action, which mmci_cmd_irq() may change (e.g. to
 * IRQ_WAKE_THREAD to trigger mmci_irq_thread()).
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;

	spin_lock(&host->lock);
	host->irq_action = IRQ_HANDLED;

	do {
		status = readl(host->base + MMCISTATUS);
		if (!status)
			break;

		if (host->singleirq) {
			/*
			 * Variants with a single IRQ line deliver the PIO
			 * events (normally on the second line) here as well;
			 * dispatch them, then drop the PIO bits so they are
			 * not handled twice below.
			 */
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * Busy detection is managed by mmci_cmd_irq(), including to
		 * clear the corresponding IRQ.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		/* Some variants require data events to be handled first. */
		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Busy detection has been handled by mmci_cmd_irq() above.
		 * Clear the status bit to prevent polling in IRQ context.
		 */
		if (host->variant->busy_detect_flag)
			status &= ~host->variant->busy_detect_flag;

	} while (status);

	spin_unlock(&host->lock);

	return host->irq_action;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
|
|
|
|
*
|
|
|
|
* A reset is needed for some variants, where a datatimeout for a R1B request
|
|
|
|
* causes the DPSM to stay busy (non-functional).
|
|
|
|
*/
|
|
|
|
static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	unsigned long flags;

	/* Pulse the block reset, if one is available, to unwedge the DPSM. */
	if (host->rst) {
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);
	}

	spin_lock_irqsave(&host->lock, flags);
	/*
	 * The reset cleared the peripheral registers; restore the cached
	 * clock/power configuration and re-enable the interrupt mask.
	 */
	writel(host->clk_reg, host->base + MMCICLOCK);
	writel(host->pwr_reg, host->base + MMCIPOWER);
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);

	host->irq_action = IRQ_HANDLED;
	/* Complete the request that got stuck (error already recorded). */
	mmci_request_end(host, host->mrq);
	spin_unlock_irqrestore(&host->lock, flags);

	return host->irq_action;
}
|
|
|
|
|
|
|
|
/*
 * mmci_request() - mmc_host_ops .request callback; start an MMC request.
 *
 * Validates the data portion up front and fails the whole request early
 * if it cannot be handled. Otherwise kicks off the transfer: for reads
 * (or variants needing datactrl programmed first) the data path is set
 * up before the command; completion is driven from the IRQ handlers.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	/* The core must not issue a new request before the previous ends. */
	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	/* Prepare any pre-queued (next) DMA descriptor for this data. */
	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data &&
	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
		mmci_start_data(host, mrq->data);

	/* Issue SET_BLOCK_COUNT (sbc) first when present, else the command. */
	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
|
|
|
|
|
2019-10-08 17:56:02 +08:00
|
|
|
static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
u32 max_busy_timeout = 0;
|
|
|
|
|
|
|
|
if (!host->variant->busy_detect)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (host->variant->busy_timeout && mmc->actual_clock)
|
|
|
|
max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
|
|
|
|
|
|
|
|
mmc->max_busy_timeout = max_busy_timeout;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * mmci_set_ios() - mmc_host_ops .set_ios callback.
 *
 * Applies power mode (including vmmc/vqmmc regulator handling), bus
 * signal direction bits, open-drain configuration, and the requested
 * clock rate; finally programs the MMCICLOCK and MMCIPOWER registers
 * under host->lock via the (possibly variant-specific) reg helpers.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		/* Drop direction-enable bits for lines unused at this width. */
		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads by its own, then we
		 * expect the pinctrl to be able to do that for us
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_default_state(mmc_dev(mmc));
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	/* Variants with explicit clock control set the rate on the clk tree. */
	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	if (host->ops && host->ops->set_clkreg)
		host->ops->set_clkreg(host, ios->clock);
	else
		mmci_set_clkreg(host, ios->clock);

	/* Busy timeout depends on the actual clock just programmed. */
	mmci_set_max_busy_timeout(mmc);

	if (host->ops && host->ops->set_pwrreg)
		host->ops->set_pwrreg(host, pwr);
	else
		mmci_write_pwrreg(host, pwr);

	/* Some variants need a delay after register writes take effect. */
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
|
|
|
|
|
2009-07-09 22:16:07 +08:00
|
|
|
static int mmci_get_cd(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
2010-08-09 19:54:43 +08:00
|
|
|
struct mmci_platform_data *plat = host->plat;
|
2014-03-17 20:56:19 +08:00
|
|
|
unsigned int status = mmc_gpio_get_cd(mmc);
|
2009-07-09 22:16:07 +08:00
|
|
|
|
2014-03-17 20:56:19 +08:00
|
|
|
if (status == -ENOSYS) {
|
2010-08-09 19:56:40 +08:00
|
|
|
if (!plat->status)
|
|
|
|
return 1; /* Assume always present */
|
|
|
|
|
2010-08-09 19:54:43 +08:00
|
|
|
status = plat->status(mmc_dev(host->mmc));
|
2014-03-17 20:56:19 +08:00
|
|
|
}
|
2010-07-29 22:58:59 +08:00
|
|
|
return status;
|
2009-07-09 22:16:07 +08:00
|
|
|
}
|
|
|
|
|
2013-05-16 03:47:33 +08:00
|
|
|
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
|
|
{
|
2020-01-28 17:06:34 +08:00
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
2020-04-17 00:36:49 +08:00
|
|
|
int ret;
|
2013-05-16 03:47:33 +08:00
|
|
|
|
2020-04-17 00:36:49 +08:00
|
|
|
ret = mmc_regulator_set_vqmmc(mmc, ios);
|
2013-05-16 03:47:33 +08:00
|
|
|
|
2020-04-17 00:36:49 +08:00
|
|
|
if (!ret && host->ops && host->ops->post_sig_volt_switch)
|
|
|
|
ret = host->ops->post_sig_volt_switch(host, ios);
|
|
|
|
else if (ret)
|
|
|
|
ret = 0;
|
2020-01-28 17:06:34 +08:00
|
|
|
|
2020-04-17 00:36:49 +08:00
|
|
|
if (ret < 0)
|
|
|
|
dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
|
2013-05-16 03:47:33 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-05-16 03:53:22 +08:00
|
|
|
/* Host operations handed to the MMC core; request/IO paths defined above. */
static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};
|
|
|
|
|
2021-01-25 01:02:56 +08:00
|
|
|
/*
 * mmci_probe_level_translator() - detect a working CK->CKIN level shifter.
 *
 * Temporarily claims the CMD/CK/CKIN pads as GPIOs and toggles CK while
 * sampling CKIN. If CK is not propagated to CKIN, the level translator
 * is absent or inoperable, so MCI_STM32_CLK_SELCKIN is cleared again and
 * a warning is logged. Pads are restored to their default pinctrl state
 * on every exit path.
 */
static void mmci_probe_level_translator(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct mmci_host *host = mmc_priv(mmc);
	struct gpio_desc *cmd_gpio;
	struct gpio_desc *ck_gpio;
	struct gpio_desc *ckin_gpio;
	int clk_hi, clk_lo;

	/*
	 * Assume the level translator is present if st,use-ckin is set.
	 * This is to cater for DTs which do not implement this test.
	 */
	host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

	/* Drive CMD high so the (bidirectional) translator passes CK along. */
	cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
	if (IS_ERR(cmd_gpio))
		goto exit_cmd;

	ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
	if (IS_ERR(ck_gpio))
		goto exit_ck;

	ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
	if (IS_ERR(ckin_gpio))
		goto exit_ckin;

	/* All GPIOs are valid, test whether level translator works */

	/* Sample CKIN */
	clk_hi = !!gpiod_get_value(ckin_gpio);

	/* Set CK low */
	gpiod_set_value(ck_gpio, 0);

	/* Sample CKIN */
	clk_lo = !!gpiod_get_value(ckin_gpio);

	/* Tristate all */
	gpiod_direction_input(cmd_gpio);
	gpiod_direction_input(ck_gpio);

	/* Level translator is present if CK signal is propagated to CKIN */
	if (!clk_hi || clk_lo) {
		host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
		dev_warn(dev,
			 "Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
	}

	gpiod_put(ckin_gpio);

exit_ckin:
	gpiod_put(ck_gpio);
exit_ck:
	gpiod_put(cmd_gpio);
exit_cmd:
	/* Hand the pads back to the SDMMC function in any case. */
	pinctrl_select_default_state(dev);
}
|
|
|
|
|
2014-03-21 17:13:05 +08:00
|
|
|
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
|
2012-04-16 17:18:43 +08:00
|
|
|
{
|
2014-03-21 17:13:05 +08:00
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
int ret = mmc_of_parse(mmc);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2014-03-18 17:46:37 +08:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat0", NULL))
|
2014-03-21 17:13:05 +08:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA0DIREN;
|
2014-03-18 17:46:37 +08:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat2", NULL))
|
2014-03-21 17:13:05 +08:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA2DIREN;
|
2014-03-18 17:46:37 +08:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat31", NULL))
|
2014-03-21 17:13:05 +08:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA31DIREN;
|
2014-03-18 17:46:37 +08:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat74", NULL))
|
2014-03-21 17:13:05 +08:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA74DIREN;
|
2014-03-18 17:46:37 +08:00
|
|
|
if (of_get_property(np, "st,sig-dir-cmd", NULL))
|
2014-03-21 17:13:05 +08:00
|
|
|
host->pwr_reg_add |= MCI_ST_CMDDIREN;
|
2014-03-31 20:19:21 +08:00
|
|
|
if (of_get_property(np, "st,sig-pin-fbclk", NULL))
|
2014-03-21 17:13:05 +08:00
|
|
|
host->pwr_reg_add |= MCI_ST_FBCLKEN;
|
2018-10-08 20:08:55 +08:00
|
|
|
if (of_get_property(np, "st,sig-dir", NULL))
|
|
|
|
host->pwr_reg_add |= MCI_STM32_DIRPOL;
|
|
|
|
if (of_get_property(np, "st,neg-edge", NULL))
|
|
|
|
host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
|
|
|
|
if (of_get_property(np, "st,use-ckin", NULL))
|
2021-01-25 01:02:56 +08:00
|
|
|
mmci_probe_level_translator(mmc);
|
2012-04-16 17:18:43 +08:00
|
|
|
|
|
|
|
if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
|
2014-03-17 22:53:07 +08:00
|
|
|
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
|
2012-04-16 17:18:43 +08:00
|
|
|
if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
|
2014-03-17 22:53:07 +08:00
|
|
|
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
|
2012-04-16 17:18:43 +08:00
|
|
|
|
2014-03-17 22:53:07 +08:00
|
|
|
return 0;
|
2012-05-08 20:59:38 +08:00
|
|
|
}
|
2012-04-16 17:18:43 +08:00
|
|
|
|
2012-11-20 02:23:06 +08:00
|
|
|
static int mmci_probe(struct amba_device *dev,
|
2011-02-19 23:55:00 +08:00
|
|
|
const struct amba_id *id)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-09-22 21:29:36 +08:00
|
|
|
struct mmci_platform_data *plat = dev->dev.platform_data;
|
2012-04-16 17:18:43 +08:00
|
|
|
struct device_node *np = dev->dev.of_node;
|
2010-07-21 19:54:40 +08:00
|
|
|
struct variant_data *variant = id->data;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct mmci_host *host;
|
|
|
|
struct mmc_host *mmc;
|
|
|
|
int ret;
|
|
|
|
|
2012-04-16 17:18:43 +08:00
|
|
|
/* Must have platform data or Device Tree. */
|
|
|
|
if (!plat && !np) {
|
|
|
|
dev_err(&dev->dev, "No plat data or DT found\n");
|
|
|
|
return -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2012-06-12 17:49:51 +08:00
|
|
|
if (!plat) {
|
|
|
|
plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
|
|
|
|
if (!plat)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
|
2014-03-17 20:56:32 +08:00
|
|
|
if (!mmc)
|
|
|
|
return -ENOMEM;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
host = mmc_priv(mmc);
|
2009-04-17 11:14:19 +08:00
|
|
|
host->mmc = mmc;
|
2020-01-28 17:06:30 +08:00
|
|
|
host->mmc_ops = &mmci_ops;
|
|
|
|
mmc->ops = &mmci_ops;
|
2009-07-09 22:13:56 +08:00
|
|
|
|
2021-01-25 01:02:56 +08:00
|
|
|
ret = mmci_of_parse(np, mmc);
|
|
|
|
if (ret)
|
|
|
|
goto host_free;
|
|
|
|
|
2018-01-18 22:34:20 +08:00
|
|
|
/*
|
|
|
|
* Some variant (STM32) doesn't have opendrain bit, nevertheless
|
|
|
|
* pins can be set accordingly using pinctrl
|
|
|
|
*/
|
|
|
|
if (!variant->opendrain) {
|
|
|
|
host->pinctrl = devm_pinctrl_get(&dev->dev);
|
|
|
|
if (IS_ERR(host->pinctrl)) {
|
|
|
|
dev_err(&dev->dev, "failed to get pinctrl");
|
2018-01-23 10:09:13 +08:00
|
|
|
ret = PTR_ERR(host->pinctrl);
|
2018-01-18 22:34:20 +08:00
|
|
|
goto host_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
|
|
|
|
MMCI_PINCTRL_STATE_OPENDRAIN);
|
|
|
|
if (IS_ERR(host->pins_opendrain)) {
|
|
|
|
dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
|
2018-01-23 10:09:13 +08:00
|
|
|
ret = PTR_ERR(host->pins_opendrain);
|
2018-01-18 22:34:20 +08:00
|
|
|
goto host_free;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-07-09 22:13:56 +08:00
|
|
|
host->hw_designer = amba_manf(dev);
|
|
|
|
host->hw_revision = amba_rev(dev);
|
2010-02-19 08:09:10 +08:00
|
|
|
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
|
|
|
|
dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
|
2009-07-09 22:13:56 +08:00
|
|
|
|
2013-05-13 22:39:17 +08:00
|
|
|
host->clk = devm_clk_get(&dev->dev, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (IS_ERR(host->clk)) {
|
|
|
|
ret = PTR_ERR(host->clk);
|
|
|
|
goto host_free;
|
|
|
|
}
|
|
|
|
|
2012-08-27 00:00:59 +08:00
|
|
|
ret = clk_prepare_enable(host->clk);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (ret)
|
2013-05-13 22:39:17 +08:00
|
|
|
goto host_free;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-06-02 17:10:04 +08:00
|
|
|
if (variant->qcom_fifo)
|
|
|
|
host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
|
|
|
|
else
|
|
|
|
host->get_rx_fifocnt = mmci_get_rx_fifocnt;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
host->plat = plat;
|
2010-07-21 19:54:40 +08:00
|
|
|
host->variant = variant;
|
2005-04-17 06:20:36 +08:00
|
|
|
host->mclk = clk_get_rate(host->clk);
|
2008-04-29 16:34:07 +08:00
|
|
|
/*
|
|
|
|
* According to the spec, mclk is max 100 MHz,
|
|
|
|
* so we try to adjust the clock down to this,
|
|
|
|
* (if possible).
|
|
|
|
*/
|
2014-06-02 17:09:47 +08:00
|
|
|
if (host->mclk > variant->f_max) {
|
|
|
|
ret = clk_set_rate(host->clk, variant->f_max);
|
2008-04-29 16:34:07 +08:00
|
|
|
if (ret < 0)
|
|
|
|
goto clk_disable;
|
|
|
|
host->mclk = clk_get_rate(host->clk);
|
2010-02-19 08:09:10 +08:00
|
|
|
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
|
|
|
|
host->mclk);
|
2008-04-29 16:34:07 +08:00
|
|
|
}
|
2014-03-17 20:56:32 +08:00
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
host->phybase = dev->res.start;
|
2014-03-17 20:56:32 +08:00
|
|
|
host->base = devm_ioremap_resource(&dev->dev, &dev->res);
|
|
|
|
if (IS_ERR(host->base)) {
|
|
|
|
ret = PTR_ERR(host->base);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto clk_disable;
|
|
|
|
}
|
|
|
|
|
2018-07-13 19:15:23 +08:00
|
|
|
if (variant->init)
|
|
|
|
variant->init(host);
|
|
|
|
|
2011-07-08 16:57:15 +08:00
|
|
|
/*
|
|
|
|
* The ARM and ST versions of the block have slightly different
|
|
|
|
* clock divider equations which means that the minimum divider
|
|
|
|
* differs too.
|
2014-06-02 17:09:55 +08:00
|
|
|
* on Qualcomm like controllers get the nearest minimum clock to 100Khz
|
2011-07-08 16:57:15 +08:00
|
|
|
*/
|
|
|
|
if (variant->st_clkdiv)
|
|
|
|
mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
|
2018-10-08 20:08:52 +08:00
|
|
|
else if (variant->stm32_clkdiv)
|
|
|
|
mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
|
2014-06-02 17:09:55 +08:00
|
|
|
else if (variant->explicit_mclk_control)
|
|
|
|
mmc->f_min = clk_round_rate(host->clk, 100000);
|
2011-07-08 16:57:15 +08:00
|
|
|
else
|
|
|
|
mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
|
2010-04-08 14:39:38 +08:00
|
|
|
/*
|
2014-03-17 22:53:07 +08:00
|
|
|
* If no maximum operating frequency is supplied, fall back to use
|
|
|
|
* the module parameter, which has a (low) default value in case it
|
|
|
|
* is not specified. Either value must not exceed the clock rate into
|
2014-03-21 17:46:39 +08:00
|
|
|
* the block, of course.
|
2010-04-08 14:39:38 +08:00
|
|
|
*/
|
2014-03-17 22:53:07 +08:00
|
|
|
if (mmc->f_max)
|
2014-06-02 17:09:55 +08:00
|
|
|
mmc->f_max = variant->explicit_mclk_control ?
|
|
|
|
min(variant->f_max, mmc->f_max) :
|
|
|
|
min(host->mclk, mmc->f_max);
|
2010-04-08 14:39:38 +08:00
|
|
|
else
|
2014-06-02 17:09:55 +08:00
|
|
|
mmc->f_max = variant->explicit_mclk_control ?
|
|
|
|
fmax : min(host->mclk, fmax);
|
|
|
|
|
|
|
|
|
2010-02-19 08:09:10 +08:00
|
|
|
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
|
|
|
|
|
2018-10-08 20:08:51 +08:00
|
|
|
host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
|
|
|
|
if (IS_ERR(host->rst)) {
|
|
|
|
ret = PTR_ERR(host->rst);
|
|
|
|
goto clk_disable;
|
|
|
|
}
|
2021-06-30 18:24:08 +08:00
|
|
|
ret = reset_control_deassert(host->rst);
|
|
|
|
if (ret)
|
|
|
|
dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
|
2018-10-08 20:08:51 +08:00
|
|
|
|
2013-01-07 23:22:50 +08:00
|
|
|
/* Get regulators and the supported OCR mask */
|
2015-03-25 09:39:49 +08:00
|
|
|
ret = mmc_regulator_get_supply(mmc);
|
2017-10-15 03:17:14 +08:00
|
|
|
if (ret)
|
2015-03-25 09:39:49 +08:00
|
|
|
goto clk_disable;
|
|
|
|
|
2013-01-07 23:22:50 +08:00
|
|
|
if (!mmc->ocr_avail)
|
2009-09-22 21:41:40 +08:00
|
|
|
mmc->ocr_avail = plat->ocr_mask;
|
2013-01-07 23:22:50 +08:00
|
|
|
else if (plat->ocr_mask)
|
|
|
|
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
|
|
|
|
|
2014-03-19 20:54:18 +08:00
|
|
|
/* We support these capabilities. */
|
|
|
|
mmc->caps |= MMC_CAP_CMD23;
|
|
|
|
|
2016-10-25 17:06:06 +08:00
|
|
|
/*
|
|
|
|
* Enable busy detection.
|
|
|
|
*/
|
2014-01-13 23:49:31 +08:00
|
|
|
if (variant->busy_detect) {
|
|
|
|
mmci_ops.card_busy = mmci_card_busy;
|
2016-10-25 17:06:06 +08:00
|
|
|
/*
|
|
|
|
* Not all variants have a flag to enable busy detection
|
|
|
|
* in the DPSM, but if they do, set it here.
|
|
|
|
*/
|
|
|
|
if (variant->busy_dpsm_flag)
|
|
|
|
mmci_write_datactrlreg(host,
|
|
|
|
host->variant->busy_dpsm_flag);
|
2014-01-13 23:49:31 +08:00
|
|
|
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
|
|
|
|
}
|
|
|
|
|
mmc: mmci: Add MMC_CAP_NEED_RSP_BUSY for the stm32 variants
An issue has been observed on STM32MP157C-EV1 board, with an erase command
with secure erase argument, ending up waiting for ~4 hours before timeout.
The requested busy timeout from the mmc core ends up with 14784000ms (~4
hours), but the supported host->max_busy_timeout is 86767ms, which leads to
that the core switch to use an R1 response in favor of the R1B and polls
for busy with the host->card_busy() ops. In this case the polling doesn't
work as expected, as we never detects that the card stops signaling busy,
which leads to the following message:
mmc1: Card stuck being busy! __mmc_poll_for_busy
The problem boils down to the fact that the stm32 variants can't use R1 responses in
favor of R1B responses, as it leads to an internal state machine in the
controller to get stuck. To continue to process requests, it would need to
be reset.
To fix this problem, let's set MMC_CAP_NEED_RSP_BUSY for the stm32 variant,
which prevent the mmc core from switching to R1 responses. Additionally,
let's cap the cmd->busy_timeout to the host->max_busy_timeout, thus rely on
86767ms to be sufficient (~66 seconds was need for this test case).
Fixes: 94fe2580a2f3 ("mmc: core: Enable erase/discard/trim support for all mmc hosts")
Signed-off-by: Yann Gautier <yann.gautier@foss.st.com>
Link: https://lore.kernel.org/r/20210225145454.12780-1-yann.gautier@foss.st.com
Cc: stable@vger.kernel.org
[Ulf: Simplified the code and extended the commit message]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2021-02-25 22:54:54 +08:00
|
|
|
/* Variants with mandatory busy timeout in HW needs R1B responses. */
|
|
|
|
if (variant->busy_timeout)
|
|
|
|
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
|
|
|
|
|
2019-01-29 22:35:56 +08:00
|
|
|
/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
|
|
|
|
host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
|
|
|
|
host->stop_abort.arg = 0;
|
|
|
|
host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
|
|
|
|
|
2013-01-07 22:35:06 +08:00
|
|
|
/* We support these PM capabilities. */
|
2014-03-17 22:53:07 +08:00
|
|
|
mmc->pm_caps |= MMC_PM_KEEP_POWER;
|
2013-01-07 22:35:06 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* We can do SGIO
|
|
|
|
*/
|
2010-09-10 13:33:59 +08:00
|
|
|
mmc->max_segs = NR_SG;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2010-07-21 19:55:59 +08:00
|
|
|
* Since only a certain number of bits are valid in the data length
|
|
|
|
* register, we must ensure that we don't exceed 2^num-1 bytes in a
|
|
|
|
* single request.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2010-07-21 19:55:59 +08:00
|
|
|
mmc->max_req_size = (1 << variant->datalength_bits) - 1;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the maximum segment size. Since we aren't doing DMA
|
|
|
|
* (yet) we are only limited by the data length register.
|
|
|
|
*/
|
2006-11-22 00:55:45 +08:00
|
|
|
mmc->max_seg_size = mmc->max_req_size;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-11-22 00:54:23 +08:00
|
|
|
/*
|
|
|
|
* Block size can be up to 2048 bytes, but must be a power of two.
|
|
|
|
*/
|
2018-10-08 20:08:43 +08:00
|
|
|
mmc->max_blk_size = 1 << variant->datactrl_blocksz;
|
2006-11-22 00:54:23 +08:00
|
|
|
|
2006-11-22 00:55:45 +08:00
|
|
|
/*
|
2012-02-24 19:25:21 +08:00
|
|
|
* Limit the number of blocks transferred so that we don't overflow
|
|
|
|
* the maximum request size.
|
2006-11-22 00:55:45 +08:00
|
|
|
*/
|
2018-10-08 20:08:43 +08:00
|
|
|
mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
|
2006-11-22 00:55:45 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
spin_lock_init(&host->lock);
|
|
|
|
|
|
|
|
writel(0, host->base + MMCIMASK0);
|
2018-01-18 22:34:17 +08:00
|
|
|
|
|
|
|
if (variant->mmcimask1)
|
|
|
|
writel(0, host->base + MMCIMASK1);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
writel(0xfff, host->base + MMCICLEAR);
|
|
|
|
|
2014-08-27 21:13:54 +08:00
|
|
|
/*
|
|
|
|
* If:
|
|
|
|
* - not using DT but using a descriptor table, or
|
|
|
|
* - using a table of descriptors ALONGSIDE DT, or
|
|
|
|
* look up these descriptors named "cd" and "wp" right here, fail
|
2018-09-21 07:01:10 +08:00
|
|
|
	 * silently if these do not exist
|
2014-08-27 21:13:54 +08:00
|
|
|
*/
|
|
|
|
if (!np) {
|
2019-12-11 10:40:56 +08:00
|
|
|
ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
|
2018-09-21 07:01:10 +08:00
|
|
|
if (ret == -EPROBE_DEFER)
|
|
|
|
goto clk_disable;
|
2014-08-27 21:13:54 +08:00
|
|
|
|
2019-12-11 10:40:56 +08:00
|
|
|
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
|
2018-09-21 07:01:10 +08:00
|
|
|
if (ret == -EPROBE_DEFER)
|
|
|
|
goto clk_disable;
|
2009-07-09 22:16:07 +08:00
|
|
|
}
|
|
|
|
|
2019-12-11 21:39:34 +08:00
|
|
|
ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
|
|
|
|
mmci_irq_thread, IRQF_SHARED,
|
|
|
|
DRIVER_NAME " (cmd)", host);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (ret)
|
2014-03-17 20:56:32 +08:00
|
|
|
goto clk_disable;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-05-03 18:33:15 +08:00
|
|
|
if (!dev->irq[1])
|
2010-10-19 19:39:48 +08:00
|
|
|
host->singleirq = true;
|
|
|
|
else {
|
2014-03-17 20:56:32 +08:00
|
|
|
ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
|
|
|
|
IRQF_SHARED, DRIVER_NAME " (pio)", host);
|
2010-10-19 19:39:48 +08:00
|
|
|
if (ret)
|
2014-03-17 20:56:32 +08:00
|
|
|
goto clk_disable;
|
2010-10-19 19:39:48 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-10-08 20:08:44 +08:00
|
|
|
writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
amba_set_drvdata(dev, mmc);
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
|
|
|
|
mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
|
|
|
|
amba_rev(dev), (unsigned long long)dev->res.start,
|
|
|
|
dev->irq[0], dev->irq[1]);
|
|
|
|
|
|
|
|
mmci_dma_setup(host);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-12-14 00:01:11 +08:00
|
|
|
pm_runtime_set_autosuspend_delay(&dev->dev, 50);
|
|
|
|
pm_runtime_use_autosuspend(&dev->dev);
|
2011-08-14 16:17:05 +08:00
|
|
|
|
2010-12-29 03:40:40 +08:00
|
|
|
mmc_add_host(mmc);
|
|
|
|
|
2014-12-11 21:35:55 +08:00
|
|
|
pm_runtime_put(&dev->dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
clk_disable:
|
2012-08-27 00:00:59 +08:00
|
|
|
clk_disable_unprepare(host->clk);
|
2005-04-17 06:20:36 +08:00
|
|
|
host_free:
|
|
|
|
mmc_free_host(mmc);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-01-27 00:58:34 +08:00
|
|
|
static void mmci_remove(struct amba_device *dev)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct mmc_host *mmc = amba_get_drvdata(dev);
|
|
|
|
|
|
|
|
if (mmc) {
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
2018-01-18 22:34:17 +08:00
|
|
|
struct variant_data *variant = host->variant;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-08-14 16:17:05 +08:00
|
|
|
/*
|
|
|
|
* Undo pm_runtime_put() in probe. We use the _sync
|
|
|
|
* version here so that we can access the primecell.
|
|
|
|
*/
|
|
|
|
pm_runtime_get_sync(&dev->dev);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
mmc_remove_host(mmc);
|
|
|
|
|
|
|
|
writel(0, host->base + MMCIMASK0);
|
2018-01-18 22:34:17 +08:00
|
|
|
|
|
|
|
if (variant->mmcimask1)
|
|
|
|
writel(0, host->base + MMCIMASK1);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
writel(0, host->base + MMCICOMMAND);
|
|
|
|
writel(0, host->base + MMCIDATACTRL);
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-12 03:35:53 +08:00
|
|
|
mmci_dma_release(host);
|
2012-08-27 00:00:59 +08:00
|
|
|
clk_disable_unprepare(host->clk);
|
2005-04-17 06:20:36 +08:00
|
|
|
mmc_free_host(mmc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-23 07:38:00 +08:00
|
|
|
#ifdef CONFIG_PM
|
2013-09-04 16:05:17 +08:00
|
|
|
static void mmci_save(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
2014-01-23 07:19:38 +08:00
|
|
|
spin_lock_irqsave(&host->lock, flags);
|
2013-09-04 16:05:17 +08:00
|
|
|
|
2014-01-23 07:19:38 +08:00
|
|
|
writel(0, host->base + MMCIMASK0);
|
|
|
|
if (host->variant->pwrreg_nopower) {
|
2013-09-04 16:05:17 +08:00
|
|
|
writel(0, host->base + MMCIDATACTRL);
|
|
|
|
writel(0, host->base + MMCIPOWER);
|
|
|
|
writel(0, host->base + MMCICLOCK);
|
|
|
|
}
|
2014-01-23 07:19:38 +08:00
|
|
|
mmci_reg_delay(host);
|
2013-09-04 16:05:17 +08:00
|
|
|
|
2014-01-23 07:19:38 +08:00
|
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
2013-09-04 16:05:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void mmci_restore(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
2014-01-23 07:19:38 +08:00
|
|
|
spin_lock_irqsave(&host->lock, flags);
|
2013-09-04 16:05:17 +08:00
|
|
|
|
2014-01-23 07:19:38 +08:00
|
|
|
if (host->variant->pwrreg_nopower) {
|
2013-09-04 16:05:17 +08:00
|
|
|
writel(host->clk_reg, host->base + MMCICLOCK);
|
|
|
|
writel(host->datactrl_reg, host->base + MMCIDATACTRL);
|
|
|
|
writel(host->pwr_reg, host->base + MMCIPOWER);
|
|
|
|
}
|
2018-10-08 20:08:44 +08:00
|
|
|
writel(MCI_IRQENABLE | host->variant->start_err,
|
|
|
|
host->base + MMCIMASK0);
|
2014-01-23 07:19:38 +08:00
|
|
|
mmci_reg_delay(host);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
2013-09-04 16:05:17 +08:00
|
|
|
}
|
|
|
|
|
2013-01-09 18:15:26 +08:00
|
|
|
static int mmci_runtime_suspend(struct device *dev)
|
|
|
|
{
|
|
|
|
struct amba_device *adev = to_amba_device(dev);
|
|
|
|
struct mmc_host *mmc = amba_get_drvdata(adev);
|
|
|
|
|
|
|
|
if (mmc) {
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
2013-09-04 16:00:37 +08:00
|
|
|
pinctrl_pm_select_sleep_state(dev);
|
2013-09-04 16:05:17 +08:00
|
|
|
mmci_save(host);
|
2013-01-09 18:15:26 +08:00
|
|
|
clk_disable_unprepare(host->clk);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mmci_runtime_resume(struct device *dev)
|
|
|
|
{
|
|
|
|
struct amba_device *adev = to_amba_device(dev);
|
|
|
|
struct mmc_host *mmc = amba_get_drvdata(adev);
|
|
|
|
|
|
|
|
if (mmc) {
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
clk_prepare_enable(host->clk);
|
2013-09-04 16:05:17 +08:00
|
|
|
mmci_restore(host);
|
2019-12-07 01:08:15 +08:00
|
|
|
pinctrl_select_default_state(dev);
|
2013-01-09 18:15:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2011-12-13 23:59:34 +08:00
|
|
|
/*
 * PM operations: system sleep is routed through the generic
 * runtime-PM force helpers so the same save/restore path is used for
 * both runtime PM and system suspend/resume.
 */
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
|
|
|
|
|
2017-08-24 00:30:49 +08:00
|
|
|
/*
 * AMBA peripheral ID match table.  Each entry binds a PrimeCell
 * ID/mask pair to the variant_data describing that silicon flavour.
 */
static const struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x10180180,
		.mask   = 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id     = 0x10480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{
		.id     = 0x00880180,
		.mask   = 0x00ffffff,
		.data	= &variant_stm32,
	},
	{
		.id     = 0x10153180,
		.mask	= 0xf0ffffff,
		.data	= &variant_stm32_sdmmc,
	},
	{
		.id     = 0x00253180,
		.mask	= 0xf0ffffff,
		.data	= &variant_stm32_sdmmcv2,
	},
	{
		.id     = 0x20253180,
		.mask	= 0xf0ffffff,
		.data	= &variant_stm32_sdmmcv2,
	},
	/* Qualcomm variants */
	{
		.id     = 0x00051180,
		.mask	= 0x000fffff,
		.data	= &variant_qcom,
	},
	{ 0, 0 },	/* sentinel */
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* AMBA driver registration: probe/remove entry points and PM ops. */
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};
|
|
|
|
|
2012-03-15 17:40:38 +08:00
|
|
|
module_amba_driver(mmci_driver);

/* Allow the default maximum bus frequency to be overridden at load time. */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
|