2019-05-29 22:17:56 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2011-01-02 12:52:56 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2010 Google, Inc.
|
|
|
|
*/
|
|
|
|
|
2016-03-01 04:56:25 +08:00
|
|
|
#include <linux/delay.h>
|
mmc: tegra: Implement ->set_dma_mask()
The SDHCI controller on Tegra186 supports 40-bit addressing, which is
usually enough to address all of system memory. However, if the SDHCI
controller is behind an IOMMU, the address space can go beyond. This
happens on Tegra186 and later where the ARM SMMU has an input address
space of 48 bits. If the DMA API is backed by this ARM SMMU, the top-
down IOVA allocator will cause IOV addresses to be returned that the
SDHCI controller cannot access.
Unfortunately, prior to the introduction of the ->set_dma_mask() host
operation, the SDHCI core would set either a 64-bit DMA mask if the
controller claimed to support 64-bit addressing, or a 32-bit DMA mask
otherwise.
Since the full 64 bits cannot be addressed on Tegra, this had to be
worked around in commit 68481a7e1c84 ("mmc: tegra: Mark 64 bit dma
broken on Tegra186") by setting the SDHCI_QUIRK2_BROKEN_64_BIT_DMA
quirk, which effectively restricts the DMA mask to 32 bits.
One disadvantage of this is that dma_map_*() APIs will now try to use
the swiotlb to bounce DMA to addresses beyond of the controller's DMA
mask. This in turn caused degraded performance and can lead to
situations where the swiotlb buffer is exhausted, which in turn leads
to DMA transfers to fail.
With the recent introduction of the ->set_dma_mask() host operation,
this can now be properly fixed. For each generation of Tegra, the exact
supported DMA mask can be configured. This kills two birds with one
stone: it avoids the use of bounce buffers because system memory never
exceeds the addressable memory range of the SDHCI controllers on these
devices, and at the same time when an IOMMU is involved, it prevents
IOV addresses from being allocated beyond the addressible range of the
controllers.
Since the DMA mask is now properly handled, the 64-bit DMA quirk can be
removed.
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
[treding@nvidia.com: provide more background in commit message]
Tested-by: Nicolin Chen <nicoleotsuka@gmail.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Cc: stable@vger.kernel.org # v4.15 +
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2019-09-23 18:08:10 +08:00
|
|
|
#include <linux/dma-mapping.h>
|
2011-01-02 12:52:56 +08:00
|
|
|
#include <linux/err.h>
|
2011-07-04 03:15:51 +08:00
|
|
|
#include <linux/module.h>
|
2011-01-02 12:52:56 +08:00
|
|
|
#include <linux/init.h>
|
2018-08-30 23:06:13 +08:00
|
|
|
#include <linux/iopoll.h>
|
2011-01-02 12:52:56 +08:00
|
|
|
#include <linux/platform_device.h>
|
|
|
|
#include <linux/clk.h>
|
|
|
|
#include <linux/io.h>
|
2011-08-31 03:17:16 +08:00
|
|
|
#include <linux/of.h>
|
2012-02-02 07:30:55 +08:00
|
|
|
#include <linux/of_device.h>
|
2018-08-30 23:06:12 +08:00
|
|
|
#include <linux/pinctrl/consumer.h>
|
|
|
|
#include <linux/regulator/consumer.h>
|
2017-03-09 03:00:39 +08:00
|
|
|
#include <linux/reset.h>
|
2011-01-02 12:52:56 +08:00
|
|
|
#include <linux/mmc/card.h>
|
|
|
|
#include <linux/mmc/host.h>
|
2015-12-23 02:41:02 +08:00
|
|
|
#include <linux/mmc/mmc.h>
|
2013-03-12 04:44:11 +08:00
|
|
|
#include <linux/mmc/slot-gpio.h>
|
2015-03-31 05:39:25 +08:00
|
|
|
#include <linux/gpio/consumer.h>
|
2018-08-20 17:23:33 +08:00
|
|
|
#include <linux/ktime.h>
|
2011-01-02 12:52:56 +08:00
|
|
|
|
|
|
|
#include "sdhci-pltfm.h"
|
2019-01-24 03:30:54 +08:00
|
|
|
#include "cqhci.h"
|
2011-01-02 12:52:56 +08:00
|
|
|
|
2012-04-18 21:18:02 +08:00
|
|
|
/* Tegra SDHOST controller vendor register definitions */
|
2015-12-23 02:41:01 +08:00
|
|
|
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
|
2015-12-23 02:41:02 +08:00
|
|
|
#define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000
|
|
|
|
#define SDHCI_CLOCK_CTRL_TAP_SHIFT 16
|
2018-08-30 23:06:23 +08:00
|
|
|
#define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000
|
|
|
|
#define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24
|
2015-12-23 02:41:02 +08:00
|
|
|
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5)
|
2015-12-23 02:41:01 +08:00
|
|
|
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3)
|
|
|
|
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2)
|
|
|
|
|
2018-08-11 02:14:00 +08:00
|
|
|
#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104
|
|
|
|
#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31)
|
|
|
|
|
2018-08-11 02:13:59 +08:00
|
|
|
#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c
|
|
|
|
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00
|
|
|
|
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8
|
|
|
|
|
2018-08-30 23:06:14 +08:00
|
|
|
#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
|
2020-03-11 23:47:54 +08:00
|
|
|
#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0)
|
2018-08-30 23:06:14 +08:00
|
|
|
#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
|
|
|
|
#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
|
|
|
|
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
|
|
|
|
#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
|
|
|
|
|
2018-08-11 02:14:01 +08:00
|
|
|
#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0
|
|
|
|
#define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31)
|
|
|
|
|
|
|
|
#define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc
|
|
|
|
#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31)
|
|
|
|
|
2018-08-30 23:06:20 +08:00
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
|
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
|
2019-03-24 12:45:20 +08:00
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000
|
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18
|
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0
|
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6
|
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK 0x000e000
|
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13
|
|
|
|
#define TRIES_128 2
|
|
|
|
#define TRIES_256 4
|
|
|
|
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7
|
|
|
|
|
|
|
|
#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4
|
|
|
|
#define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8
|
|
|
|
#define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC
|
|
|
|
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF
|
|
|
|
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8
|
|
|
|
#define TUNING_WORD_BIT_SIZE 32
|
2018-08-30 23:06:20 +08:00
|
|
|
|
2018-08-30 23:06:14 +08:00
|
|
|
#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
|
|
|
|
#define SDHCI_AUTO_CAL_START BIT(31)
|
|
|
|
#define SDHCI_AUTO_CAL_ENABLE BIT(29)
|
2018-08-30 23:06:17 +08:00
|
|
|
#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff
|
2018-08-30 23:06:14 +08:00
|
|
|
|
|
|
|
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0
|
|
|
|
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f
|
|
|
|
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7
|
2018-08-30 23:06:15 +08:00
|
|
|
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31)
|
2019-01-11 06:46:03 +08:00
|
|
|
#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000
|
2018-08-30 23:06:14 +08:00
|
|
|
|
|
|
|
#define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
|
|
|
|
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
|
|
|
|
|
|
|
|
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
|
|
|
|
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
|
|
|
|
#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
|
|
|
|
#define NVQUIRK_ENABLE_SDR50 BIT(3)
|
|
|
|
#define NVQUIRK_ENABLE_SDR104 BIT(4)
|
|
|
|
#define NVQUIRK_ENABLE_DDR50 BIT(5)
|
2020-06-05 11:04:00 +08:00
|
|
|
/*
|
|
|
|
* HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
|
|
|
|
* drive strength.
|
|
|
|
*/
|
2018-08-30 23:06:14 +08:00
|
|
|
#define NVQUIRK_HAS_PADCALIB BIT(6)
|
2020-06-05 11:04:00 +08:00
|
|
|
/*
|
|
|
|
* NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
|
|
|
|
* 3V3/1V8 pad selection happens through pinctrl state selection depending
|
|
|
|
* on the signaling mode.
|
|
|
|
*/
|
2018-08-30 23:06:14 +08:00
|
|
|
#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
|
2018-08-30 23:06:20 +08:00
|
|
|
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
|
2019-03-24 12:45:24 +08:00
|
|
|
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
|
2012-02-02 07:30:55 +08:00
|
|
|
|
2020-08-28 01:21:01 +08:00
|
|
|
/*
|
|
|
|
* NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
|
|
|
|
* SDMMC hardware data timeout.
|
|
|
|
*/
|
|
|
|
#define NVQUIRK_HAS_TMCLK BIT(10)
|
|
|
|
|
2019-01-24 03:30:54 +08:00
|
|
|
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
|
|
|
|
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
|
|
|
|
|
2021-04-07 17:46:17 +08:00
|
|
|
#define SDHCI_TEGRA_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
|
|
|
|
SDHCI_TRNS_BLK_CNT_EN | \
|
|
|
|
SDHCI_TRNS_DMA)
|
|
|
|
|
2012-02-02 07:30:55 +08:00
|
|
|
/*
 * Per-SoC-generation configuration for the Tegra SDHCI driver.
 * One instance of this struct exists per supported Tegra chip.
 */
struct sdhci_tegra_soc_data {
	/* Platform data (host ops and quirks) for this SoC generation. */
	const struct sdhci_pltfm_data *pdata;
	/*
	 * DMA addressing capability of this generation's controller,
	 * applied via the ->set_dma_mask() host op so that IOVA/DMA
	 * addresses never exceed what the controller can reach.
	 */
	u64 dma_mask;
	/* Bitmask of NVQUIRK_* flags describing SoC-specific behavior. */
	u32 nvquirks;
	/*
	 * Tap-delay bounds — presumably clamps applied by the tuning
	 * logic (not visible in this chunk); confirm against the tuning
	 * implementation.
	 */
	u8 min_tap_delay;
	u8 max_tap_delay;
};
|
|
|
|
|
2018-08-30 23:06:17 +08:00
|
|
|
/* Magic pull up and pull down pad calibration offsets */
|
|
|
|
struct sdhci_tegra_autocal_offsets {
	/* Pull-up/pull-down offsets used while running at 3.3V signaling. */
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	/* Fixed fallback drive strengths if 3.3V auto-calibration times out. */
	u32 pull_up_3v3_timeout;
	u32 pull_down_3v3_timeout;
	/* Pull-up/pull-down offsets used while running at 1.8V signaling. */
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	/* Fixed fallback drive strengths if 1.8V auto-calibration times out. */
	u32 pull_up_1v8_timeout;
	u32 pull_down_1v8_timeout;
	/* Mode-specific offsets for SDR104 and HS400 timings. */
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};
|
|
|
|
|
2012-02-02 07:30:55 +08:00
|
|
|
/* Per-instance driver state, stored in the sdhci_pltfm private area. */
struct sdhci_tegra {
	/* SoC-generation configuration this host was probed with. */
	const struct sdhci_tegra_soc_data *soc_data;
	/* Optional power GPIO from the device-tree. */
	struct gpio_desc *power_gpio;
	/*
	 * Separate timeout clock (NVQUIRK_HAS_TMCLK) for the SDMMC
	 * hardware data timeout.
	 */
	struct clk *tmclk;
	/* True while a DDR signaling mode is in use; cleared on reset. */
	bool ddr_signaling;
	/* Set on reset when the SoC supports pad auto-calibration. */
	bool pad_calib_required;
	/* True when valid 3V3/1V8 pad-control pinctrl states were found. */
	bool pad_control_available;

	struct reset_control *rst;
	/* Pinctrl handles for pad voltage and drive-strength selection. */
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;
	struct pinctrl_state *pinctrl_state_1v8_drv;

	/* DT-provided pad auto-calibration offsets (see struct above). */
	struct sdhci_tegra_autocal_offsets autocal_offsets;
	/* Timestamp of the last pad calibration run. */
	ktime_t last_calib;

	/* Default tap/trim values programmed on reset. */
	u32 default_tap;
	u32 default_trim;
	/* DQS trim value — presumably applied for HS400; confirm vs. UHS code. */
	u32 dqs_trim;
	/* Whether hardware command queueing (CQHCI) should be enabled. */
	bool enable_hwcq;
	/* Current clock rate and the tap delay selected by tuning. */
	unsigned long curr_clk_rate;
	u8 tuned_tap_delay;
};
|
|
|
|
|
2011-01-02 12:52:56 +08:00
|
|
|
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
|
|
|
|
{
|
2012-02-02 07:30:55 +08:00
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
2016-02-16 21:08:29 +08:00
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
2012-02-02 07:30:55 +08:00
|
|
|
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
|
|
|
|
|
|
|
|
if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
|
|
|
|
(reg == SDHCI_HOST_VERSION))) {
|
2011-01-02 12:52:56 +08:00
|
|
|
/* Erratum: Version register is invalid in HW. */
|
|
|
|
return SDHCI_SPEC_200;
|
|
|
|
}
|
|
|
|
|
|
|
|
return readw(host->ioaddr + reg);
|
|
|
|
}
|
|
|
|
|
2015-01-29 00:45:16 +08:00
|
|
|
/*
 * 16-bit register write accessor. Transfer-mode writes are buffered and
 * flushed together with the subsequent command write, as a single 32-bit
 * store to the transfer-mode register.
 */
static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *plat = sdhci_priv(host);

	if (reg == SDHCI_TRANSFER_MODE) {
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below.
		 */
		plat->xfer_mode_shadow = val;
		return;
	}

	if (reg == SDHCI_COMMAND) {
		/* Issue the buffered transfer mode and the command at once. */
		writel((val << 16) | plat->xfer_mode_shadow,
		       host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	}

	writew(val, host->ioaddr + reg);
}
|
|
|
|
|
2011-01-02 12:52:56 +08:00
|
|
|
/*
 * 32-bit register write accessor. Masks spurious error interrupts and
 * applies the block-gap interrupt detection erratum where required.
 */
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *plat = sdhci_priv(host);
	struct sdhci_tegra *tegra = sdhci_pltfm_priv(plat);
	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;

	/*
	 * Spurious timeout and CRC interrupts have been observed, so keep
	 * them from being signalled; in case of real errors, software
	 * timers will still eventually detect them.
	 */
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT | SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely(reg == SDHCI_INT_ENABLE &&
		     (soc->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET))) {
		/* Erratum: must enable block gap interrupt detection. */
		u8 gap = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);

		if (val & SDHCI_INT_CARD_INT)
			gap |= 0x8;
		else
			gap &= ~0x8;

		writeb(gap, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}
|
|
|
|
|
2018-08-30 23:06:26 +08:00
|
|
|
/*
 * Gate or ungate the card clock.
 *
 * Returns the previous state of SDHCI_CLOCK_CARD_EN so callers can
 * restore it later. The register is only written when the state changes.
 */
static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
{
	u32 ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	bool was_enabled = !!(ctrl & SDHCI_CLOCK_CARD_EN);

	if (was_enabled != enable) {
		if (enable)
			ctrl |= SDHCI_CLOCK_CARD_EN;
		else
			ctrl &= ~SDHCI_CLOCK_CARD_EN;

		sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
	}

	return was_enabled;
}
|
|
|
|
|
|
|
|
/*
 * Tegra210 16-bit register write accessor.
 *
 * Tuning commands require the card clock to be gated around the command
 * write; afterwards CMD/DATA lines are reset and the previous clock state
 * is restored.
 */
static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	/* Use proper bool literals; initialize clk_enabled so it is never
	 * read indeterminate (it is only meaningful for tuning commands). */
	bool is_tuning_cmd = false;
	bool clk_enabled = false;
	u8 cmd;

	if (reg == SDHCI_COMMAND) {
		cmd = SDHCI_GET_CMD(val);
		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
				cmd == MMC_SEND_TUNING_BLOCK_HS200;
	}

	if (is_tuning_cmd)
		clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	writew(val, host->ioaddr + reg);

	if (is_tuning_cmd) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, clk_enabled);
	}
}
|
|
|
|
|
2019-08-09 06:24:30 +08:00
|
|
|
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Write-enable shall be assumed if GPIO is missing in a board's
|
|
|
|
* device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
|
|
|
|
* Tegra.
|
|
|
|
*/
|
|
|
|
return mmc_gpio_get_ro(host->mmc);
|
|
|
|
}
|
|
|
|
|
2018-08-30 23:06:12 +08:00
|
|
|
static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
int has_1v8, has_3v3;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
|
|
|
|
* voltage configuration in order to perform voltage switching. This
|
|
|
|
* means that valid pinctrl info is required on SDHCI instances capable
|
|
|
|
* of performing voltage switching. Whether or not an SDHCI instance is
|
|
|
|
* capable of voltage switching is determined based on the regulator.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (IS_ERR(host->mmc->supply.vqmmc))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
|
|
|
|
1700000, 1950000);
|
|
|
|
|
|
|
|
has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
|
|
|
|
2700000, 3600000);
|
|
|
|
|
|
|
|
if (has_1v8 == 1 && has_3v3 == 1)
|
|
|
|
return tegra_host->pad_control_available;
|
|
|
|
|
|
|
|
/* Fixed voltage, no pad control required. */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-08-30 23:06:22 +08:00
|
|
|
static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
|
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
|
|
|
|
bool card_clk_enabled = false;
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Touching the tap values is a bit tricky on some SoC generations.
|
|
|
|
* The quirk enables a workaround for a glitch that sometimes occurs if
|
|
|
|
* the tap values are changed.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
|
|
|
|
card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
|
|
|
|
|
|
|
|
reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
|
|
|
|
reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
|
|
|
|
reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
|
|
|
|
sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
|
|
|
|
|
|
|
|
if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
|
|
|
|
card_clk_enabled) {
|
|
|
|
udelay(1);
|
|
|
|
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
|
|
|
|
tegra_sdhci_configure_card_clk(host, card_clk_enabled);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-11 02:14:00 +08:00
|
|
|
static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
|
|
|
|
struct mmc_ios *ios)
|
|
|
|
{
|
|
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
|
|
u32 val;
|
|
|
|
|
|
|
|
val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
|
|
|
|
|
|
|
|
if (ios->enhanced_strobe)
|
|
|
|
val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
|
|
|
|
else
|
|
|
|
val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
|
|
|
|
|
|
|
|
sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2014-04-25 19:57:12 +08:00
|
|
|
/*
 * Host reset hook. After a full controller reset (SDHCI_RESET_ALL) the
 * vendor registers revert to hardware defaults, so reprogram the default
 * tap/trim values, re-advertise the supported UHS modes, and restore the
 * pad calibration configuration.
 */
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *plat = sdhci_priv(host);
	struct sdhci_tegra *tegra = sdhci_pltfm_priv(plat);
	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
	u32 misc, clk, pad;

	sdhci_reset(host, mask);

	/* Only a full reset clobbers the vendor registers. */
	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra->default_tap);

	misc = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	misc &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		  SDHCI_MISC_CTRL_ENABLE_SDR50 |
		  SDHCI_MISC_CTRL_ENABLE_DDR50 |
		  SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		 SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: enable SDHCI spec v3.00 support. */
		if (soc->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;

		/* Advertise the UHS modes this host supports. */
		if (soc->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc |= SDHCI_MISC_CTRL_ENABLE_SDR50;

		if (soc->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc |= SDHCI_MISC_CTRL_ENABLE_DDR50;

		if (soc->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc |= SDHCI_MISC_CTRL_ENABLE_SDR104;

		if (soc->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk |= tegra->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc->nvquirks & NVQUIRK_HAS_PADCALIB) {
		/* Restore the pad VREF selection and flag a re-calibration. */
		pad = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		tegra->pad_calib_required = true;
	}

	tegra->ddr_signaling = false;
}
|
|
|
|
|
2018-08-30 23:06:15 +08:00
|
|
|
/*
 * Power the additional I/O pad used by the drive-strength calibration
 * process up or down.
 */
static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
{
	u32 padctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		padctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
	else
		padctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;

	sdhci_writel(host, padctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	/* Give the calibration pad a moment to power up. */
	if (enable)
		usleep_range(1, 2);
}
|
|
|
|
|
2018-08-30 23:06:17 +08:00
|
|
|
static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
|
|
|
|
u16 pdpu)
|
|
|
|
{
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
|
|
|
|
reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
|
|
|
|
reg |= pdpu;
|
|
|
|
sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
|
|
|
|
}
|
|
|
|
|
2019-01-11 06:46:03 +08:00
|
|
|
/*
 * Configure the SDMMC pads for the given signaling voltage.
 *
 * @voltage:       MMC_SIGNAL_VOLTAGE_* to configure for.
 * @state_drvupdn: false selects drive-strength configuration, true
 *                 selects the dual-voltage pad (3V3/1V8) selection.
 *
 * Returns 0 on success or a negative pinctrl error code. Note that in
 * the drive-strength path a pinctrl failure is logged but the error is
 * still returned to the caller.
 */
static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
			&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/*
		 * PADS Drive Strength: prefer a dedicated pinctrl state when
		 * the DT provides one; otherwise fall back to programming the
		 * fixed timeout offsets directly into the pad register.
		 */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
							pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			/* Write fixed drive strengths into the pad register. */
			reg = sdhci_readl(host,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}

	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}
|
|
|
|
|
2016-03-01 04:56:25 +08:00
|
|
|
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
|
|
|
|
{
|
2018-08-30 23:06:17 +08:00
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
struct sdhci_tegra_autocal_offsets offsets =
|
|
|
|
tegra_host->autocal_offsets;
|
|
|
|
struct mmc_ios *ios = &host->mmc->ios;
|
2018-08-30 23:06:16 +08:00
|
|
|
bool card_clk_enabled;
|
2018-08-30 23:06:17 +08:00
|
|
|
u16 pdpu;
|
2018-08-30 23:06:13 +08:00
|
|
|
u32 reg;
|
|
|
|
int ret;
|
|
|
|
|
2018-08-30 23:06:17 +08:00
|
|
|
switch (ios->timing) {
|
|
|
|
case MMC_TIMING_UHS_SDR104:
|
|
|
|
pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
|
|
|
|
break;
|
|
|
|
case MMC_TIMING_MMC_HS400:
|
|
|
|
pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
|
|
|
|
pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
|
|
|
|
else
|
|
|
|
pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
|
|
|
|
}
|
|
|
|
|
2019-01-11 06:46:03 +08:00
|
|
|
/* Set initial offset before auto-calibration */
|
2018-08-30 23:06:17 +08:00
|
|
|
tegra_sdhci_set_pad_autocal_offset(host, pdpu);
|
|
|
|
|
2018-08-30 23:06:16 +08:00
|
|
|
card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
|
|
|
|
|
2018-08-30 23:06:15 +08:00
|
|
|
tegra_sdhci_configure_cal_pad(host, true);
|
|
|
|
|
2018-08-30 23:06:13 +08:00
|
|
|
reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
|
|
|
|
reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
|
|
|
|
sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
|
2016-03-01 04:56:25 +08:00
|
|
|
|
2018-08-30 23:06:13 +08:00
|
|
|
usleep_range(1, 2);
|
|
|
|
/* 10 ms timeout */
|
|
|
|
ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
|
|
|
|
reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
|
|
|
|
1000, 10000);
|
2016-03-01 04:56:25 +08:00
|
|
|
|
2018-08-30 23:06:15 +08:00
|
|
|
tegra_sdhci_configure_cal_pad(host, false);
|
|
|
|
|
2018-08-30 23:06:16 +08:00
|
|
|
tegra_sdhci_configure_card_clk(host, card_clk_enabled);
|
|
|
|
|
2018-08-30 23:06:17 +08:00
|
|
|
if (ret) {
|
2018-08-30 23:06:13 +08:00
|
|
|
dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
|
2018-08-30 23:06:17 +08:00
|
|
|
|
2019-01-11 06:46:03 +08:00
|
|
|
/* Disable automatic cal and use fixed Drive Strengths */
|
2018-08-30 23:06:17 +08:00
|
|
|
reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
|
|
|
|
reg &= ~SDHCI_AUTO_CAL_ENABLE;
|
|
|
|
sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
|
|
|
|
|
2019-01-11 06:46:03 +08:00
|
|
|
ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
|
|
|
|
if (ret < 0)
|
|
|
|
dev_err(mmc_dev(host->mmc),
|
|
|
|
"Setting drive strengths failed: %d\n", ret);
|
2018-08-30 23:06:17 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * tegra_sdhci_parse_pad_autocal_dt() - read pad auto-calibration offsets
 * from device properties into tegra_host->autocal_offsets.
 *
 * Every "nvidia,pad-autocal-*" property is optional: missing 3V3/1V8
 * offsets default to 0, missing SDR104/HS400 offsets fall back to the
 * corresponding 1V8 value. The "-timeout" fail-safe drive strengths are
 * only read on SoCs flagged with NVQUIRK_NEEDS_PAD_CONTROL.
 */
static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *autocal =
			&tegra_host->autocal_offsets;
	int err;

	/* 3.3 V pull-up/pull-down offsets; 0 when the property is absent. */
	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-3v3",
			&autocal->pull_up_3v3);
	if (err)
		autocal->pull_up_3v3 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-3v3",
			&autocal->pull_down_3v3);
	if (err)
		autocal->pull_down_3v3 = 0;

	/* 1.8 V pull-up/pull-down offsets; 0 when the property is absent. */
	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-1v8",
			&autocal->pull_up_1v8);
	if (err)
		autocal->pull_up_1v8 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-1v8",
			&autocal->pull_down_1v8);
	if (err)
		autocal->pull_down_1v8 = 0;

	/* SDR104 and HS400 offsets fall back to the 1.8 V values. */
	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-sdr104",
			&autocal->pull_up_sdr104);
	if (err)
		autocal->pull_up_sdr104 = autocal->pull_up_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-sdr104",
			&autocal->pull_down_sdr104);
	if (err)
		autocal->pull_down_sdr104 = autocal->pull_down_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-hs400",
			&autocal->pull_up_hs400);
	if (err)
		autocal->pull_up_hs400 = autocal->pull_up_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-hs400",
			&autocal->pull_down_hs400);
	if (err)
		autocal->pull_down_hs400 = autocal->pull_down_1v8;

	/*
	 * Different fail-safe drive strength values based on the signaling
	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
	 * So, avoid reading below device tree properties for SoCs that don't
	 * have NVQUIRK_NEEDS_PAD_CONTROL.
	 */
	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
			&autocal->pull_up_3v3_timeout);
	if (err) {
		/*
		 * Warn only when a 3v3 pad state exists but no fixed
		 * drive-strength ("-drv") pinctrl state was found either.
		 */
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_3v3_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
			&autocal->pull_down_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_3v3_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
			&autocal->pull_up_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_1v8_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
			&autocal->pull_down_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_1v8_timeout = 0;
	}
}
|
|
|
|
|
2018-08-20 17:23:33 +08:00
|
|
|
static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|
|
|
{
|
|
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
|
|
|
|
|
|
|
|
/* 100 ms calibration interval is specified in the TRM */
|
|
|
|
if (ktime_to_ms(since_calib) > 100) {
|
|
|
|
tegra_sdhci_pad_autocalib(host);
|
|
|
|
tegra_host->last_calib = ktime_get();
|
|
|
|
}
|
|
|
|
|
|
|
|
sdhci_request(mmc, mrq);
|
|
|
|
}
|
|
|
|
|
2018-08-11 02:13:59 +08:00
|
|
|
static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
|
2018-08-30 23:06:21 +08:00
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
int err;
|
|
|
|
|
2021-03-24 15:50:13 +08:00
|
|
|
err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
|
2018-08-30 23:06:21 +08:00
|
|
|
&tegra_host->default_tap);
|
|
|
|
if (err)
|
|
|
|
tegra_host->default_tap = 0;
|
|
|
|
|
2021-03-24 15:50:13 +08:00
|
|
|
err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
|
2018-08-30 23:06:21 +08:00
|
|
|
&tegra_host->default_trim);
|
|
|
|
if (err)
|
|
|
|
tegra_host->default_trim = 0;
|
2018-08-11 02:13:59 +08:00
|
|
|
|
2021-03-24 15:50:13 +08:00
|
|
|
err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
|
2018-08-11 02:13:59 +08:00
|
|
|
&tegra_host->dqs_trim);
|
|
|
|
if (err)
|
|
|
|
tegra_host->dqs_trim = 0x11;
|
2018-08-30 23:06:21 +08:00
|
|
|
}
|
|
|
|
|
2019-01-24 03:30:54 +08:00
|
|
|
static void tegra_sdhci_parse_dt(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
|
2021-03-24 15:50:13 +08:00
|
|
|
if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
|
2019-01-24 03:30:54 +08:00
|
|
|
tegra_host->enable_hwcq = true;
|
|
|
|
else
|
|
|
|
tegra_host->enable_hwcq = false;
|
|
|
|
|
|
|
|
tegra_sdhci_parse_pad_autocal_dt(host);
|
|
|
|
tegra_sdhci_parse_tap_and_trim(host);
|
|
|
|
}
|
|
|
|
|
2015-12-23 02:41:00 +08:00
|
|
|
/*
 * tegra_sdhci_set_clock() - set the card clock rate.
 *
 * Programs the clock provider and then the SDHCI divider, keeping
 * host->max_clk consistent with the DDR50/52 divide-by-two requirement
 * below. A zero @clock request (clock gating) is forwarded directly to
 * sdhci_set_clock(). Pending pad auto-calibration is performed after
 * the clock change.
 */
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long host_clk;

	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divided the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
	clk_set_rate(pltfm_host->clk, host_clk);
	/* Remember the requested rate; used later by the tuning code. */
	tegra_host->curr_clk_rate = host_clk;
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	/* Run any calibration that was deferred until the clock change. */
	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}
|
|
|
|
|
2018-07-13 21:17:45 +08:00
|
|
|
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
|
|
|
|
return clk_round_rate(pltfm_host->clk, UINT_MAX);
|
|
|
|
}
|
|
|
|
|
2018-08-11 02:13:59 +08:00
|
|
|
/* Program the HS400 DQS trim value into the vendor capability overrides. */
static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
{
	u32 caps;

	caps = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
	caps &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
	caps |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
	sdhci_writel(host, caps, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
}
|
|
|
|
|
2018-08-11 02:14:01 +08:00
|
|
|
static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
u32 reg;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
|
|
|
|
reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
|
|
|
|
sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
|
|
|
|
|
|
|
|
/* 1 ms sleep, 5 ms timeout */
|
|
|
|
err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
|
|
|
|
reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
|
|
|
|
1000, 5000);
|
|
|
|
if (err)
|
|
|
|
dev_err(mmc_dev(host->mmc),
|
|
|
|
"HS400 delay line calibration timed out\n");
|
|
|
|
}
|
|
|
|
|
2019-03-24 12:45:20 +08:00
|
|
|
/*
 * tegra_sdhci_tap_correction() - pick a corrected tap delay from the
 * per-tap pass/fail bitmap produced by hardware tuning.
 *
 * Scans the tuning status words as a fail->pass->fail->pass state
 * machine, discarding windows whose width is >= @thd_up (merged) or
 * < @thd_low (bubble). On finding a valid window the tap is placed at
 * its middle. If no edge at all is seen, the HW tuned delay is kept;
 * if only a single edge is seen, the tap is placed @fixed_tap away
 * from that edge.
 */
static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;
	bool start_pass = false;
	bool end_pass = false;
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract good valid passing window by
	 * filtering out un-wanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		/* Select which 32-tap status word to read back. */
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			/* Bit set means this tap passed tuning. */
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				/* First failing tap: a leading edge. */
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				/* Fail->pass transition: window opens. */
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				/* Pass->fail transition: window closes. */
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}

			bit++;
		}
	}

	if (!first_fail) {
		/* No failing tap at all: keep the HW tuned delay as-is. */
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}
|
|
|
|
|
|
|
|
/*
 * tegra_sdhci_post_tuning() - validate and, if needed, correct the tap
 * delay chosen by hardware tuning, then program the final tap.
 *
 * Thresholds for merged/bubble window detection are derived from the
 * SoC's min/max per-tap delays and the current clock period; correction
 * is skipped entirely on SoCs that do not provide those delays.
 */
static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain HW tuned tap to use incase if no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		/* Clock period in picoseconds from the current rate in MHz. */
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and Lower bound thresholds used to detect merged and
		 * bubble windows
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed tap is used when HW tuning result contains single edge
		 * and tap is set at fixed tap delay relative to the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		/* Extract the start/end taps of the HW-detected window. */
		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * partial window includes edges of the tuning range.
		 * merged window includes more taps so window width is higher
		 * than upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}
|
|
|
|
|
|
|
|
/*
 * Run standard SDHCI tuning and, on full success, apply the Tegra
 * post-tuning tap correction. Returns the tuning result unchanged.
 */
static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	ret = sdhci_execute_tuning(mmc, opcode);
	if (ret == 0 && host->tuning_err == 0)
		tegra_sdhci_post_tuning(host);

	return ret;
}
|
|
|
|
|
2018-08-30 23:06:22 +08:00
|
|
|
/*
 * tegra_sdhci_set_uhs_signaling() - configure vendor tuning control,
 * UHS signaling, pad calibration and tap/trim for the given timing.
 *
 * Tunable modes (SDR104/HS200/HS400) use 128 tuning iterations and
 * keep a previously tuned tap; all other modes fall back to the
 * default tap. HS400 additionally programs the DQS trim and runs the
 * delay-line calibration.
 */
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
					  unsigned timing)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	bool set_default_tap = false;
	bool set_dqs_trim = false;
	bool do_hs400_dll_cal = false;
	u8 iter = TRIES_256;
	u32 val;

	tegra_host->ddr_signaling = false;
	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		/* Don't set default tap on tunable modes. */
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_HS400:
		set_dqs_trim = true;
		do_hs400_dll_cal = true;
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		tegra_host->ddr_signaling = true;
		set_default_tap = true;
		break;
	default:
		set_default_tap = true;
		break;
	}

	/* Program tuning iteration count, start tap and multiplier. */
	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);

	/* Keep the SW loop count in sync with the HW iteration setting. */
	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;

	sdhci_set_uhs_signaling(host, timing);

	tegra_sdhci_pad_autocalib(host);

	if (tegra_host->tuned_tap_delay && !set_default_tap)
		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
	else
		tegra_sdhci_set_tap(host, tegra_host->default_tap);

	if (set_dqs_trim)
		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);

	if (do_hs400_dll_cal)
		tegra_sdhci_hs400_dll_cal(host);
}
|
|
|
|
|
|
|
|
/*
 * Software tuning: sweep the tap delay to find the passing range and
 * settle at 75% of it, as recommended by the TRM.
 */
static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned int low, high;

	/*
	 * Start the search for the minimum tap value at 10: according to
	 * the TRM, smaller values may wrongly be reported as working but
	 * fail at higher speeds.
	 */
	for (low = 10; low < 255; low++) {
		tegra_sdhci_set_tap(host, low);
		if (mmc_send_tuning(host->mmc, opcode, NULL) == 0)
			break;
	}

	/* Find the maximum tap value that still passes. */
	for (high = low + 1; high < 255; high++) {
		tegra_sdhci_set_tap(host, high);
		if (mmc_send_tuning(host->mmc, opcode, NULL) != 0) {
			high--;
			break;
		}
	}

	/* The TRM states the ideal tap value is at 75% in the passing range. */
	tegra_sdhci_set_tap(host, low + ((high - low) * 3 / 4));

	return mmc_send_tuning(host->mmc, opcode, NULL);
}
|
|
|
|
|
2018-08-30 23:06:12 +08:00
|
|
|
/*
 * sdhci_tegra_start_signal_voltage_switch() - switch signaling voltage
 * with the pad configuration updated in the required order.
 *
 * For 3.3 V the pads are reconfigured before the controller voltage
 * switch; for 1.8 V the controller is switched first and the pads
 * after. Pad auto-calibration is re-run if it was flagged as required.
 */
static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int ret = 0;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		/* Pads first, then the controller voltage switch. */
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
		if (ret < 0)
			return ret;
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
		/* Controller voltage switch first, then the pads. */
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
		if (ret < 0)
			return ret;
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
	}

	if (tegra_host->pad_calib_required)
		tegra_sdhci_pad_autocalib(host);

	return ret;
}
|
|
|
|
|
|
|
|
static int tegra_sdhci_init_pinctrl_info(struct device *dev,
|
|
|
|
struct sdhci_tegra *tegra_host)
|
|
|
|
{
|
|
|
|
tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
|
|
|
|
if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
|
|
|
|
dev_dbg(dev, "No pinctrl info, err: %ld\n",
|
|
|
|
PTR_ERR(tegra_host->pinctrl_sdmmc));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-01-11 06:46:03 +08:00
|
|
|
tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
|
|
|
|
tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
|
|
|
|
if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
|
|
|
|
if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
|
|
|
|
tegra_host->pinctrl_state_1v8_drv = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
|
|
|
|
tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
|
|
|
|
if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
|
|
|
|
if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
|
|
|
|
tegra_host->pinctrl_state_3v3_drv = NULL;
|
|
|
|
}
|
|
|
|
|
2018-08-30 23:06:12 +08:00
|
|
|
tegra_host->pinctrl_state_3v3 =
|
|
|
|
pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
|
|
|
|
if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
|
|
|
|
dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
|
|
|
|
PTR_ERR(tegra_host->pinctrl_state_3v3));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
tegra_host->pinctrl_state_1v8 =
|
|
|
|
pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
|
|
|
|
if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
|
|
|
|
dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
|
2018-09-04 10:59:09 +08:00
|
|
|
PTR_ERR(tegra_host->pinctrl_state_1v8));
|
2018-08-30 23:06:12 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
tegra_host->pad_control_available = true;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-03-01 04:56:25 +08:00
|
|
|
static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
|
|
|
|
|
|
|
|
if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
|
|
|
|
tegra_host->pad_calib_required = true;
|
|
|
|
}
|
|
|
|
|
2019-03-24 12:45:26 +08:00
|
|
|
/*
 * tegra_cqhci_writel() - CQHCI register write hook that intercepts CQE
 * unhalt to re-program SDHCI block registers first.
 */
static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
{
	struct mmc_host *mmc = cq_host->mmc;
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;
	ktime_t timeout;
	bool timed_out;

	/*
	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
	 * to be re-configured.
	 * Tegra CQHCI/SDHCI prevents write access to block size register when
	 * CQE is unhalted. So handling CQE resume sequence here to configure
	 * SDHCI block registers prior to exiting CQE halt state.
	 */
	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		writel(val, cq_host->mmio + reg);
		/* Busy-wait up to 50 us for the halt bit to clear. */
		timeout = ktime_add_us(ktime_get(), 50);
		while (1) {
			timed_out = ktime_compare(ktime_get(), timeout) > 0;
			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
			if (!(ctrl & CQHCI_HALT) || timed_out)
				break;
		}
		/*
		 * CQE usually resumes very quick, but incase if Tegra CQE
		 * doesn't resume retry unhalt.
		 */
		if (timed_out)
			writel(val, cq_host->mmio + reg);
	} else {
		writel(val, cq_host->mmio + reg);
	}
}
|
|
|
|
|
2019-03-24 12:45:24 +08:00
|
|
|
/*
 * On SoCs with the DCMD R1b quirk, force CMD_TIMING(1) in the task
 * descriptor for direct commands that use an R1b response.
 */
static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
					 struct mmc_request *mrq, u64 *data)
{
	struct sdhci_pltfm_host *plat = sdhci_priv(mmc_priv(mmc));
	struct sdhci_tegra *tegra = sdhci_pltfm_priv(plat);

	if (!(tegra->soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING))
		return;

	if (mrq->cmd->flags & MMC_RSP_R1B)
		*data |= CQHCI_CMD_TIMING(1);
}
|
|
|
|
|
2019-01-24 03:30:54 +08:00
|
|
|
/*
 * sdhci_tegra_cqe_enable() - CQE enable hook working around Tegra's
 * block-size write restriction and a CBC-related CMD CRC issue.
 */
static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	/*
	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
	 * register when CQE is enabled and unhalted.
	 * CQHCI driver enables CQE prior to activation, so disable CQE before
	 * programming block size in sdhci controller and enable it back.
	 */
	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		/* Restore the original CFG value (re-enable CQE) if needed. */
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	/*
	 * CMD CRC errors are seen sometimes with some eMMC devices when status
	 * command is sent during transfer of last data block which is the
	 * default case as send status command block counter (CBC) is 1.
	 * Recommended fix to set CBC to 0 allowing send status command only
	 * when data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}
|
|
|
|
|
|
|
|
/* CQHCI debug hook: dump the underlying SDHCI host registers. */
static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_dumpregs(host);
}
|
|
|
|
|
|
|
|
/*
 * Route a CQE interrupt to the CQHCI core. Returns 0 when the interrupt
 * was consumed, or the untouched mask for the SDHCI core to handle.
 */
static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_err = 0;
	int data_err = 0;

	if (sdhci_cqe_irq(host, intmask, &cmd_err, &data_err)) {
		cqhci_irq(host->mmc, intmask, cmd_err, data_err);
		return 0;
	}

	return intmask;
}
|
|
|
|
|
2020-03-11 23:47:54 +08:00
|
|
|
static void tegra_sdhci_set_timeout(struct sdhci_host *host,
|
|
|
|
struct mmc_command *cmd)
|
|
|
|
{
|
|
|
|
u32 val;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* HW busy detection timeout is based on programmed data timeout
|
|
|
|
* counter and maximum supported timeout is 11s which may not be
|
|
|
|
* enough for long operations like cache flush, sleep awake, erase.
|
|
|
|
*
|
|
|
|
* ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
|
|
|
|
* host controller to wait for busy state until the card is busy
|
|
|
|
* without HW timeout.
|
|
|
|
*
|
|
|
|
* So, use infinite busy wait mode for operations that may take
|
|
|
|
* more than maximum HW busy timeout of 11s otherwise use finite
|
|
|
|
* busy wait mode.
|
|
|
|
*/
|
|
|
|
val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
|
2020-11-16 21:22:06 +08:00
|
|
|
if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
|
2020-03-11 23:47:54 +08:00
|
|
|
val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
|
|
|
|
else
|
|
|
|
val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
|
|
|
|
sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
|
|
|
|
|
|
|
|
__sdhci_set_timeout(host, cmd);
|
|
|
|
}
|
|
|
|
|
2021-04-07 17:46:17 +08:00
|
|
|
static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct cqhci_host *cq_host = mmc->cqe_private;
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
reg = cqhci_readl(cq_host, CQHCI_CFG);
|
|
|
|
reg |= CQHCI_ENABLE;
|
|
|
|
cqhci_writel(cq_host, reg, CQHCI_CFG);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct cqhci_host *cq_host = mmc->cqe_private;
|
|
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
reg = cqhci_readl(cq_host, CQHCI_CFG);
|
|
|
|
reg &= ~CQHCI_ENABLE;
|
|
|
|
cqhci_writel(cq_host, reg, CQHCI_CFG);
|
|
|
|
sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
|
|
|
|
}
|
|
|
|
|
2019-01-24 03:30:54 +08:00
|
|
|
/* CQHCI callbacks implementing the Tegra-specific CQE workarounds above. */
static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l    = tegra_cqhci_writel,
	.enable	= sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
	.pre_enable = sdhci_tegra_cqe_pre_enable,
	.post_disable = sdhci_tegra_cqe_post_disable,
};
|
|
|
|
|
mmc: tegra: Implement ->set_dma_mask()
The SDHCI controller on Tegra186 supports 40-bit addressing, which is
usually enough to address all of system memory. However, if the SDHCI
controller is behind an IOMMU, the address space can go beyond. This
happens on Tegra186 and later where the ARM SMMU has an input address
space of 48 bits. If the DMA API is backed by this ARM SMMU, the top-
down IOVA allocator will cause IOV addresses to be returned that the
SDHCI controller cannot access.
Unfortunately, prior to the introduction of the ->set_dma_mask() host
operation, the SDHCI core would set either a 64-bit DMA mask if the
controller claimed to support 64-bit addressing, or a 32-bit DMA mask
otherwise.
Since the full 64 bits cannot be addressed on Tegra, this had to be
worked around in commit 68481a7e1c84 ("mmc: tegra: Mark 64 bit dma
broken on Tegra186") by setting the SDHCI_QUIRK2_BROKEN_64_BIT_DMA
quirk, which effectively restricts the DMA mask to 32 bits.
One disadvantage of this is that dma_map_*() APIs will now try to use
the swiotlb to bounce DMA to addresses beyond of the controller's DMA
mask. This in turn caused degraded performance and can lead to
situations where the swiotlb buffer is exhausted, which in turn leads
to DMA transfers to fail.
With the recent introduction of the ->set_dma_mask() host operation,
this can now be properly fixed. For each generation of Tegra, the exact
supported DMA mask can be configured. This kills two birds with one
stone: it avoids the use of bounce buffers because system memory never
exceeds the addressable memory range of the SDHCI controllers on these
devices, and at the same time when an IOMMU is involved, it prevents
IOV addresses from being allocated beyond the addressible range of the
controllers.
Since the DMA mask is now properly handled, the 64-bit DMA quirk can be
removed.
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
[treding@nvidia.com: provide more background in commit message]
Tested-by: Nicolin Chen <nicoleotsuka@gmail.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Cc: stable@vger.kernel.org # v4.15 +
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2019-09-23 18:08:10 +08:00
|
|
|
static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *platform = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
|
|
|
|
const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
|
|
|
|
struct device *dev = mmc_dev(host->mmc);
|
|
|
|
|
|
|
|
if (soc->dma_mask)
|
|
|
|
return dma_set_mask_and_coherent(dev, soc->dma_mask);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-14 02:26:05 +08:00
|
|
|
/*
 * Host controller callbacks shared by the Tegra20 and Tegra30 platform
 * data tables below.
 */
static const struct sdhci_ops tegra_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	/* DMA mask comes from the per-SoC dma_mask field, not SDHCI caps */
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};
2013-03-14 02:26:03 +08:00
|
|
|
/* SDHCI core quirks for the Tegra20 controller instances. */
static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops  = &tegra_sdhci_ops,
};
2015-11-16 17:27:14 +08:00
|
|
|
static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
	.pdata = &sdhci_tegra20_pdata,
	/* Tegra20 controllers are limited to 32-bit DMA addressing. */
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
};
2013-03-14 02:26:03 +08:00
|
|
|
/* SDHCI core quirks for the Tegra30 controller instances. */
static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_HS200 |
		   /*
		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
		    * though no command operation was in progress."
		    *
		    * The exact reason is unknown, as the same hardware seems
		    * to support Auto CMD23 on a downstream 3.1 kernel.
		    */
		   SDHCI_QUIRK2_ACMD23_BROKEN,
	.ops  = &tegra_sdhci_ops,
};
2015-11-16 17:27:14 +08:00
|
|
|
static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
	.pdata = &sdhci_tegra30_pdata,
	/* Tegra30 controllers are limited to 32-bit DMA addressing. */
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_PADCALIB,
};
2015-02-12 01:55:51 +08:00
|
|
|
/*
 * Host controller callbacks for Tegra114-class controllers; also reused
 * by the Tegra124 platform data. Adds a 16-bit register write hook
 * (tegra_sdhci_writew) on top of the callbacks in tegra_sdhci_ops.
 */
static const struct sdhci_ops tegra114_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	/* DMA mask comes from the per-SoC dma_mask field, not SDHCI caps */
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};
2013-03-14 02:26:03 +08:00
|
|
|
/* SDHCI core quirks for the Tegra114 controller instances. */
static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};
2015-11-16 17:27:14 +08:00
|
|
|
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	/* Tegra114 controllers are limited to 32-bit DMA addressing. */
	.dma_mask = DMA_BIT_MASK(32),
};
2016-09-01 19:46:17 +08:00
|
|
|
/*
 * SDHCI core quirks for the Tegra124 controller instances; shares the
 * Tegra114 host operations.
 */
static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};
|
|
|
static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	/* Tegra124 controllers support 34-bit DMA addressing. */
	.dma_mask = DMA_BIT_MASK(34),
};
2018-08-30 23:06:24 +08:00
|
|
|
/*
 * Host controller callbacks for Tegra210. Uses a Tegra210-specific
 * 16-bit write hook (tegra210_sdhci_writew) and adds a timeout hook;
 * note there is no .platform_execute_tuning here, unlike the
 * Tegra20/Tegra114 ops tables.
 */
static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra210_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	/* DMA mask comes from the per-SoC dma_mask field, not SDHCI caps */
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};
2015-11-16 17:27:15 +08:00
|
|
|
/* SDHCI core quirks for the Tegra210 controller instances. */
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra210_sdhci_ops,
};
|
|
|
static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	/* Tegra210 controllers support 34-bit DMA addressing. */
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	/* Tap-delay window used by the tuning code on this SoC. */
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};
2018-08-30 23:06:26 +08:00
|
|
|
/*
 * Host controller callbacks for Tegra186. Compared to the Tegra210
 * table: no 16-bit write hook, and an .irq hook is wired to
 * sdhci_tegra_cqhci_irq for command-queueing interrupt handling.
 */
static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	/* DMA mask comes from the per-SoC dma_mask field, not SDHCI caps */
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};
2017-03-09 03:00:40 +08:00
|
|
|
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
|
|
|
|
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
|
|
|
|
SDHCI_QUIRK_SINGLE_POWER_WRITE |
|
|
|
|
SDHCI_QUIRK_NO_HISPD_BIT |
|
|
|
|
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
|
|
|
|
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
|
mmc: tegra: Implement ->set_dma_mask()
The SDHCI controller on Tegra186 supports 40-bit addressing, which is
usually enough to address all of system memory. However, if the SDHCI
controller is behind an IOMMU, the address space can go beyond. This
happens on Tegra186 and later where the ARM SMMU has an input address
space of 48 bits. If the DMA API is backed by this ARM SMMU, the top-
down IOVA allocator will cause IOV addresses to be returned that the
SDHCI controller cannot access.
Unfortunately, prior to the introduction of the ->set_dma_mask() host
operation, the SDHCI core would set either a 64-bit DMA mask if the
controller claimed to support 64-bit addressing, or a 32-bit DMA mask
otherwise.
Since the full 64 bits cannot be addressed on Tegra, this had to be
worked around in commit 68481a7e1c84 ("mmc: tegra: Mark 64 bit dma
broken on Tegra186") by setting the SDHCI_QUIRK2_BROKEN_64_BIT_DMA
quirk, which effectively restricts the DMA mask to 32 bits.
One disadvantage of this is that dma_map_*() APIs will now try to use
the swiotlb to bounce DMA to addresses beyond of the controller's DMA
mask. This in turn caused degraded performance and can lead to
situations where the swiotlb buffer is exhausted, which in turn leads
to DMA transfers to fail.
With the recent introduction of the ->set_dma_mask() host operation,
this can now be properly fixed. For each generation of Tegra, the exact
supported DMA mask can be configured. This kills two birds with one
stone: it avoids the use of bounce buffers because system memory never
exceeds the addressable memory range of the SDHCI controllers on these
devices, and at the same time when an IOMMU is involved, it prevents
IOV addresses from being allocated beyond the addressible range of the
controllers.
Since the DMA mask is now properly handled, the 64-bit DMA quirk can be
removed.
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
[treding@nvidia.com: provide more background in commit message]
Tested-by: Nicolin Chen <nicoleotsuka@gmail.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Cc: stable@vger.kernel.org # v4.15 +
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2019-09-23 18:08:10 +08:00
|
|
|
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
|
2018-08-30 23:06:26 +08:00
|
|
|
.ops = &tegra186_sdhci_ops,
|
2017-03-09 03:00:40 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
|
|
|
|
.pdata = &sdhci_tegra186_pdata,
|
mmc: tegra: Implement ->set_dma_mask()
The SDHCI controller on Tegra186 supports 40-bit addressing, which is
usually enough to address all of system memory. However, if the SDHCI
controller is behind an IOMMU, the address space can go beyond. This
happens on Tegra186 and later where the ARM SMMU has an input address
space of 48 bits. If the DMA API is backed by this ARM SMMU, the top-
down IOVA allocator will cause IOV addresses to be returned that the
SDHCI controller cannot access.
Unfortunately, prior to the introduction of the ->set_dma_mask() host
operation, the SDHCI core would set either a 64-bit DMA mask if the
controller claimed to support 64-bit addressing, or a 32-bit DMA mask
otherwise.
Since the full 64 bits cannot be addressed on Tegra, this had to be
worked around in commit 68481a7e1c84 ("mmc: tegra: Mark 64 bit dma
broken on Tegra186") by setting the SDHCI_QUIRK2_BROKEN_64_BIT_DMA
quirk, which effectively restricts the DMA mask to 32 bits.
One disadvantage of this is that dma_map_*() APIs will now try to use
the swiotlb to bounce DMA to addresses beyond of the controller's DMA
mask. This in turn caused degraded performance and can lead to
situations where the swiotlb buffer is exhausted, which in turn leads
to DMA transfers to fail.
With the recent introduction of the ->set_dma_mask() host operation,
this can now be properly fixed. For each generation of Tegra, the exact
supported DMA mask can be configured. This kills two birds with one
stone: it avoids the use of bounce buffers because system memory never
exceeds the addressable memory range of the SDHCI controllers on these
devices, and at the same time when an IOMMU is involved, it prevents
IOV addresses from being allocated beyond the addressible range of the
controllers.
Since the DMA mask is now properly handled, the 64-bit DMA quirk can be
removed.
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
[treding@nvidia.com: provide more background in commit message]
Tested-by: Nicolin Chen <nicoleotsuka@gmail.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Cc: stable@vger.kernel.org # v4.15 +
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2019-09-23 18:08:10 +08:00
|
|
|
.dma_mask = DMA_BIT_MASK(40),
|
2018-08-30 23:06:19 +08:00
|
|
|
.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
|
2018-08-30 23:06:20 +08:00
|
|
|
NVQUIRK_HAS_PADCALIB |
|
2018-08-30 23:06:28 +08:00
|
|
|
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
|
|
|
|
NVQUIRK_ENABLE_SDR50 |
|
2019-03-24 12:45:24 +08:00
|
|
|
NVQUIRK_ENABLE_SDR104 |
|
2020-08-28 01:21:01 +08:00
|
|
|
NVQUIRK_HAS_TMCLK |
|
2019-03-24 12:45:24 +08:00
|
|
|
NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
|
2019-03-24 12:45:20 +08:00
|
|
|
.min_tap_delay = 84,
|
|
|
|
.max_tap_delay = 136,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
|
|
|
|
.pdata = &sdhci_tegra186_pdata,
|
mmc: tegra: Implement ->set_dma_mask()
The SDHCI controller on Tegra186 supports 40-bit addressing, which is
usually enough to address all of system memory. However, if the SDHCI
controller is behind an IOMMU, the address space can go beyond. This
happens on Tegra186 and later where the ARM SMMU has an input address
space of 48 bits. If the DMA API is backed by this ARM SMMU, the top-
down IOVA allocator will cause IOV addresses to be returned that the
SDHCI controller cannot access.
Unfortunately, prior to the introduction of the ->set_dma_mask() host
operation, the SDHCI core would set either a 64-bit DMA mask if the
controller claimed to support 64-bit addressing, or a 32-bit DMA mask
otherwise.
Since the full 64 bits cannot be addressed on Tegra, this had to be
worked around in commit 68481a7e1c84 ("mmc: tegra: Mark 64 bit dma
broken on Tegra186") by setting the SDHCI_QUIRK2_BROKEN_64_BIT_DMA
quirk, which effectively restricts the DMA mask to 32 bits.
One disadvantage of this is that dma_map_*() APIs will now try to use
the swiotlb to bounce DMA to addresses beyond of the controller's DMA
mask. This in turn caused degraded performance and can lead to
situations where the swiotlb buffer is exhausted, which in turn leads
to DMA transfers to fail.
With the recent introduction of the ->set_dma_mask() host operation,
this can now be properly fixed. For each generation of Tegra, the exact
supported DMA mask can be configured. This kills two birds with one
stone: it avoids the use of bounce buffers because system memory never
exceeds the addressable memory range of the SDHCI controllers on these
devices, and at the same time when an IOMMU is involved, it prevents
IOV addresses from being allocated beyond the addressible range of the
controllers.
Since the DMA mask is now properly handled, the 64-bit DMA quirk can be
removed.
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
[treding@nvidia.com: provide more background in commit message]
Tested-by: Nicolin Chen <nicoleotsuka@gmail.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Cc: stable@vger.kernel.org # v4.15 +
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2019-09-23 18:08:10 +08:00
|
|
|
.dma_mask = DMA_BIT_MASK(39),
|
2019-03-24 12:45:20 +08:00
|
|
|
.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
|
|
|
|
NVQUIRK_HAS_PADCALIB |
|
|
|
|
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
|
|
|
|
NVQUIRK_ENABLE_SDR50 |
|
2020-08-28 01:21:01 +08:00
|
|
|
NVQUIRK_ENABLE_SDR104 |
|
|
|
|
NVQUIRK_HAS_TMCLK,
|
2019-03-24 12:45:20 +08:00
|
|
|
.min_tap_delay = 96,
|
|
|
|
.max_tap_delay = 139,
|
2017-03-09 03:00:40 +08:00
|
|
|
};
|
|
|
|
|
2012-11-20 02:24:22 +08:00
|
|
|
static const struct of_device_id sdhci_tegra_dt_match[] = {
|
2019-03-24 12:45:20 +08:00
|
|
|
{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
|
2017-03-09 03:00:40 +08:00
|
|
|
{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
|
2015-11-16 17:27:15 +08:00
|
|
|
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
|
2016-09-01 19:46:17 +08:00
|
|
|
{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
|
2013-02-21 02:35:17 +08:00
|
|
|
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
|
2012-02-02 07:30:55 +08:00
|
|
|
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
|
|
|
|
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
|
2011-08-24 02:15:33 +08:00
|
|
|
{}
|
|
|
|
};
|
2013-04-24 03:05:57 +08:00
|
|
|
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
|
2011-08-24 02:15:33 +08:00
|
|
|
|
2019-01-24 03:30:54 +08:00
|
|
|
static int sdhci_tegra_add_host(struct sdhci_host *host)
|
|
|
|
{
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
struct cqhci_host *cq_host;
|
|
|
|
bool dma64;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!tegra_host->enable_hwcq)
|
|
|
|
return sdhci_add_host(host);
|
|
|
|
|
|
|
|
sdhci_enable_v4_mode(host);
|
|
|
|
|
|
|
|
ret = sdhci_setup_host(host);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
|
|
|
|
|
2021-03-24 15:50:13 +08:00
|
|
|
cq_host = devm_kzalloc(mmc_dev(host->mmc),
|
2019-01-24 03:30:54 +08:00
|
|
|
sizeof(*cq_host), GFP_KERNEL);
|
|
|
|
if (!cq_host) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
|
|
|
|
cq_host->ops = &sdhci_tegra_cqhci_ops;
|
|
|
|
|
|
|
|
dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
|
|
|
|
if (dma64)
|
|
|
|
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
|
|
|
|
|
|
|
|
ret = cqhci_init(cq_host, host->mmc, dma64);
|
|
|
|
if (ret)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = __sdhci_add_host(host);
|
|
|
|
if (ret)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
sdhci_cleanup_host(host);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-11-20 02:23:06 +08:00
|
|
|
static int sdhci_tegra_probe(struct platform_device *pdev)
|
2011-01-02 12:52:56 +08:00
|
|
|
{
|
2012-02-02 07:30:55 +08:00
|
|
|
const struct of_device_id *match;
|
|
|
|
const struct sdhci_tegra_soc_data *soc_data;
|
|
|
|
struct sdhci_host *host;
|
2011-05-27 23:48:12 +08:00
|
|
|
struct sdhci_pltfm_host *pltfm_host;
|
2012-02-02 07:30:55 +08:00
|
|
|
struct sdhci_tegra *tegra_host;
|
2011-01-02 12:52:56 +08:00
|
|
|
struct clk *clk;
|
|
|
|
int rc;
|
|
|
|
|
2012-02-02 07:30:55 +08:00
|
|
|
match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
|
2012-08-17 15:04:31 +08:00
|
|
|
if (!match)
|
|
|
|
return -EINVAL;
|
|
|
|
soc_data = match->data;
|
2012-02-02 07:30:55 +08:00
|
|
|
|
2016-02-16 21:08:29 +08:00
|
|
|
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
|
2011-05-27 23:48:12 +08:00
|
|
|
if (IS_ERR(host))
|
|
|
|
return PTR_ERR(host);
|
|
|
|
pltfm_host = sdhci_priv(host);
|
|
|
|
|
2016-02-16 21:08:29 +08:00
|
|
|
tegra_host = sdhci_pltfm_priv(pltfm_host);
|
2015-12-23 02:41:00 +08:00
|
|
|
tegra_host->ddr_signaling = false;
|
2016-03-01 04:56:25 +08:00
|
|
|
tegra_host->pad_calib_required = false;
|
2018-08-30 23:06:12 +08:00
|
|
|
tegra_host->pad_control_available = false;
|
2012-02-02 07:30:55 +08:00
|
|
|
tegra_host->soc_data = soc_data;
|
2011-08-24 02:15:33 +08:00
|
|
|
|
2018-08-30 23:06:12 +08:00
|
|
|
if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
|
|
|
|
rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
|
|
|
|
if (rc == 0)
|
|
|
|
host->mmc_host_ops.start_signal_voltage_switch =
|
|
|
|
sdhci_tegra_start_signal_voltage_switch;
|
|
|
|
}
|
|
|
|
|
2018-08-20 17:23:33 +08:00
|
|
|
/* Hook to periodically rerun pad calibration */
|
|
|
|
if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
|
|
|
|
host->mmc_host_ops.request = tegra_sdhci_request;
|
|
|
|
|
2018-08-11 02:14:00 +08:00
|
|
|
host->mmc_host_ops.hs400_enhanced_strobe =
|
|
|
|
tegra_sdhci_hs400_enhanced_strobe;
|
|
|
|
|
2019-03-24 12:45:20 +08:00
|
|
|
if (!host->ops->platform_execute_tuning)
|
|
|
|
host->mmc_host_ops.execute_tuning =
|
|
|
|
tegra_sdhci_execute_hw_tuning;
|
|
|
|
|
2015-03-31 05:39:25 +08:00
|
|
|
rc = mmc_of_parse(host->mmc);
|
2013-06-10 04:14:16 +08:00
|
|
|
if (rc)
|
|
|
|
goto err_parse_dt;
|
2013-02-16 06:07:19 +08:00
|
|
|
|
2015-12-23 02:41:03 +08:00
|
|
|
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
|
2015-12-23 02:41:02 +08:00
|
|
|
host->mmc->caps |= MMC_CAP_1_8V_DDR;
|
|
|
|
|
2020-03-11 23:47:55 +08:00
|
|
|
/* HW busy detection is supported, but R1B responses are required. */
|
|
|
|
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
|
2020-03-10 22:50:11 +08:00
|
|
|
|
2019-01-24 03:30:54 +08:00
|
|
|
tegra_sdhci_parse_dt(host);
|
2018-08-30 23:06:21 +08:00
|
|
|
|
2015-03-31 05:39:25 +08:00
|
|
|
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
|
|
|
|
GPIOD_OUT_HIGH);
|
|
|
|
if (IS_ERR(tegra_host->power_gpio)) {
|
|
|
|
rc = PTR_ERR(tegra_host->power_gpio);
|
|
|
|
goto err_power_req;
|
2011-01-02 12:52:56 +08:00
|
|
|
}
|
|
|
|
|
2020-08-28 01:21:01 +08:00
|
|
|
/*
|
|
|
|
* Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
|
|
|
|
* timeout clock and SW can choose TMCLK or SDCLK for hardware
|
|
|
|
* data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
|
|
|
|
* the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
|
|
|
|
*
|
|
|
|
* USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
|
|
|
|
* 12Mhz TMCLK which is advertised in host capability register.
|
|
|
|
* With TMCLK of 12Mhz provides maximum data timeout period that can
|
|
|
|
* be achieved is 11s better than using SDCLK for data timeout.
|
|
|
|
*
|
|
|
|
* So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's
|
|
|
|
* supporting separate TMCLK.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
|
|
|
|
clk = devm_clk_get(&pdev->dev, "tmclk");
|
|
|
|
if (IS_ERR(clk)) {
|
|
|
|
rc = PTR_ERR(clk);
|
|
|
|
if (rc == -EPROBE_DEFER)
|
|
|
|
goto err_power_req;
|
|
|
|
|
|
|
|
dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
|
|
|
|
clk = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
clk_set_rate(clk, 12000000);
|
|
|
|
rc = clk_prepare_enable(clk);
|
|
|
|
if (rc) {
|
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"failed to enable tmclk: %d\n", rc);
|
|
|
|
goto err_power_req;
|
|
|
|
}
|
|
|
|
|
|
|
|
tegra_host->tmclk = clk;
|
|
|
|
}
|
|
|
|
|
2015-02-27 15:47:27 +08:00
|
|
|
clk = devm_clk_get(mmc_dev(host->mmc), NULL);
|
2011-01-02 12:52:56 +08:00
|
|
|
if (IS_ERR(clk)) {
|
2020-09-03 03:36:55 +08:00
|
|
|
rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
|
|
|
|
"failed to get clock\n");
|
2011-05-27 23:48:12 +08:00
|
|
|
goto err_clk_get;
|
2011-01-02 12:52:56 +08:00
|
|
|
}
|
2012-06-05 12:29:37 +08:00
|
|
|
clk_prepare_enable(clk);
|
2011-01-02 12:52:56 +08:00
|
|
|
pltfm_host->clk = clk;
|
|
|
|
|
2017-07-19 23:25:45 +08:00
|
|
|
tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
|
|
|
|
"sdhci");
|
2017-03-09 03:00:39 +08:00
|
|
|
if (IS_ERR(tegra_host->rst)) {
|
|
|
|
rc = PTR_ERR(tegra_host->rst);
|
|
|
|
dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
|
|
|
|
goto err_rst_get;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = reset_control_assert(tegra_host->rst);
|
|
|
|
if (rc)
|
|
|
|
goto err_rst_get;
|
|
|
|
|
|
|
|
usleep_range(2000, 4000);
|
|
|
|
|
|
|
|
rc = reset_control_deassert(tegra_host->rst);
|
|
|
|
if (rc)
|
|
|
|
goto err_rst_get;
|
|
|
|
|
|
|
|
usleep_range(2000, 4000);
|
|
|
|
|
2019-01-24 03:30:54 +08:00
|
|
|
rc = sdhci_tegra_add_host(host);
|
2011-05-27 23:48:12 +08:00
|
|
|
if (rc)
|
|
|
|
goto err_add_host;
|
|
|
|
|
2011-01-02 12:52:56 +08:00
|
|
|
return 0;
|
|
|
|
|
2011-05-27 23:48:12 +08:00
|
|
|
err_add_host:
|
2017-03-09 03:00:39 +08:00
|
|
|
reset_control_assert(tegra_host->rst);
|
|
|
|
err_rst_get:
|
2012-06-05 12:29:37 +08:00
|
|
|
clk_disable_unprepare(pltfm_host->clk);
|
2011-05-27 23:48:12 +08:00
|
|
|
err_clk_get:
|
2020-08-28 01:21:01 +08:00
|
|
|
clk_disable_unprepare(tegra_host->tmclk);
|
2011-05-27 23:48:12 +08:00
|
|
|
err_power_req:
|
2013-06-10 04:14:16 +08:00
|
|
|
err_parse_dt:
|
2011-05-27 23:48:12 +08:00
|
|
|
sdhci_pltfm_free(pdev);
|
2011-01-02 12:52:56 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2017-03-09 03:00:39 +08:00
|
|
|
static int sdhci_tegra_remove(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct sdhci_host *host = platform_get_drvdata(pdev);
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
|
|
|
|
|
|
|
sdhci_remove_host(host, 0);
|
|
|
|
|
|
|
|
reset_control_assert(tegra_host->rst);
|
|
|
|
usleep_range(2000, 4000);
|
|
|
|
clk_disable_unprepare(pltfm_host->clk);
|
2020-08-28 01:21:01 +08:00
|
|
|
clk_disable_unprepare(tegra_host->tmclk);
|
2017-03-09 03:00:39 +08:00
|
|
|
|
|
|
|
sdhci_pltfm_free(pdev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-04-12 03:18:19 +08:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
|
|
|
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
|
|
|
|
{
|
|
|
|
struct sdhci_host *host = dev_get_drvdata(dev);
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (host->mmc->caps2 & MMC_CAP2_CQE) {
|
|
|
|
ret = cqhci_suspend(host->mmc);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = sdhci_suspend_host(host);
|
|
|
|
if (ret) {
|
|
|
|
cqhci_resume(host->mmc);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
clk_disable_unprepare(pltfm_host->clk);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int __maybe_unused sdhci_tegra_resume(struct device *dev)
|
|
|
|
{
|
|
|
|
struct sdhci_host *host = dev_get_drvdata(dev);
|
|
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = clk_prepare_enable(pltfm_host->clk);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = sdhci_resume_host(host);
|
|
|
|
if (ret)
|
|
|
|
goto disable_clk;
|
|
|
|
|
|
|
|
if (host->mmc->caps2 & MMC_CAP2_CQE) {
|
|
|
|
ret = cqhci_resume(host->mmc);
|
|
|
|
if (ret)
|
|
|
|
goto suspend_host;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
suspend_host:
|
|
|
|
sdhci_suspend_host(host);
|
|
|
|
disable_clk:
|
|
|
|
clk_disable_unprepare(pltfm_host->clk);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
|
|
|
|
sdhci_tegra_resume);
|
|
|
|
|
2011-05-27 23:48:12 +08:00
|
|
|
static struct platform_driver sdhci_tegra_driver = {
|
|
|
|
.driver = {
|
|
|
|
.name = "sdhci-tegra",
|
2020-09-04 07:24:36 +08:00
|
|
|
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
2011-08-24 02:15:33 +08:00
|
|
|
.of_match_table = sdhci_tegra_dt_match,
|
2019-04-12 03:18:19 +08:00
|
|
|
.pm = &sdhci_tegra_dev_pm_ops,
|
2011-05-27 23:48:12 +08:00
|
|
|
},
|
|
|
|
.probe = sdhci_tegra_probe,
|
2017-03-09 03:00:39 +08:00
|
|
|
.remove = sdhci_tegra_remove,
|
2011-01-02 12:52:56 +08:00
|
|
|
};
|
|
|
|
|
2011-11-26 12:55:43 +08:00
|
|
|
module_platform_driver(sdhci_tegra_driver);
|
2011-05-27 23:48:12 +08:00
|
|
|
|
|
|
|
MODULE_DESCRIPTION("SDHCI driver for Tegra");
|
2012-02-02 07:30:55 +08:00
|
|
|
MODULE_AUTHOR("Google, Inc.");
|
2011-05-27 23:48:12 +08:00
|
|
|
MODULE_LICENSE("GPL v2");
|