Merge tag 'mmc-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC and MEMSTICK updates from Ulf Hansson:
 "MMC core:
   - Return a proper response in case of an ioctl error
   - Issue HPI to interrupt BKOPS for eMMC if it timed out
   - Avoid hogging the CPU while polling for busy
   - Extend sd8787 pwrseq to support the wilc1000 SDIO
   - Remove a couple of confusing warning messages
   - Clarify comment for ->card_busy() host ops

  MMC host:
   - dw_mmc: Add data CRC error injection
   - mmci: De-assert reset during ->probe()
   - rtsx_pci: Fix long reads when clock is pre-scaled
   - sdhci: Correct the tuning command handle for PIO mode
   - sdhci-esdhc-imx: Improve support for auto tuning
   - sdhci-msm: Add support for the sc7280
   - sdhci-of-arasan: Don't auto tune for DDR50 mode for ZynqMP
   - sdhci-of-arasan: Enable support for auto cmd12
   - sdhci-of-arasan: Use 19MHz for SD default speed for ZynqMP for level shifter
   - usdhi6rol0: Implement the ->card_busy() host ops

  MEMSTICK:
   - A couple of minor cleanups"

* tag 'mmc-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (52 commits)
  mmc: queue: Remove unused parameters(request_queue)
  mmc: pwrseq: sd8787: fix compilation warning
  mmc: core: Return correct emmc response in case of ioctl error
  mmc: sdhci-esdhc-imx: Select the correct mode for auto tuning
  mmc: sdhci-esdhc-imx: Remove redundant code for manual tuning
  mmc: core: Issue HPI in case the BKOPS timed out
  mmc: queue: Match the data type of max_segments
  mmc: switch from 'pci_' to 'dma_' API
  memstick: switch from 'pci_' to 'dma_' API
  memstick: r592: Change the name of the 'pci_driver' structure to be consistent
  mmc: pwrseq: add wilc1000_sdio dependency for pwrseq_sd8787
  mmc: pwrseq: sd8787: add support for wilc1000
  dt-bindings: mmc: Extend pwrseq-sd8787 binding for wilc1000
  dt-bindings: mmc: fsl-imx-esdhc: change the pinctrl-names rule
  dt-bindings: mmc: fsl-imx-esdhc: add a new compatible string
  dt-bindings: mmc: renesas,sdhi: Document RZ/G2L bindings
  dt-bindings: mmc: renesas,sdhi: Fix dtbs-check warning
  mmc: core: Update ->card_busy() callback comment
  mmc: usdhi6rol0: Implement card_busy function
  mmc: sdhci: Correct the tuning command handle for PIO mode
  ...
Commit 359f3d743f by Linus Torvalds, 2021-08-31 14:31:10 -07:00
41 changed files with 651 additions and 378 deletions

View File

@ -29,6 +29,7 @@ properties:
- fsl,imx53-esdhc
- fsl,imx6q-usdhc
- fsl,imx6sl-usdhc
- fsl,imx6sll-usdhc
- fsl,imx6sx-usdhc
- fsl,imx6ull-usdhc
- fsl,imx7d-usdhc
@ -115,12 +116,17 @@ properties:
- const: per
pinctrl-names:
minItems: 1
items:
- const: default
- const: state_100mhz
- const: state_200mhz
- const: sleep
oneOf:
- minItems: 3
items:
- const: default
- const: state_100mhz
- const: state_200mhz
- const: sleep
- minItems: 1
items:
- const: default
- const: sleep
required:
- compatible

View File

@ -11,7 +11,9 @@ maintainers:
properties:
compatible:
const: mmc-pwrseq-sd8787
enum:
- mmc-pwrseq-sd8787
- mmc-pwrseq-wilc1000
powerdown-gpios:
minItems: 1

View File

@ -9,9 +9,6 @@ title: Renesas SDHI SD/MMC controller
maintainers:
- Wolfram Sang <wsa+renesas@sang-engineering.com>
allOf:
- $ref: "mmc-controller.yaml"
properties:
compatible:
oneOf:
@ -47,19 +44,20 @@ properties:
- const: renesas,sdhi-mmc-r8a77470 # RZ/G1C (SDHI/MMC IP)
- items:
- enum:
- renesas,sdhi-r8a774a1 # RZ/G2M
- renesas,sdhi-r8a774b1 # RZ/G2N
- renesas,sdhi-r8a774c0 # RZ/G2E
- renesas,sdhi-r8a774e1 # RZ/G2H
- renesas,sdhi-r8a7795 # R-Car H3
- renesas,sdhi-r8a7796 # R-Car M3-W
- renesas,sdhi-r8a77961 # R-Car M3-W+
- renesas,sdhi-r8a77965 # R-Car M3-N
- renesas,sdhi-r8a77970 # R-Car V3M
- renesas,sdhi-r8a77980 # R-Car V3H
- renesas,sdhi-r8a77990 # R-Car E3
- renesas,sdhi-r8a77995 # R-Car D3
- renesas,sdhi-r8a779a0 # R-Car V3U
- renesas,sdhi-r8a774a1 # RZ/G2M
- renesas,sdhi-r8a774b1 # RZ/G2N
- renesas,sdhi-r8a774c0 # RZ/G2E
- renesas,sdhi-r8a774e1 # RZ/G2H
- renesas,sdhi-r8a7795 # R-Car H3
- renesas,sdhi-r8a7796 # R-Car M3-W
- renesas,sdhi-r8a77961 # R-Car M3-W+
- renesas,sdhi-r8a77965 # R-Car M3-N
- renesas,sdhi-r8a77970 # R-Car V3M
- renesas,sdhi-r8a77980 # R-Car V3H
- renesas,sdhi-r8a77990 # R-Car E3
- renesas,sdhi-r8a77995 # R-Car D3
- renesas,sdhi-r8a779a0 # R-Car V3U
- renesas,sdhi-r9a07g044 # RZ/G2{L,LC}
- const: renesas,rcar-gen3-sdhi # R-Car Gen3 or RZ/G2
reg:
@ -69,15 +67,9 @@ properties:
minItems: 1
maxItems: 3
clocks:
minItems: 1
maxItems: 2
clocks: true
clock-names:
minItems: 1
items:
- const: core
- const: cd
clock-names: true
dmas:
minItems: 4
@ -104,14 +96,82 @@ properties:
pinctrl-1:
maxItems: 1
pinctrl-names:
minItems: 1
items:
- const: default
- const: state_uhs
pinctrl-names: true
max-frequency: true
allOf:
- $ref: "mmc-controller.yaml"
- if:
properties:
compatible:
contains:
const: renesas,sdhi-r9a07g044
then:
properties:
clocks:
items:
- description: IMCLK, SDHI channel main clock1.
- description: IMCLK2, SDHI channel main clock2. When this clock is
turned off, external SD card detection cannot be
detected.
- description: CLK_HS, SDHI channel High speed clock which operates
4 times that of SDHI channel main clock1.
- description: ACLK, SDHI channel bus clock.
clock-names:
items:
- const: imclk
- const: imclk2
- const: clk_hs
- const: aclk
required:
- clock-names
- resets
else:
properties:
clocks:
minItems: 1
maxItems: 2
clock-names:
minItems: 1
items:
- const: core
- const: cd
- if:
properties:
compatible:
contains:
const: renesas,sdhi-mmc-r8a77470
then:
properties:
pinctrl-names:
items:
- const: state_uhs
else:
properties:
pinctrl-names:
minItems: 1
items:
- const: default
- const: state_uhs
- if:
properties:
compatible:
contains:
enum:
- renesas,sdhi-r7s72100
- renesas,sdhi-r7s9210
then:
required:
- clock-names
description:
The internal card detection logic that exists in these controllers is
sectioned off to be run by a separate second clock source to allow
the main core clock to be turned off to save power.
required:
- compatible
- reg
@ -119,21 +179,6 @@ required:
- clocks
- power-domains
if:
properties:
compatible:
contains:
enum:
- renesas,sdhi-r7s72100
- renesas,sdhi-r7s9210
then:
required:
- clock-names
description:
The internal card detection logic that exists in these controllers is
sectioned off to be run by a separate second clock source to allow
the main core clock to be turned off to save power.
unevaluatedProperties: false
examples:

View File

@ -19,6 +19,7 @@ Required properties:
"qcom,msm8996-sdhci", "qcom,sdhci-msm-v4"
"qcom,qcs404-sdhci", "qcom,sdhci-msm-v5"
"qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
"qcom,sc7280-sdhci", "qcom,sdhci-msm-v5";
"qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"
"qcom,sdx55-sdhci", "qcom,sdhci-msm-v5";
"qcom,sm8250-sdhci", "qcom,sdhci-msm-v5"

View File

@ -1105,7 +1105,7 @@ static u16 msb_get_free_block(struct msb_data *msb, int zone)
dbg_verbose("result of the free blocks scan: pba %d", pba);
if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
pr_err("BUG: cant get a free block");
pr_err("BUG: can't get a free block");
msb->read_only = true;
return MS_BLOCK_INVALID;
}

View File

@ -293,7 +293,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
/* TODO: hidden assumption about nenth beeing always 1 */
sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
message("problem in dma_map_sg");
@ -310,8 +310,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
}
dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
DMA_TO_DEVICE : DMA_FROM_DEVICE);
return dev->dma_error;
}
@ -877,7 +876,7 @@ static SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume);
MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl);
static struct pci_driver r852_pci_driver = {
static struct pci_driver r592_pci_driver = {
.name = DRV_NAME,
.id_table = r592_pci_id_tbl,
.probe = r592_probe,
@ -885,7 +884,7 @@ static struct pci_driver r852_pci_driver = {
.driver.pm = &r592_pm_ops,
};
module_pci_driver(r852_pci_driver);
module_pci_driver(r592_pci_driver);
module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)");

View File

@ -279,8 +279,8 @@ static int tifm_ms_issue_cmd(struct tifm_ms *host)
if (host->use_dma) {
if (1 != tifm_map_sg(sock, &host->req->sg, 1,
host->req->data_dir == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE)) {
? DMA_FROM_DEVICE
: DMA_TO_DEVICE)) {
host->req->error = -ENOMEM;
return host->req->error;
}
@ -350,8 +350,8 @@ static void tifm_ms_complete_cmd(struct tifm_ms *host)
if (host->use_dma) {
tifm_unmap_sg(sock, &host->req->sg, 1,
host->req->data_dir == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
? DMA_FROM_DEVICE
: DMA_TO_DEVICE);
}
writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
@ -607,8 +607,8 @@ static void tifm_ms_remove(struct tifm_dev *sock)
if (host->use_dma)
tifm_unmap_sg(sock, &host->req->sg, 1,
host->req->data_dir == READ
? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE);
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
host->req->error = -ETIME;
do {

View File

@ -15,7 +15,7 @@ config PWRSEQ_EMMC
config PWRSEQ_SD8787
tristate "HW reset support for SD8787 BT + Wifi module"
depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO)
depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO || WILC1000_SDIO)
help
This selects hardware reset support for the SD8787 BT + Wifi
module. By default this option is set to n.

View File

@ -98,6 +98,11 @@ static int max_devices;
static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);
struct mmc_blk_busy_data {
struct mmc_card *card;
u32 status;
};
/*
* There is one mmc_blk_data per slot.
*/
@ -456,42 +461,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
u32 *resp_errs)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
u32 status;
do {
bool done = time_after(jiffies, timeout);
err = __mmc_send_status(card, &status, 5);
if (err) {
dev_err(mmc_dev(card->host),
"error %d requesting status\n", err);
return err;
}
/* Accumulate any response error bits seen */
if (resp_errs)
*resp_errs |= status;
/*
* Timeout if the device never becomes ready for data and never
* leaves the program state.
*/
if (done) {
dev_err(mmc_dev(card->host),
"Card stuck in wrong state! %s status: %#x\n",
__func__, status);
return -ETIMEDOUT;
}
} while (!mmc_ready_for_data(status));
return err;
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
@ -588,6 +557,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
mmc_wait_for_req(card->host, &mrq);
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
if (cmd.error) {
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
@ -637,14 +607,13 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
/*
* Ensure RPMB/R1B command has completed by polling CMD13
* "Send Status".
*/
err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
err = mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, false,
MMC_BUSY_IO);
}
return err;
@ -1696,7 +1665,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
mmc_blk_send_stop(card, timeout);
err = card_busy_detect(card, timeout, NULL);
err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
mmc_retune_release(card->host);
@ -1911,28 +1880,48 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
struct mmc_blk_busy_data *data = cb_data;
u32 status = 0;
int err;
err = mmc_send_status(data->card, &status);
if (err)
return err;
/* Accumulate response error bits. */
data->status |= status;
*busy = !mmc_ready_for_data(status);
return 0;
}
static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
u32 status = 0;
struct mmc_blk_busy_data cb_data;
int err;
if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
return 0;
err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
cb_data.card = card;
cb_data.status = 0;
err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb,
&cb_data);
/*
* Do not assume data transferred correctly if there are any error bits
* set.
*/
if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
mqrq->brq.data.bytes_xfered = 0;
err = err ? err : -EIO;
}
/* Copy the exception bit so it will be seen later on */
if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
return err;

View File

@ -936,15 +936,16 @@ int mmc_execute_tuning(struct mmc_card *card)
opcode = MMC_SEND_TUNING_BLOCK;
err = host->ops->execute_tuning(host, opcode);
if (!err) {
mmc_retune_clear(host);
mmc_retune_enable(host);
return 0;
}
if (err) {
/* Only print error when we don't check for card removal */
if (!host->detect_change)
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
} else {
host->retune_now = 0;
host->need_retune = 0;
mmc_retune_enable(host);
}
return err;
}

View File

@ -31,18 +31,11 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_request *mrq = &mqrq->brq.mrq;
if (!req->crypt_keyslot)
if (!req->crypt_ctx)
return;
mrq->crypto_enabled = true;
mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
/*
* For now we assume that all MMC drivers set max_dun_bytes_supported=4,
* which is the limit for CQHCI crypto. So all DUNs should be 32-bit.
*/
WARN_ON_ONCE(req->crypt_ctx->bc_dun[0] > U32_MAX);
mrq->data_unit_num = req->crypt_ctx->bc_dun[0];
mrq->crypto_ctx = req->crypt_ctx;
if (req->crypt_keyslot)
mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
}
EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);

View File

@ -96,6 +96,10 @@ void mmc_unregister_host_class(void)
class_unregister(&mmc_host_class);
}
/**
* mmc_retune_enable() - enter a transfer mode that requires retuning
* @host: host which should retune now
*/
void mmc_retune_enable(struct mmc_host *host)
{
host->can_retune = 1;
@ -127,13 +131,18 @@ void mmc_retune_unpause(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_retune_unpause);
/**
* mmc_retune_disable() - exit a transfer mode that requires retuning
* @host: host which should not retune anymore
*
* It is not meant for temporarily preventing retuning!
*/
void mmc_retune_disable(struct mmc_host *host)
{
mmc_retune_unpause(host);
host->can_retune = 0;
del_timer_sync(&host->retune_timer);
host->retune_now = 0;
host->need_retune = 0;
mmc_retune_clear(host);
}
void mmc_retune_timer_stop(struct mmc_host *host)
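
The kerneldoc added above draws a line between leaving a transfer mode (mmc_retune_disable()) and merely deferring retuning for a moment. As a minimal illustration, not taken from this series, the temporary case is what the existing mmc_retune_pause()/mmc_retune_unpause() pair is for; the function below is hypothetical:

/*
 * Sketch: hold off retuning around a sequence that must not be
 * interrupted, then allow it again.  Disable/enable, by contrast,
 * mark leaving/entering a transfer mode that needs tuning at all.
 */
static void example_no_retune_window(struct mmc_host *host)
{
	mmc_retune_pause(host);
	/* ... issue the commands that must not trigger a retune ... */
	mmc_retune_unpause(host);
}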

View File

@ -21,6 +21,12 @@ int mmc_retune(struct mmc_host *host);
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
static inline void mmc_retune_clear(struct mmc_host *host)
{
host->retune_now = 0;
host->need_retune = 0;
}
static inline void mmc_retune_hold_now(struct mmc_host *host)
{
host->retune_now = 0;

View File

@ -435,7 +435,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
u32 status = 0;
int err;
if (host->ops->card_busy) {
if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
*busy = host->ops->card_busy(host);
return 0;
}
@ -457,6 +457,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
break;
case MMC_BUSY_HPI:
case MMC_BUSY_EXTR_SINGLE:
case MMC_BUSY_IO:
break;
default:
err = -EINVAL;
@ -509,6 +510,7 @@ int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
@ -521,6 +523,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms)
@ -956,8 +959,15 @@ void mmc_run_bkops(struct mmc_card *card)
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
if (err)
pr_warn("%s: Error %d starting bkops\n",
/*
* If the BKOPS timed out, the card is probably still busy in the
* R1_STATE_PRG. Rather than continue to wait, let's try to abort
* it with a HPI command to get back into R1_STATE_TRAN.
*/
if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
else if (err)
pr_warn("%s: Error %d running bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);

View File

@ -15,6 +15,7 @@ enum mmc_busy_cmd {
MMC_BUSY_ERASE,
MMC_BUSY_HPI,
MMC_BUSY_EXTR_SINGLE,
MMC_BUSY_IO,
};
struct mmc_host;

View File

@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@ -27,6 +28,7 @@ struct mmc_pwrseq_sd8787 {
struct mmc_pwrseq pwrseq;
struct gpio_desc *reset_gpio;
struct gpio_desc *pwrdn_gpio;
u32 reset_pwrdwn_delay_ms;
};
#define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
@ -37,7 +39,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
msleep(300);
msleep(pwrseq->reset_pwrdwn_delay_ms);
gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
}
@ -54,8 +56,12 @@ static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
.power_off = mmc_pwrseq_sd8787_power_off,
};
static const u32 sd8787_delay_ms = 300;
static const u32 wilc1000_delay_ms = 5;
static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
{ .compatible = "mmc-pwrseq-sd8787",},
{ .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms },
{ .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
@ -64,11 +70,15 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_sd8787 *pwrseq;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
return -ENOMEM;
match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node);
pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data;
pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
if (IS_ERR(pwrseq->pwrdn_gpio))
return PTR_ERR(pwrseq->pwrdn_gpio);

View File

@ -163,7 +163,7 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
blk_mq_run_hw_queues(q, true);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
struct scatterlist *sg;
@ -193,33 +193,29 @@ static void mmc_queue_setup_discard(struct request_queue *q,
blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
static unsigned int mmc_get_max_segments(struct mmc_host *host)
static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
host->max_segs;
}
/**
* mmc_init_request() - initialize the MMC-specific per-request data
* @mq: the request queue
* @req: the request
* @gfp: memory allocation policy
*/
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
gfp_t gfp)
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx, unsigned int numa_node)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
struct mmc_queue *mq = set->driver_data;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
if (!mq_rq->sg)
return -ENOMEM;
return 0;
}
static void mmc_exit_request(struct request_queue *q, struct request *req)
static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
@ -227,20 +223,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx, unsigned int numa_node)
{
return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}
static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx)
{
struct mmc_queue *mq = set->driver_data;
mmc_exit_request(mq->queue, req);
}
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{

View File

@ -330,13 +330,25 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
prev = &this->next;
if (ret == -ENOENT) {
if (time_after(jiffies, timeout))
break;
/* warn about unknown tuples */
pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
mmc_hostname(card->host),
tpl_code, tpl_link);
#define FMT(type) "%s: queuing " type " CIS tuple 0x%02x [%*ph] (%u bytes)\n"
/*
* Tuples in this range are reserved for
* vendors, so don't warn about them
*/
if (tpl_code >= 0x80 && tpl_code <= 0x8f)
pr_debug_ratelimited(FMT("vendor"),
mmc_hostname(card->host),
tpl_code, tpl_link, this->data,
tpl_link);
else
pr_warn_ratelimited(FMT("unknown"),
mmc_hostname(card->host),
tpl_code, tpl_link, this->data,
tpl_link);
}
/* keep on analyzing tuples */

View File

@ -22,12 +22,15 @@ int cqhci_crypto_init(struct cqhci_host *host);
*/
static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq)
{
if (!mrq->crypto_enabled)
if (!mrq->crypto_ctx)
return 0;
/* We set max_dun_bytes_supported=4, so all DUNs should be 32-bit. */
WARN_ON_ONCE(mrq->crypto_ctx->bc_dun[0] > U32_MAX);
return CQHCI_CRYPTO_ENABLE_BIT |
CQHCI_CRYPTO_KEYSLOT(mrq->crypto_key_slot) |
mrq->data_unit_num;
mrq->crypto_ctx->bc_dun[0];
}
#else /* CONFIG_MMC_CRYPTO */

View File

@ -17,9 +17,11 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
@ -181,6 +183,9 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
&host->pending_events);
debugfs_create_xul("completed_events", S_IRUSR, root,
&host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */
@ -782,6 +787,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
int ret = 0;
/* Set external dma config: burst size, burst width */
memset(&cfg, 0, sizeof(cfg));
cfg.dst_addr = host->phy_regs + fifo_offset;
cfg.src_addr = cfg.dst_addr;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@ -1788,6 +1794,68 @@ static const struct mmc_host_ops dw_mci_ops = {
.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};
#ifdef CONFIG_FAULT_INJECTION
static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
{
struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
unsigned long flags;
spin_lock_irqsave(&host->irq_lock, flags);
if (!host->data_status)
host->data_status = SDMMC_INT_DCRC;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
spin_unlock_irqrestore(&host->irq_lock, flags);
return HRTIMER_NORESTART;
}
static void dw_mci_start_fault_timer(struct dw_mci *host)
{
struct mmc_data *data = host->data;
if (!data || data->blocks <= 1)
return;
if (!should_fail(&host->fail_data_crc, 1))
return;
/*
* Try to inject the error at random points during the data transfer.
*/
hrtimer_start(&host->fault_timer,
ms_to_ktime(prandom_u32() % 25),
HRTIMER_MODE_REL);
}
static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
hrtimer_cancel(&host->fault_timer);
}
static void dw_mci_init_fault(struct dw_mci *host)
{
host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
host->fault_timer.function = dw_mci_fault_timer;
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
{
}
static void dw_mci_start_fault_timer(struct dw_mci *host)
{
}
static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
}
#endif
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
@ -2102,6 +2170,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
break;
}
dw_mci_stop_fault_timer(host);
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
err = dw_mci_data_complete(host, data);
@ -2151,6 +2220,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
if (mrq->cmd->error && mrq->data)
dw_mci_reset(host);
dw_mci_stop_fault_timer(host);
host->cmd = NULL;
host->data = NULL;
@ -2600,6 +2670,8 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
dw_mci_start_fault_timer(host);
}
static void dw_mci_handle_cd(struct dw_mci *host)
@ -3223,6 +3295,8 @@ int dw_mci_probe(struct dw_mci *host)
spin_lock_init(&host->irq_lock);
INIT_LIST_HEAD(&host->queue);
dw_mci_init_fault(host);
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
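
The new injection path above only fires once the generic fault-injection attributes are configured; with the defaults, should_fail() never triggers. As a rough, hypothetical illustration (not part of this series, and the helper name is invented), the attribute could also be forced on from code instead of through the debugfs entries that fault_create_debugfs_attr() exposes:

/*
 * Illustration only: make every eligible data transfer inject a CRC
 * error.  Normally the probability/interval/times knobs under the
 * "fail_data_crc" debugfs directory created above are tuned at run time.
 */
static void dw_mci_force_data_crc_faults(struct dw_mci *host)
{
	host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
	host->fail_data_crc.probability = 100;		/* percent */
	atomic_set(&host->fail_data_crc.times, -1);	/* unlimited */
}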

View File

@ -14,6 +14,8 @@
#include <linux/mmc/core.h>
#include <linux/dmaengine.h>
#include <linux/reset.h>
#include <linux/fault-inject.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
enum dw_mci_state {
@ -230,6 +232,11 @@ struct dw_mci {
struct timer_list cmd11_timer;
struct timer_list cto_timer;
struct timer_list dto_timer;
#ifdef CONFIG_FAULT_INJECTION
struct fault_attr fail_data_crc;
struct hrtimer fault_timer;
#endif
};
/* DMA ops for Internal/External DMAC interface */

View File

@ -180,7 +180,7 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
u8 *cp = host->data->status;
unsigned long start = jiffies;
while (1) {
do {
int status;
unsigned i;
@ -193,16 +193,9 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
return cp[i];
}
if (time_is_before_jiffies(start + timeout))
break;
/* If we need long timeouts, we may release the CPU.
* We use jiffies here because we want to have a relation
* between elapsed time and the blocking of the scheduler.
*/
if (time_is_before_jiffies(start + 1))
schedule();
}
/* If we need long timeouts, we may release the CPU */
cond_resched();
} while (time_is_after_jiffies(start + timeout));
return -ETIMEDOUT;
}

View File

@ -2126,6 +2126,9 @@ static int mmci_probe(struct amba_device *dev,
ret = PTR_ERR(host->rst);
goto clk_disable;
}
ret = reset_control_deassert(host->rst);
if (ret)
dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);

View File

@ -628,6 +628,7 @@ static int moxart_probe(struct platform_device *pdev)
host->dma_chan_tx, host->dma_chan_rx);
host->have_dma = true;
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

View File

@ -42,6 +42,11 @@ struct renesas_sdhi_quirks {
const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
};
struct renesas_sdhi_of_data_with_quirks {
const struct renesas_sdhi_of_data *of_data;
const struct renesas_sdhi_quirks *quirks;
};
struct tmio_mmc_dma {
enum dma_slave_buswidth dma_buswidth;
bool (*filter)(struct dma_chan *chan, void *arg);
@ -78,6 +83,8 @@ struct renesas_sdhi {
container_of((host)->pdata, struct renesas_sdhi, mmc_data)
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops);
const struct tmio_mmc_dma_ops *dma_ops,
const struct renesas_sdhi_of_data *of_data,
const struct renesas_sdhi_quirks *quirks);
int renesas_sdhi_remove(struct platform_device *pdev);
#endif

View File

@ -305,27 +305,6 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
#define SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK 0x1f
#define SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE BIT(7)
static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
{ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
};
static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
{ 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
};
static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
};
static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, int addr)
{
@ -895,69 +874,12 @@ static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_sdbuf_width(host, enable ? width : 16);
}
static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
.hs400_disabled = true,
.hs400_4taps = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
};
static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
.hs400_disabled = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
.hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a7796_es13_calib_table,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a77965_calib_table,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
.hs400_calib_table = r8a77990_calib_table,
};
/*
* Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
* So, we want to treat them equally and only have a match for ES1.2 to enforce
* this if there ever will be a way to distinguish ES1.2.
*/
static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
{ .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
{ .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
{ /* Sentinel. */ },
};
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops)
const struct tmio_mmc_dma_ops *dma_ops,
const struct renesas_sdhi_of_data *of_data,
const struct renesas_sdhi_quirks *quirks)
{
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
const struct renesas_sdhi_quirks *quirks = NULL;
const struct renesas_sdhi_of_data *of_data;
const struct soc_device_attribute *attr;
struct tmio_mmc_data *mmc_data;
struct tmio_mmc_dma *dma_priv;
struct tmio_mmc_host *host;
@ -966,12 +888,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
struct resource *res;
u16 ver;
of_data = of_device_get_match_data(&pdev->dev);
attr = soc_device_match(sdhi_quirks_match);
if (attr)
quirks = attr->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;

View File

@ -15,6 +15,7 @@
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
@ -92,7 +93,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
},
};
static const struct renesas_sdhi_of_data of_rza2_compatible = {
static const struct renesas_sdhi_of_data of_data_rza2 = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY,
.tmio_ocr_mask = MMC_VDD_32_33,
@ -107,7 +108,11 @@ static const struct renesas_sdhi_of_data of_rza2_compatible = {
.max_segs = 1,
};
static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
static const struct renesas_sdhi_of_data_with_quirks of_rza2_compatible = {
.of_data = &of_data_rza2,
};
static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
@ -122,11 +127,116 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
.max_segs = 1,
};
static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
{ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
};
static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
{ 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
};
static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
};
static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
.hs400_disabled = true,
.hs400_4taps = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
};
static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
.hs400_disabled = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
.hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a7796_es13_calib_table,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a77965_calib_table,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
.hs400_calib_table = r8a77990_calib_table,
};
/*
* Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
* So, we want to treat them equally and only have a match for ES1.2 to enforce
* this if there ever will be a way to distinguish ES1.2.
*/
static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
{ /* Sentinel. */ },
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_bad_taps2367,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77961_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_bad_taps1357,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_r8a77965,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77980_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_nohs400,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_r8a77990,
};
static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = {
.of_data = &of_data_rcar_gen3,
};
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, },
{ .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_r8a7795_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
{ .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
{ .compatible = "renesas,sdhi-r8a77980", .data = &of_r8a77980_compatible, },
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
@ -405,16 +515,27 @@ static const struct soc_device_attribute soc_dma_quirks[] = {
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
const struct soc_device_attribute *attr;
const struct renesas_sdhi_of_data_with_quirks *of_data_quirks;
const struct renesas_sdhi_quirks *quirks;
struct device *dev = &pdev->dev;
if (soc)
global_flags |= (unsigned long)soc->data;
of_data_quirks = of_device_get_match_data(&pdev->dev);
quirks = of_data_quirks->quirks;
attr = soc_device_match(soc_dma_quirks);
if (attr)
global_flags |= (unsigned long)attr->data;
attr = soc_device_match(sdhi_quirks_match);
if (attr)
quirks = attr->data;
/* value is max of SD_SECCNT. Confirmed by HW engineers */
dma_set_max_seg_size(dev, 0xffffffff);
return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops,
of_data_quirks->of_data, quirks);
}
static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = {

View File

@ -108,9 +108,9 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
renesas_sdhi_sys_dmac_enable_dma(host, false);
if (host->chan_rx)
dmaengine_terminate_all(host->chan_rx);
dmaengine_terminate_sync(host->chan_rx);
if (host->chan_tx)
dmaengine_terminate_all(host->chan_tx);
dmaengine_terminate_sync(host->chan_tx);
renesas_sdhi_sys_dmac_enable_dma(host, true);
}
@ -451,7 +451,8 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops,
of_device_get_match_data(&pdev->dev), NULL);
}
static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {

View File

@ -542,23 +542,6 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
return 0;
}
static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
if (host->sg_count < 0) {
data->error = host->sg_count;
dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n",
__func__, host->sg_count);
return data->error;
}
if (data->flags & MMC_DATA_READ)
return sd_read_long_data(host, mrq);
return sd_write_long_data(host, mrq);
}
static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
{
rtsx_pci_write_register(host->pcr, SD_CFG1,
@ -571,6 +554,33 @@ static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
}
static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
int err;
if (host->sg_count < 0) {
data->error = host->sg_count;
dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n",
__func__, host->sg_count);
return data->error;
}
if (data->flags & MMC_DATA_READ) {
if (host->initial_mode)
sd_disable_initial_mode(host);
err = sd_read_long_data(host, mrq);
if (host->initial_mode)
sd_enable_initial_mode(host);
return err;
}
return sd_write_long_data(host, mrq);
}
static void sd_normal_rw(struct realtek_pci_sdmmc *host,
struct mmc_request *mrq)
{

View File

@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/mmc-esdhc-imx.h>
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@ -95,6 +94,11 @@
#define ESDHC_VEND_SPEC2 0xc8
#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN (1 << 4)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN (0 << 4)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN (2 << 4)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN (1 << 6)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK (7 << 4)
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
@ -115,6 +119,7 @@
#define ESDHC_CTRL_4BITBUS (0x1 << 1)
#define ESDHC_CTRL_8BITBUS (0x2 << 1)
#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
#define USDHC_GET_BUSWIDTH(c) (c & ESDHC_CTRL_BUSWIDTH_MASK)
/*
* There is an INT DMA ERR mismatch between eSDHC and STD SDHC SPEC:
@ -191,6 +196,38 @@
*/
#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16)
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
ESDHC_WP_GPIO, /* external gpio pin for WP */
};
enum cd_types {
ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
ESDHC_CD_GPIO, /* external gpio pin for CD */
ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
};
/*
* struct esdhc_platform_data - platform data for esdhc on i.MX
*
* ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
*
* @wp_type: type of write_protect method (see wp_types enum above)
* @cd_type: type of card_detect method (see cd_types enum above)
*/
struct esdhc_platform_data {
enum wp_types wp_type;
enum cd_types cd_type;
int max_bus_width;
unsigned int delay_line;
unsigned int tuning_step; /* The delay cell steps in tuning procedure */
unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */
};
struct esdhc_soc_data {
u32 flags;
};
@ -376,6 +413,30 @@ static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host)
dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__);
}
/* Enable the auto tuning circuit to check the CMD line and BUS line */
static inline void usdhc_auto_tuning_mode_sel(struct sdhci_host *host)
{
u32 buswidth, auto_tune_buswidth;
buswidth = USDHC_GET_BUSWIDTH(readl(host->ioaddr + SDHCI_HOST_CONTROL));
switch (buswidth) {
case ESDHC_CTRL_8BITBUS:
auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN;
break;
case ESDHC_CTRL_4BITBUS:
auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN;
break;
default: /* 1BITBUS */
auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN;
break;
}
esdhc_clrset_le(host, ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK,
auto_tune_buswidth | ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN,
ESDHC_VEND_SPEC2);
}
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@ -597,17 +658,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
else
new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
if (val & SDHCI_CTRL_TUNED_CLK) {
new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
} else {
new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
}
writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
if (val & SDHCI_CTRL_TUNED_CLK) {
@ -622,6 +673,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
v |= ESDHC_MIX_CTRL_EXE_TUNE;
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
usdhc_auto_tuning_mode_sel(host);
} else {
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
}
@ -991,6 +1043,8 @@ static void esdhc_post_tuning(struct sdhci_host *host)
{
u32 reg;
usdhc_auto_tuning_mode_sel(host);
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;

View File

@ -2714,6 +2714,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
/* Set the timeout value to max possible */
host->max_timeout_count = 0xF;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);

View File

@ -159,6 +159,12 @@ struct sdhci_arasan_data {
/* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
* internal clock even when the clock isn't stable */
#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
/*
* Some of the Arasan variations might not have timing requirements
* met at 25MHz for Default Speed mode, those controllers work at
* 19MHz instead
*/
#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
};
struct sdhci_arasan_of_data {
@ -267,7 +273,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
* through low speeds without power cycling.
*/
sdhci_set_clock(host, host->max_clk);
phy_power_on(sdhci_arasan->phy);
if (phy_power_on(sdhci_arasan->phy)) {
pr_err("%s: Cannot power on phy.\n",
mmc_hostname(host->mmc));
return;
}
sdhci_arasan->is_phy_on = true;
/*
@ -290,6 +301,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_arasan->is_phy_on = false;
}
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) {
/*
* Some of the Arasan variations might not have timing
* requirements met at 25MHz for Default Speed mode,
* those controllers work at 19MHz instead.
*/
if (clock == DEFAULT_SPEED_MAX_DTR)
clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
}
/* Set the Input and Output Clock Phase Delays */
if (clk_data->set_clk_delays)
clk_data->set_clk_delays(host);
@ -307,7 +328,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
msleep(20);
if (ctrl_phy) {
phy_power_on(sdhci_arasan->phy);
if (phy_power_on(sdhci_arasan->phy)) {
pr_err("%s: Cannot power on phy.\n",
mmc_hostname(host->mmc));
return;
}
sdhci_arasan->is_phy_on = true;
}
}
@ -463,7 +489,9 @@ static int sdhci_arasan_suspend(struct device *dev)
ret = phy_power_off(sdhci_arasan->phy);
if (ret) {
dev_err(dev, "Cannot power off phy.\n");
sdhci_resume_host(host);
if (sdhci_resume_host(host))
dev_err(dev, "Cannot resume host.\n");
return ret;
}
sdhci_arasan->is_phy_on = false;
@ -878,6 +906,10 @@ static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
NODE_SD_1;
int err;
/* ZynqMP SD controller does not perform auto tuning in DDR50 mode */
if (mmc->ios.timing == MMC_TIMING_UHS_DDR50)
return 0;
arasan_zynqmp_dll_reset(host, device_id);
err = sdhci_execute_tuning(mmc, opcode);
@ -952,7 +984,7 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
sdhci_arasan->soc_ctl_map;
u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000);
u32 mhz = DIV_ROUND_CLOSEST_ULL(clk_get_rate(pltfm_host->clk), 1000000);
/* Having a map is optional */
if (!soc_ctl_map)
@ -986,14 +1018,16 @@ static void arasan_dt_read_clk_phase(struct device *dev,
{
struct device_node *np = dev->of_node;
int clk_phase[2] = {0};
u32 clk_phase[2] = {0};
int ret;
/*
* Read Tap Delay values from DT, if the DT does not contain the
* Tap Values then use the pre-defined values.
*/
if (of_property_read_variable_u32_array(np, prop, &clk_phase[0],
2, 0)) {
ret = of_property_read_variable_u32_array(np, prop, &clk_phase[0],
2, 0);
if (ret < 0) {
dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
prop, clk_data->clk_phase_in[timing],
clk_data->clk_phase_out[timing]);
@ -1608,6 +1642,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
host->mmc_host_ops.execute_tuning =
arasan_zynqmp_execute_tuning;
sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN;
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
}
arasan_dt_parse_clk_phases(dev, &sdhci_arasan->clk_data);

View File

@ -934,21 +934,21 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
/*
* If the host controller provides us with an incorrect timeout
* value, just skip the check and use 0xE. The hardware may take
* value, just skip the check and use the maximum. The hardware may take
* longer to time out, but that's much better than having a too-short
* timeout value.
*/
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return 0xE;
return host->max_timeout_count;
/* Unspecified command, asume max */
if (cmd == NULL)
return 0xE;
return host->max_timeout_count;
data = cmd->data;
/* Unspecified timeout, assume max */
if (!data && !cmd->busy_timeout)
return 0xE;
return host->max_timeout_count;
/* timeout in us */
target_timeout = sdhci_target_timeout(host, cmd, data);
@ -968,15 +968,15 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1;
if (count >= 0xF)
if (count > host->max_timeout_count)
break;
}
if (count >= 0xF) {
if (count > host->max_timeout_count) {
if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
DBG("Too large timeout 0x%x requested for CMD%d!\n",
count, cmd->opcode);
count = 0xE;
count = host->max_timeout_count;
} else {
*too_big = false;
}
@ -1222,6 +1222,7 @@ static int sdhci_external_dma_setup(struct sdhci_host *host,
if (!host->mapbase)
return -EINVAL;
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = host->mapbase + SDHCI_BUFFER;
cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@ -3278,8 +3279,14 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
u32 command;
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
/*
* CMD19 generates _only_ Buffer Read Ready interrupt if
* use sdhci_send_tuning.
* Need to exclude this case: PIO mode and use mmc_send_tuning,
* If not, sdhci_transfer_pio will never be called, make the
* SDHCI_INT_DATA_AVAIL always there, stuck in irq storm.
*/
if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
if (command == MMC_SEND_TUNING_BLOCK ||
command == MMC_SEND_TUNING_BLOCK_HS200) {
@ -3940,6 +3947,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
*/
host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
host->max_timeout_count = 0xE;
return host;
}
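
A side note, not from this series, on the magic values being replaced above: the SDHCI data timeout counter selects a power-of-two number of timeout-clock cycles, so moving from the hard-coded 0xE to host->max_timeout_count lets a controller that supports the vendor-specific 0xF (as sdhci-msm does above) double its longest hardware timeout. A hypothetical helper expressing the mapping:

/*
 * Sketch: per the SDHCI specification the controller gives up after
 * 2^(13 + count) cycles of the timeout clock, i.e. 0xE = 2^27 cycles
 * and the vendor-specific 0xF = 2^28 cycles.
 */
static u64 sdhci_timeout_count_to_cycles(u8 count)
{
	return 1ULL << (13 + count);
}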

View File

@ -517,6 +517,7 @@ struct sdhci_host {
unsigned int max_clk; /* Max possible freq (MHz) */
unsigned int timeout_clk; /* Timeout freq (KHz) */
u8 max_timeout_count; /* Vendor specific max timeout count */
unsigned int clk_mul; /* Clock Muliplier value */
unsigned int clock; /* Current clock (MHz) */

View File

@ -1164,9 +1164,9 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
data->bytes_xfered = 0;
/* Abort DMA */
if (data->flags & MMC_DATA_READ)
dmaengine_terminate_all(host->chan_rx);
dmaengine_terminate_sync(host->chan_rx);
else
dmaengine_terminate_all(host->chan_tx);
dmaengine_terminate_sync(host->chan_tx);
}
return false;

View File

@ -669,8 +669,8 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
if(1 != tifm_map_sg(sock, &host->bounce_buf, 1,
r_data->flags & MMC_DATA_WRITE
? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE)) {
? DMA_TO_DEVICE
: DMA_FROM_DEVICE)) {
pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
mrq->cmd->error = -ENOMEM;
@ -680,15 +680,15 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
r_data->sg_len,
r_data->flags
& MMC_DATA_WRITE
? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE);
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
if (host->sg_len < 1) {
pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
tifm_unmap_sg(sock, &host->bounce_buf, 1,
r_data->flags & MMC_DATA_WRITE
? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE);
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
mrq->cmd->error = -ENOMEM;
goto err_out;
}
@ -762,10 +762,10 @@ static void tifm_sd_end_cmd(struct tasklet_struct *t)
} else {
tifm_unmap_sg(sock, &host->bounce_buf, 1,
(r_data->flags & MMC_DATA_WRITE)
? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
? DMA_TO_DEVICE : DMA_FROM_DEVICE);
tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
(r_data->flags & MMC_DATA_WRITE)
? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
r_data->bytes_xfered = r_data->blocks

View File

@ -631,9 +631,9 @@ static void usdhi6_dma_kill(struct usdhi6_host *host)
__func__, data->sg_len, data->blocks, data->blksz);
/* Abort DMA */
if (data->flags & MMC_DATA_READ)
dmaengine_terminate_all(host->chan_rx);
dmaengine_terminate_sync(host->chan_rx);
else
dmaengine_terminate_all(host->chan_tx);
dmaengine_terminate_sync(host->chan_tx);
}
static void usdhi6_dma_check_error(struct usdhi6_host *host)
@ -1186,6 +1186,15 @@ static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return ret;
}
static int usdhi6_card_busy(struct mmc_host *mmc)
{
struct usdhi6_host *host = mmc_priv(mmc);
u32 tmp = usdhi6_read(host, USDHI6_SD_INFO2);
/* Card is busy if it is pulling dat[0] low */
return !(tmp & USDHI6_SD_INFO2_SDDAT0);
}
static const struct mmc_host_ops usdhi6_ops = {
.request = usdhi6_request,
.set_ios = usdhi6_set_ios,
@ -1193,6 +1202,7 @@ static const struct mmc_host_ops usdhi6_ops = {
.get_ro = usdhi6_get_ro,
.enable_sdio_irq = usdhi6_enable_sdio_irq,
.start_signal_voltage_switch = usdhi6_sig_volt_switch,
.card_busy = usdhi6_card_busy,
};
/* State machine handlers */

View File

@ -491,7 +491,7 @@ static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
((data->flags & MMC_DATA_READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
DMA_FROM_DEVICE : DMA_TO_DEVICE));
BUG_ON(count != 1);
via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
@ -638,7 +638,7 @@ static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
((data->flags & MMC_DATA_READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
DMA_FROM_DEVICE : DMA_TO_DEVICE));
if (data->stop)
via_sdc_send_command(host, data->stop);

View File

@ -164,9 +164,8 @@ struct mmc_request {
int tag;
#ifdef CONFIG_MMC_CRYPTO
bool crypto_enabled;
const struct bio_crypt_ctx *crypto_ctx;
int crypto_key_slot;
u32 data_unit_num;
#endif
};

View File

@ -153,7 +153,7 @@ struct mmc_host_ops {
int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
/* Check if the card is pulling dat[0:3] low */
/* Check if the card is pulling dat[0] low */
int (*card_busy)(struct mmc_host *host);
/* The tuning command opcode value is different for SD and eMMC cards */

View File

@ -1,42 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2010 Wolfram Sang <kernel@pengutronix.de>
*/
#ifndef __ASM_ARCH_IMX_ESDHC_H
#define __ASM_ARCH_IMX_ESDHC_H
#include <linux/types.h>
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
ESDHC_WP_GPIO, /* external gpio pin for WP */
};
enum cd_types {
ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
ESDHC_CD_GPIO, /* external gpio pin for CD */
ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
};
/**
* struct esdhc_platform_data - platform data for esdhc on i.MX
*
* ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
*
* @wp_type: type of write_protect method (see wp_types enum above)
* @cd_type: type of card_detect method (see cd_types enum above)
*/
struct esdhc_platform_data {
enum wp_types wp_type;
enum cd_types cd_type;
int max_bus_width;
unsigned int delay_line;
unsigned int tuning_step; /* The delay cell steps in tuning procedure */
unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */