Merge tag 'mmc-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC updates from Ulf Hansson:
 "MMC core:
   - Add support for host software queue for (e)MMC/SD
   - Throttle polling rate for CMD6
   - Update CMD13 busy condition check for CMD6 commands
   - Improve busy detect polling for erase/trim/discard/HPI
   - Fixup support for HW busy detection for HPI commands
   - Re-work and improve support for eMMC sanitize commands

  MMC host:
   - mmci:
      * Add support for sdmmc variant revision 2.0
   - mmci_sdmmc:
      * Improve support for busyend detection
      * Fixup support for signal voltage switch
      * Add support for tuning with delay block
   - mtk-sd:
      * Fix another SDIO irq issue
   - sdhci:
      * Disable native card detect when a GPIO based type exists
      * Add option to defer request completion
   - sdhci_am654:
      * Add support to set a tap value per speed mode
   - sdhci-esdhc-imx:
      * Add support for i.MX8MM based variant
      * Fixup support for standard tuning on i.MX8 usdhc
      * Optimize for strobe/clock dll settings
      * Fixup support for system and runtime suspend/resume
   - sdhci-iproc:
      * Update regulator/bus-voltage management for bcm2711
   - sdhci-msm:
      * Prevent clock gating with PWRSAVE_DLL on broken variants
      * Fix management of CQE during SDHCI reset
   - sdhci-of-arasan:
      * Add support for auto tuning on ZynqMP based platforms
   - sdhci-omap:
      * Add support for system suspend/resume
   - sdhci-sprd:
      * Add support for HW busy detection
      * Enable support for host software queue
   - sdhci-tegra:
      * Add support for HW busy detection
   - tmio/renesas_sdhi:
      * Enforce retune after runtime suspend
   - renesas_sdhi:
      * Use manual tap correction for HS400 on some variants
      * Add support for manual correction of tap values for tunings"

* tag 'mmc-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (86 commits)
  mmc: cavium-octeon: remove nonsense variable coercion
  mmc: mediatek: fix SDIO irq issue
  mmc: mmci_sdmmc: Fix clear busyd0end irq flag
  dt-bindings: mmc: Fix node name in an example
  mmc: core: Re-work the code for eMMC sanitize
  mmc: sdhci: use FIELD_GET for preset value bit masks
  mmc: sdhci-of-at91: Display clock changes for debug purpose only
  mmc: sdhci: iproc: Add custom set_power() callback for bcm2711
  mmc: sdhci: am654: Use sdhci_set_power_and_voltage()
  mmc: sdhci: at91: Use sdhci_set_power_and_voltage()
  mmc: sdhci: milbeaut: Use sdhci_set_power_and_voltage()
  mmc: sdhci: arasan: Use sdhci_set_power_and_voltage()
  mmc: sdhci: Introduce sdhci_set_power_and_bus_voltage()
  mmc: vub300: Use scnprintf() for avoiding potential buffer overflow
  dt-bindings: mmc: synopsys-dw-mshc: fix clock-freq-min-max in example
  sdhci: tegra: Enable MMC_CAP_WAIT_WHILE_BUSY host capability
  sdhci: tegra: Implement Tegra specific set_timeout callback
  mmc: sdhci-omap: Add Support for Suspend/Resume
  mmc: renesas_sdhi: simplify execute_tuning
  mmc: renesas_sdhi: Use BITS_PER_LONG helper
  ...
commit dfabb077d6
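Several of the core changes above ("Throttle polling rate for CMD6", "Improve busy detect polling") replace fixed-rate CMD13 polling with an exponential backoff: start with a short sleep and double it up to a cap while the card stays busy. A minimal standalone sketch of the pattern follows; card_is_busy() is a made-up stand-in for a CMD13 status query, and the 32 us / 32768 us bounds mirror the reworked __mmc_poll_for_busy() further down in this diff.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for a CMD13 (SEND_STATUS) busy check. */
static bool card_is_busy(void)
{
	static int busy_polls = 5;	/* pretend the card is busy for 5 polls */
	return busy_polls-- > 0;
}

/* Poll with exponential backoff, mirroring the doubling delay used by the
 * reworked core busy polling: start at 32 us, cap at 32768 us. */
static int poll_for_busy(unsigned int timeout_ms)
{
	unsigned int delay_us = 32, delay_max_us = 32768;
	unsigned int waited_us = 0;

	while (card_is_busy()) {
		if (waited_us / 1000 > timeout_ms) {
			fprintf(stderr, "card stuck in busy state\n");
			return -1;
		}
		usleep(delay_us);	/* the kernel uses usleep_range(udelay, udelay * 2) */
		waited_us += delay_us;
		if (delay_us < delay_max_us)
			delay_us *= 2;
	}
	return 0;
}

int main(void)
{
	return poll_for_busy(1000) ? 1 : 0;
}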
@ -43,6 +43,11 @@ Optional properties:
|
|||
This property allows the user to change the tuning step to more than one delay
|
||||
cells, which is useful for some special boards or cards when the default
|
||||
tuning step can't find the proper delay window within limited tuning retries.
|
||||
- fsl,strobe-dll-delay-target: Specify the strobe dll control slave delay target.
|
||||
This delay target programs the host controller loopback read clock, and this
|
||||
property allows the user to change the delay target for the strobe input read clock.
|
||||
If this property is not used, the driver sets the delay target to 7 by default.
|
||||
Only eMMC HS400 mode needs to take care of this property.
|
||||
|
||||
Examples:
|
||||
|
||||
|
|
|
@ -351,7 +351,7 @@ dependencies:
|
|||
|
||||
examples:
|
||||
- |
|
||||
sdhci@ab000000 {
|
||||
mmc@ab000000 {
|
||||
compatible = "sdhci";
|
||||
reg = <0xab000000 0x200>;
|
||||
interrupts = <23>;
|
||||
|
|
|
@ -28,6 +28,8 @@ specific for ux500 variant:
|
|||
- st,sig-pin-fbclk : feedback clock signal pin used.
|
||||
|
||||
specific for sdmmc variant:
|
||||
- reg : a second base register may be defined if a delay
|
||||
block is present and used for tuning.
|
||||
- st,sig-dir : signal direction polarity used for cmd, dat0 dat123.
|
||||
- st,neg-edge : data & command phase relation, generated on
|
||||
sd clock falling edge.
|
||||
|
|
|
@ -18,7 +18,20 @@ Required Properties:
|
|||
- clocks: Handles to the clock inputs.
|
||||
- clock-names: Tuple including "clk_xin" and "clk_ahb"
|
||||
- interrupts: Interrupt specifiers
|
||||
- ti,otap-del-sel: Output Tap Delay select
|
||||
Output tap delay for each speed mode:
|
||||
- ti,otap-del-sel-legacy
|
||||
- ti,otap-del-sel-mmc-hs
|
||||
- ti,otap-del-sel-sd-hs
|
||||
- ti,otap-del-sel-sdr12
|
||||
- ti,otap-del-sel-sdr25
|
||||
- ti,otap-del-sel-sdr50
|
||||
- ti,otap-del-sel-sdr104
|
||||
- ti,otap-del-sel-ddr50
|
||||
- ti,otap-del-sel-ddr52
|
||||
- ti,otap-del-sel-hs200
|
||||
- ti,otap-del-sel-hs400
|
||||
These bindings must be provided, otherwise the driver will disable the
|
||||
corresponding speed mode (i.e. all nodes must provide at least -legacy)
|
||||
|
||||
Optional Properties (Required for ti,am654-sdhci-5.1 and ti,j721e-sdhci-8bit):
|
||||
- ti,trm-icp: DLL trim select
|
||||
|
@ -38,6 +51,10 @@ Example:
|
|||
interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
|
||||
sdhci-caps-mask = <0x80000007 0x0>;
|
||||
mmc-ddr-1_8v;
|
||||
ti,otap-del-sel = <0x2>;
|
||||
ti,otap-del-sel-legacy = <0x0>;
|
||||
ti,otap-del-sel-mmc-hs = <0x0>;
|
||||
ti,otap-del-sel-ddr52 = <0x5>;
|
||||
ti,otap-del-sel-hs200 = <0x5>;
|
||||
ti,otap-del-sel-hs400 = <0x0>;
|
||||
ti,trm-icp = <0x8>;
|
||||
};
|
||||
|
|
|
@ -26,7 +26,13 @@ Required properties:
|
|||
|
||||
- reg: Base address and length of the register in the following order:
|
||||
- Host controller register map (required)
|
||||
- SD Core register map (required for msm-v4 and below)
|
||||
- SD Core register map (required for controllers earlier than msm-v5)
|
||||
- CQE register map (Optional, CQE support is present on SDHC instance meant
|
||||
for eMMC and version v4.2 and above)
|
||||
- reg-names: When CQE register map is supplied, below reg-names are required
|
||||
- "hc" for Host controller register map
|
||||
- "core" for SD core register map
|
||||
- "cqhci" for CQE register map
|
||||
- interrupts: Should contain interrupt specifiers for the interrupts:
|
||||
- Host controller interrupt (required)
|
||||
- pinctrl-names: Should contain only one value - "default".
|
||||
|
|
|
@ -62,7 +62,7 @@ examples:
|
|||
cap-mmc-highspeed;
|
||||
cap-sd-highspeed;
|
||||
card-detect-delay = <200>;
|
||||
clock-freq-min-max = <400000 200000000>;
|
||||
max-frequency = <200000000>;
|
||||
clock-frequency = <400000000>;
|
||||
data-addr = <0x200>;
|
||||
fifo-depth = <0x80>;
|
||||
|
|
|
@ -512,6 +512,8 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
|
|||
static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
|
||||
{
|
||||
switch (ioctl_id) {
|
||||
case IOCTL_SD_DLL_RESET:
|
||||
case IOCTL_SET_SD_TAPDELAY:
|
||||
case IOCTL_SET_PLL_FRAC_MODE:
|
||||
case IOCTL_GET_PLL_FRAC_MODE:
|
||||
case IOCTL_SET_PLL_FRAC_DATA:
|
||||
|
|
|
@ -70,7 +70,6 @@ MODULE_ALIAS("mmc:block");
|
|||
* ample.
|
||||
*/
|
||||
#define MMC_BLK_TIMEOUT_MS (10 * 1000)
|
||||
#define MMC_SANITIZE_REQ_TIMEOUT 240000
|
||||
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
|
||||
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
|
||||
|
||||
|
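For reference, the two macros kept above simply unpack a CMD6 (MMC_SWITCH) argument, whose layout is mode | index | value | cmd-set as built by __mmc_switch() in the core. A tiny standalone check with sample values (165 is EXT_CSD_SANITIZE_START, the case the ioctl path below tests for):

#include <stdio.h>

#define MMC_EXTRACT_INDEX_FROM_ARG(x) (((x) & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) (((x) & 0x0000FF00) >> 8)

int main(void)
{
	/* mode 0x03 (write byte) | index 165 | value 1 | cmd set 1 */
	unsigned int arg = (0x03u << 24) | (165u << 16) | (1u << 8) | 1u;

	printf("index=%u value=%u\n",
	       (unsigned int)MMC_EXTRACT_INDEX_FROM_ARG(arg),
	       (unsigned int)MMC_EXTRACT_VALUE_FROM_ARG(arg));
	/* prints: index=165 value=1 */
	return 0;
}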
@ -168,6 +167,11 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
|
|||
|
||||
static inline int mmc_blk_part_switch(struct mmc_card *card,
|
||||
unsigned int part_type);
|
||||
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
|
||||
struct mmc_card *card,
|
||||
int disable_multi,
|
||||
struct mmc_queue *mq);
|
||||
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
|
||||
|
||||
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
|
||||
{
|
||||
|
@ -408,44 +412,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ioctl_do_sanitize(struct mmc_card *card)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!mmc_can_sanitize(card)) {
|
||||
pr_warn("%s: %s - SANITIZE is not supported\n",
|
||||
mmc_hostname(card->host), __func__);
|
||||
err = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
|
||||
pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
|
||||
mmc_hostname(card->host), __func__);
|
||||
|
||||
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_SANITIZE_START, 1,
|
||||
MMC_SANITIZE_REQ_TIMEOUT);
|
||||
|
||||
if (err)
|
||||
pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
|
||||
mmc_hostname(card->host), __func__, err);
|
||||
|
||||
pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
|
||||
__func__);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline bool mmc_blk_in_tran_state(u32 status)
|
||||
{
|
||||
/*
|
||||
* Some cards mishandle the status bits, so make sure to check both the
|
||||
* busy indication and the card state.
|
||||
*/
|
||||
return status & R1_READY_FOR_DATA &&
|
||||
(R1_CURRENT_STATE(status) == R1_STATE_TRAN);
|
||||
}
|
||||
|
||||
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
|
||||
u32 *resp_errs)
|
||||
{
|
||||
|
@ -477,13 +443,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
|
|||
__func__, status);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some cards mishandle the status bits,
|
||||
* so make sure to check both the busy
|
||||
* indication and the card state.
|
||||
*/
|
||||
} while (!mmc_blk_in_tran_state(status));
|
||||
} while (!mmc_ready_for_data(status));
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -580,15 +540,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
|||
}
|
||||
|
||||
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
|
||||
(cmd.opcode == MMC_SWITCH)) {
|
||||
err = ioctl_do_sanitize(card);
|
||||
|
||||
if (err)
|
||||
pr_err("%s: ioctl_do_sanitize() failed. err = %d",
|
||||
__func__, err);
|
||||
|
||||
return err;
|
||||
}
|
||||
(cmd.opcode == MMC_SWITCH))
|
||||
return mmc_sanitize(card);
|
||||
|
||||
mmc_wait_for_req(card->host, &mrq);
|
||||
|
||||
|
@ -1532,9 +1485,30 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
|
|||
return mmc_blk_cqe_start_req(mq->card->host, mrq);
|
||||
}
|
||||
|
||||
static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
|
||||
{
|
||||
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
|
||||
struct mmc_host *host = mq->card->host;
|
||||
int err;
|
||||
|
||||
mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
|
||||
mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
|
||||
mmc_pre_req(host, &mqrq->brq.mrq);
|
||||
|
||||
err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
|
||||
if (err)
|
||||
mmc_post_req(host, &mqrq->brq.mrq, err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
|
||||
{
|
||||
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
|
||||
struct mmc_host *host = mq->card->host;
|
||||
|
||||
if (host->hsq_enabled)
|
||||
return mmc_blk_hsq_issue_rw_rq(mq, req);
|
||||
|
||||
mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
|
||||
|
||||
|
@ -1666,7 +1640,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
|
|||
goto error_exit;
|
||||
|
||||
if (!mmc_host_is_spi(host) &&
|
||||
!mmc_blk_in_tran_state(status)) {
|
||||
!mmc_ready_for_data(status)) {
|
||||
err = mmc_blk_fix_state(card, req);
|
||||
if (err)
|
||||
goto error_exit;
|
||||
|
@ -1726,7 +1700,7 @@ static bool mmc_blk_status_error(struct request *req, u32 status)
|
|||
return brq->cmd.resp[0] & CMD_ERRORS ||
|
||||
brq->stop.resp[0] & stop_err_bits ||
|
||||
status & stop_err_bits ||
|
||||
(rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
|
||||
(rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
|
||||
}
|
||||
|
||||
static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
|
||||
|
@ -1788,7 +1762,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
|
|||
|
||||
/* Try to get back to "tran" state */
|
||||
if (!mmc_host_is_spi(mq->card->host) &&
|
||||
(err || !mmc_blk_in_tran_state(status)))
|
||||
(err || !mmc_ready_for_data(status)))
|
||||
err = mmc_blk_fix_state(mq->card, req);
|
||||
|
||||
/*
|
||||
|
@ -1920,6 +1894,41 @@ static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
|
|||
mmc_run_bkops(mq->card);
|
||||
}
|
||||
|
||||
static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
|
||||
{
|
||||
struct mmc_queue_req *mqrq =
|
||||
container_of(mrq, struct mmc_queue_req, brq.mrq);
|
||||
struct request *req = mmc_queue_req_to_req(mqrq);
|
||||
struct request_queue *q = req->q;
|
||||
struct mmc_queue *mq = q->queuedata;
|
||||
struct mmc_host *host = mq->card->host;
|
||||
unsigned long flags;
|
||||
|
||||
if (mmc_blk_rq_error(&mqrq->brq) ||
|
||||
mmc_blk_urgent_bkops_needed(mq, mqrq)) {
|
||||
spin_lock_irqsave(&mq->lock, flags);
|
||||
mq->recovery_needed = true;
|
||||
mq->recovery_req = req;
|
||||
spin_unlock_irqrestore(&mq->lock, flags);
|
||||
|
||||
host->cqe_ops->cqe_recovery_start(host);
|
||||
|
||||
schedule_work(&mq->recovery_work);
|
||||
return;
|
||||
}
|
||||
|
||||
mmc_blk_rw_reset_success(mq, req);
|
||||
|
||||
/*
|
||||
* Block layer timeouts race with completions which means the normal
|
||||
* completion path cannot be used during recovery.
|
||||
*/
|
||||
if (mq->in_recovery)
|
||||
mmc_blk_cqe_complete_rq(mq, req);
|
||||
else
|
||||
blk_mq_complete_request(req);
|
||||
}
|
||||
|
||||
void mmc_blk_mq_complete(struct request *req)
|
||||
{
|
||||
struct mmc_queue *mq = req->q->queuedata;
|
||||
|
|
|
@ -403,23 +403,6 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
|
|||
|
||||
cmd = mrq->cmd;
|
||||
|
||||
/*
|
||||
* If host has timed out waiting for the sanitize
|
||||
* to complete, card might be still in programming state
|
||||
* so let's try to bring the card out of programming
|
||||
* state.
|
||||
*/
|
||||
if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
|
||||
if (!mmc_interrupt_hpi(host->card)) {
|
||||
pr_warn("%s: %s: Interrupted sanitize\n",
|
||||
mmc_hostname(host), __func__);
|
||||
cmd->error = 0;
|
||||
break;
|
||||
} else {
|
||||
pr_err("%s: %s: Failed to interrupt sanitize\n",
|
||||
mmc_hostname(host), __func__);
|
||||
}
|
||||
}
|
||||
if (!cmd->error || !cmd->retries ||
|
||||
mmc_card_removed(host->card))
|
||||
break;
|
||||
|
@ -1658,8 +1641,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
|
|||
struct mmc_command cmd = {};
|
||||
unsigned int qty = 0, busy_timeout = 0;
|
||||
bool use_r1b_resp = false;
|
||||
unsigned long timeout;
|
||||
int loop_udelay=64, udelay_max=32768;
|
||||
int err;
|
||||
|
||||
mmc_retune_hold(card->host);
|
||||
|
@ -1763,38 +1744,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
|
|||
if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
|
||||
goto out;
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(busy_timeout);
|
||||
do {
|
||||
memset(&cmd, 0, sizeof(struct mmc_command));
|
||||
cmd.opcode = MMC_SEND_STATUS;
|
||||
cmd.arg = card->rca << 16;
|
||||
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
|
||||
/* Do not retry else we can't see errors */
|
||||
err = mmc_wait_for_cmd(card->host, &cmd, 0);
|
||||
if (err || R1_STATUS(cmd.resp[0])) {
|
||||
pr_err("error %d requesting status %#x\n",
|
||||
err, cmd.resp[0]);
|
||||
err = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Timeout if the device never becomes ready for data and
|
||||
* never leaves the program state.
|
||||
*/
|
||||
if (time_after(jiffies, timeout)) {
|
||||
pr_err("%s: Card stuck in programming state! %s\n",
|
||||
mmc_hostname(card->host), __func__);
|
||||
err = -EIO;
|
||||
goto out;
|
||||
}
|
||||
if ((cmd.resp[0] & R1_READY_FOR_DATA) &&
|
||||
R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG)
|
||||
break;
|
||||
|
||||
usleep_range(loop_udelay, loop_udelay*2);
|
||||
if (loop_udelay < udelay_max)
|
||||
loop_udelay *= 2;
|
||||
} while (1);
|
||||
/* Let's poll to find out when the erase operation completes. */
|
||||
err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);
|
||||
|
||||
out:
|
||||
mmc_retune_release(card->host);
|
||||
|
@ -1957,7 +1908,6 @@ int mmc_can_sanitize(struct mmc_card *card)
|
|||
return 1;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_can_sanitize);
|
||||
|
||||
int mmc_can_secure_erase_trim(struct mmc_card *card)
|
||||
{
|
||||
|
|
|
@ -1055,7 +1055,7 @@ static int mmc_select_hs(struct mmc_card *card)
|
|||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
|
||||
card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
|
||||
true, true, true);
|
||||
true, true);
|
||||
if (err)
|
||||
pr_warn("%s: switch to high-speed failed, err:%d\n",
|
||||
mmc_hostname(card->host), err);
|
||||
|
@ -1087,7 +1087,7 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
|
|||
ext_csd_bits,
|
||||
card->ext_csd.generic_cmd6_time,
|
||||
MMC_TIMING_MMC_DDR52,
|
||||
true, true, true);
|
||||
true, true);
|
||||
if (err) {
|
||||
pr_err("%s: switch to bus width %d ddr failed\n",
|
||||
mmc_hostname(host), 1 << bus_width);
|
||||
|
@ -1155,7 +1155,7 @@ static int mmc_select_hs400(struct mmc_card *card)
|
|||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_HS_TIMING, val,
|
||||
card->ext_csd.generic_cmd6_time, 0,
|
||||
true, false, true);
|
||||
false, true);
|
||||
if (err) {
|
||||
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
|
||||
mmc_hostname(host), err);
|
||||
|
@ -1173,7 +1173,7 @@ static int mmc_select_hs400(struct mmc_card *card)
|
|||
max_dtr = card->ext_csd.hs_max_dtr;
|
||||
mmc_set_clock(host, max_dtr);
|
||||
|
||||
err = mmc_switch_status(card);
|
||||
err = mmc_switch_status(card, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1197,7 +1197,7 @@ static int mmc_select_hs400(struct mmc_card *card)
|
|||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_HS_TIMING, val,
|
||||
card->ext_csd.generic_cmd6_time, 0,
|
||||
true, false, true);
|
||||
false, true);
|
||||
if (err) {
|
||||
pr_err("%s: switch to hs400 failed, err:%d\n",
|
||||
mmc_hostname(host), err);
|
||||
|
@ -1211,7 +1211,7 @@ static int mmc_select_hs400(struct mmc_card *card)
|
|||
if (host->ops->hs400_complete)
|
||||
host->ops->hs400_complete(host);
|
||||
|
||||
err = mmc_switch_status(card);
|
||||
err = mmc_switch_status(card, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1243,20 +1243,20 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
|
|||
val = EXT_CSD_TIMING_HS;
|
||||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
|
||||
val, card->ext_csd.generic_cmd6_time, 0,
|
||||
true, false, true);
|
||||
false, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
|
||||
|
||||
err = mmc_switch_status(card);
|
||||
err = mmc_switch_status(card, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
/* Switch HS DDR to HS */
|
||||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
|
||||
EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
|
||||
0, true, false, true);
|
||||
0, false, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1265,7 +1265,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
|
|||
if (host->ops->hs400_downgrade)
|
||||
host->ops->hs400_downgrade(host);
|
||||
|
||||
err = mmc_switch_status(card);
|
||||
err = mmc_switch_status(card, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1274,7 +1274,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
|
|||
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
|
||||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
|
||||
val, card->ext_csd.generic_cmd6_time, 0,
|
||||
true, false, true);
|
||||
false, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1285,7 +1285,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
|
|||
* failed. If there really is a problem, we would expect tuning will
|
||||
* fail and the result ends up the same.
|
||||
*/
|
||||
err = __mmc_switch_status(card, false);
|
||||
err = mmc_switch_status(card, false);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1358,7 +1358,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
|
|||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
|
||||
card->ext_csd.generic_cmd6_time, 0,
|
||||
true, false, true);
|
||||
false, true);
|
||||
if (err) {
|
||||
pr_err("%s: switch to hs for hs400es failed, err:%d\n",
|
||||
mmc_hostname(host), err);
|
||||
|
@ -1366,7 +1366,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
|
|||
}
|
||||
|
||||
mmc_set_timing(host, MMC_TIMING_MMC_HS);
|
||||
err = mmc_switch_status(card);
|
||||
err = mmc_switch_status(card, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1392,7 +1392,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
|
|||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_HS_TIMING, val,
|
||||
card->ext_csd.generic_cmd6_time, 0,
|
||||
true, false, true);
|
||||
false, true);
|
||||
if (err) {
|
||||
pr_err("%s: switch to hs400es failed, err:%d\n",
|
||||
mmc_hostname(host), err);
|
||||
|
@ -1407,7 +1407,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
|
|||
if (host->ops->hs400_enhanced_strobe)
|
||||
host->ops->hs400_enhanced_strobe(host, &host->ios);
|
||||
|
||||
err = mmc_switch_status(card);
|
||||
err = mmc_switch_status(card, true);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
@ -1457,7 +1457,7 @@ static int mmc_select_hs200(struct mmc_card *card)
|
|||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_HS_TIMING, val,
|
||||
card->ext_csd.generic_cmd6_time, 0,
|
||||
true, false, true);
|
||||
false, true);
|
||||
if (err)
|
||||
goto err;
|
||||
old_timing = host->ios.timing;
|
||||
|
@ -1468,7 +1468,7 @@ static int mmc_select_hs200(struct mmc_card *card)
|
|||
* switch failed. If there really is a problem, we would expect
|
||||
* tuning will fail and the result ends up the same.
|
||||
*/
|
||||
err = __mmc_switch_status(card, false);
|
||||
err = mmc_switch_status(card, false);
|
||||
|
||||
/*
|
||||
* mmc_select_timing() assumes timing has not changed if
|
||||
|
@ -1851,15 +1851,19 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
|
|||
*/
|
||||
card->reenable_cmdq = card->ext_csd.cmdq_en;
|
||||
|
||||
if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
|
||||
if (host->cqe_ops && !host->cqe_enabled) {
|
||||
err = host->cqe_ops->cqe_enable(host, card);
|
||||
if (err) {
|
||||
pr_err("%s: Failed to enable CQE, error %d\n",
|
||||
mmc_hostname(host), err);
|
||||
} else {
|
||||
if (!err) {
|
||||
host->cqe_enabled = true;
|
||||
pr_info("%s: Command Queue Engine enabled\n",
|
||||
mmc_hostname(host));
|
||||
|
||||
if (card->ext_csd.cmdq_en) {
|
||||
pr_info("%s: Command Queue Engine enabled\n",
|
||||
mmc_hostname(host));
|
||||
} else {
|
||||
host->hsq_enabled = true;
|
||||
pr_info("%s: Host Software Queue enabled\n",
|
||||
mmc_hostname(host));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1958,7 +1962,7 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
|
|||
|
||||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_POWER_OFF_NOTIFICATION,
|
||||
notify_type, timeout, 0, true, false, false);
|
||||
notify_type, timeout, 0, false, false);
|
||||
if (err)
|
||||
pr_err("%s: Power Off Notification timed out, %u\n",
|
||||
mmc_hostname(card->host), timeout);
|
||||
|
|
|
@ -19,9 +19,9 @@
|
|||
#include "host.h"
|
||||
#include "mmc_ops.h"
|
||||
|
||||
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/
|
||||
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
|
||||
#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
|
||||
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
|
||||
|
||||
static const u8 tuning_blk_pattern_4bit[] = {
|
||||
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
|
||||
|
@ -431,7 +431,7 @@ static int mmc_switch_status_error(struct mmc_host *host, u32 status)
|
|||
}
|
||||
|
||||
/* Caller must hold re-tuning */
|
||||
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
|
||||
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
|
||||
{
|
||||
u32 status;
|
||||
int err;
|
||||
|
@ -445,18 +445,54 @@ int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
|
|||
return mmc_switch_status_error(card->host, status);
|
||||
}
|
||||
|
||||
int mmc_switch_status(struct mmc_card *card)
|
||||
static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
|
||||
enum mmc_busy_cmd busy_cmd, bool *busy)
|
||||
{
|
||||
return __mmc_switch_status(card, true);
|
||||
struct mmc_host *host = card->host;
|
||||
u32 status = 0;
|
||||
int err;
|
||||
|
||||
if (host->ops->card_busy) {
|
||||
*busy = host->ops->card_busy(host);
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = mmc_send_status(card, &status);
|
||||
if (retry_crc_err && err == -EILSEQ) {
|
||||
*busy = true;
|
||||
return 0;
|
||||
}
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
switch (busy_cmd) {
|
||||
case MMC_BUSY_CMD6:
|
||||
err = mmc_switch_status_error(card->host, status);
|
||||
break;
|
||||
case MMC_BUSY_ERASE:
|
||||
err = R1_STATUS(status) ? -EIO : 0;
|
||||
break;
|
||||
case MMC_BUSY_HPI:
|
||||
break;
|
||||
default:
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*busy = !mmc_ready_for_data(status);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
||||
bool send_status, bool retry_crc_err)
|
||||
static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
||||
bool send_status, bool retry_crc_err,
|
||||
enum mmc_busy_cmd busy_cmd)
|
||||
{
|
||||
struct mmc_host *host = card->host;
|
||||
int err;
|
||||
unsigned long timeout;
|
||||
u32 status = 0;
|
||||
unsigned int udelay = 32, udelay_max = 32768;
|
||||
bool expired = false;
|
||||
bool busy = false;
|
||||
|
||||
|
@ -478,21 +514,9 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
|||
*/
|
||||
expired = time_after(jiffies, timeout);
|
||||
|
||||
if (host->ops->card_busy) {
|
||||
busy = host->ops->card_busy(host);
|
||||
} else {
|
||||
err = mmc_send_status(card, &status);
|
||||
if (retry_crc_err && err == -EILSEQ) {
|
||||
busy = true;
|
||||
} else if (err) {
|
||||
return err;
|
||||
} else {
|
||||
err = mmc_switch_status_error(host, status);
|
||||
if (err)
|
||||
return err;
|
||||
busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
|
||||
}
|
||||
}
|
||||
err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Timeout if the device still remains busy. */
|
||||
if (expired && busy) {
|
||||
|
@ -500,11 +524,24 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
|||
mmc_hostname(host), __func__);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/* Throttle the polling rate to avoid hogging the CPU. */
|
||||
if (busy) {
|
||||
usleep_range(udelay, udelay * 2);
|
||||
if (udelay < udelay_max)
|
||||
udelay *= 2;
|
||||
}
|
||||
} while (busy);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
||||
enum mmc_busy_cmd busy_cmd)
|
||||
{
|
||||
return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
|
||||
}
|
||||
|
||||
/**
|
||||
* __mmc_switch - modify EXT_CSD register
|
||||
* @card: the MMC card associated with the data transfer
|
||||
|
@ -514,7 +551,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
|||
* @timeout_ms: timeout (ms) for operation performed by register write,
|
||||
* timeout of zero implies maximum possible timeout
|
||||
* @timing: new timing to change to
|
||||
* @use_busy_signal: use the busy signal as response type
|
||||
* @send_status: send status cmd to poll for busy
|
||||
* @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
|
||||
*
|
||||
|
@ -522,12 +558,12 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
|||
*/
|
||||
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
||||
unsigned int timeout_ms, unsigned char timing,
|
||||
bool use_busy_signal, bool send_status, bool retry_crc_err)
|
||||
bool send_status, bool retry_crc_err)
|
||||
{
|
||||
struct mmc_host *host = card->host;
|
||||
int err;
|
||||
struct mmc_command cmd = {};
|
||||
bool use_r1b_resp = use_busy_signal;
|
||||
bool use_r1b_resp = true;
|
||||
unsigned char old_timing = host->ios.timing;
|
||||
|
||||
mmc_retune_hold(host);
|
||||
|
@ -562,24 +598,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
|||
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
|
||||
}
|
||||
|
||||
if (index == EXT_CSD_SANITIZE_START)
|
||||
cmd.sanitize_busy = true;
|
||||
|
||||
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* No need to check card status in case of unblocking command */
|
||||
if (!use_busy_signal)
|
||||
goto out;
|
||||
|
||||
/* If SPI or HW busy detection was used above, then we don't need to poll. */
|
||||
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
|
||||
mmc_host_is_spi(host))
|
||||
goto out_tim;
|
||||
|
||||
/* Let's try to poll to find out when the command is completed. */
|
||||
err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
|
||||
err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
|
||||
MMC_BUSY_CMD6);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
|
@ -589,7 +619,7 @@ out_tim:
|
|||
mmc_set_timing(host, timing);
|
||||
|
||||
if (send_status) {
|
||||
err = mmc_switch_status(card);
|
||||
err = mmc_switch_status(card, true);
|
||||
if (err && timing)
|
||||
mmc_set_timing(host, old_timing);
|
||||
}
|
||||
|
@ -603,7 +633,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
|||
unsigned int timeout_ms)
|
||||
{
|
||||
return __mmc_switch(card, set, index, value, timeout_ms, 0,
|
||||
true, true, false);
|
||||
true, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_switch);
|
||||
|
||||
|
@ -799,32 +829,46 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
|
|||
return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
|
||||
}
|
||||
|
||||
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
|
||||
static int mmc_send_hpi_cmd(struct mmc_card *card)
|
||||
{
|
||||
unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
|
||||
struct mmc_host *host = card->host;
|
||||
bool use_r1b_resp = true;
|
||||
struct mmc_command cmd = {};
|
||||
unsigned int opcode;
|
||||
int err;
|
||||
|
||||
opcode = card->ext_csd.hpi_cmd;
|
||||
if (opcode == MMC_STOP_TRANSMISSION)
|
||||
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
|
||||
else if (opcode == MMC_SEND_STATUS)
|
||||
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
|
||||
|
||||
cmd.opcode = opcode;
|
||||
cmd.opcode = card->ext_csd.hpi_cmd;
|
||||
cmd.arg = card->rca << 16 | 1;
|
||||
|
||||
err = mmc_wait_for_cmd(card->host, &cmd, 0);
|
||||
/*
|
||||
* Make sure the host's max_busy_timeout fits the needed timeout for HPI.
|
||||
* In case it doesn't, let's instruct the host to avoid HW busy
|
||||
* detection, by using an R1 response instead of R1B.
|
||||
*/
|
||||
if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
|
||||
use_r1b_resp = false;
|
||||
|
||||
if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
|
||||
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
|
||||
cmd.busy_timeout = busy_timeout_ms;
|
||||
} else {
|
||||
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
|
||||
use_r1b_resp = false;
|
||||
}
|
||||
|
||||
err = mmc_wait_for_cmd(host, &cmd, 0);
|
||||
if (err) {
|
||||
pr_warn("%s: error %d interrupting operation. "
|
||||
"HPI command response %#x\n", mmc_hostname(card->host),
|
||||
err, cmd.resp[0]);
|
||||
pr_warn("%s: HPI error %d. Command response %#x\n",
|
||||
mmc_hostname(host), err, cmd.resp[0]);
|
||||
return err;
|
||||
}
|
||||
if (status)
|
||||
*status = cmd.resp[0];
|
||||
|
||||
return 0;
|
||||
/* No need to poll when using HW busy detection. */
|
||||
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
|
||||
return 0;
|
||||
|
||||
/* Let's poll to find out when the HPI request completes. */
|
||||
return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -838,7 +882,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
|
|||
{
|
||||
int err;
|
||||
u32 status;
|
||||
unsigned long prg_wait;
|
||||
|
||||
if (!card->ext_csd.hpi_en) {
|
||||
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
|
||||
|
@ -871,20 +914,7 @@ int mmc_interrupt_hpi(struct mmc_card *card)
|
|||
goto out;
|
||||
}
|
||||
|
||||
err = mmc_send_hpi_cmd(card, &status);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
|
||||
do {
|
||||
err = mmc_send_status(card, &status);
|
||||
|
||||
if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
|
||||
break;
|
||||
if (time_after(jiffies, prg_wait))
|
||||
err = -ETIMEDOUT;
|
||||
} while (!err);
|
||||
|
||||
err = mmc_send_hpi_cmd(card);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
@ -1000,3 +1030,37 @@ int mmc_cmdq_disable(struct mmc_card *card)
|
|||
return mmc_cmdq_switch(card, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
|
||||
|
||||
int mmc_sanitize(struct mmc_card *card)
|
||||
{
|
||||
struct mmc_host *host = card->host;
|
||||
int err;
|
||||
|
||||
if (!mmc_can_sanitize(card)) {
|
||||
pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
|
||||
|
||||
mmc_retune_hold(host);
|
||||
|
||||
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
|
||||
1, MMC_SANITIZE_TIMEOUT_MS);
|
||||
if (err)
|
||||
pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
|
||||
|
||||
/*
|
||||
* If the sanitize operation timed out, the card is probably still busy
|
||||
* in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
|
||||
* it with a HPI command to get back into R1_STATE_TRAN.
|
||||
*/
|
||||
if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
|
||||
pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
|
||||
|
||||
mmc_retune_release(host);
|
||||
|
||||
pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_sanitize);
|
||||
|
|
|
@ -10,6 +10,12 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
|
||||
enum mmc_busy_cmd {
|
||||
MMC_BUSY_CMD6,
|
||||
MMC_BUSY_ERASE,
|
||||
MMC_BUSY_HPI,
|
||||
};
|
||||
|
||||
struct mmc_host;
|
||||
struct mmc_card;
|
||||
|
||||
|
@ -26,20 +32,21 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
|
|||
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
|
||||
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
|
||||
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
|
||||
int mmc_interrupt_hpi(struct mmc_card *card);
|
||||
int mmc_can_ext_csd(struct mmc_card *card);
|
||||
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
|
||||
int mmc_switch_status(struct mmc_card *card);
|
||||
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
|
||||
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
|
||||
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
||||
enum mmc_busy_cmd busy_cmd);
|
||||
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
||||
unsigned int timeout_ms, unsigned char timing,
|
||||
bool use_busy_signal, bool send_status, bool retry_crc_err);
|
||||
bool send_status, bool retry_crc_err);
|
||||
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
||||
unsigned int timeout_ms);
|
||||
void mmc_run_bkops(struct mmc_card *card);
|
||||
int mmc_flush_cache(struct mmc_card *card);
|
||||
int mmc_cmdq_enable(struct mmc_card *card);
|
||||
int mmc_cmdq_disable(struct mmc_card *card);
|
||||
int mmc_sanitize(struct mmc_card *card);
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -71,6 +71,7 @@ struct mmc_test_mem {
|
|||
* @sg_len: length of currently mapped scatterlist @sg
|
||||
* @mem: allocated memory
|
||||
* @sg: scatterlist
|
||||
* @sg_areq: scatterlist for non-blocking request
|
||||
*/
|
||||
struct mmc_test_area {
|
||||
unsigned long max_sz;
|
||||
|
@ -82,6 +83,7 @@ struct mmc_test_area {
|
|||
unsigned int sg_len;
|
||||
struct mmc_test_mem *mem;
|
||||
struct scatterlist *sg;
|
||||
struct scatterlist *sg_areq;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -836,14 +838,16 @@ static int mmc_test_start_areq(struct mmc_test_card *test,
|
|||
}
|
||||
|
||||
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
|
||||
struct scatterlist *sg, unsigned sg_len,
|
||||
unsigned dev_addr, unsigned blocks,
|
||||
unsigned blksz, int write, int count)
|
||||
unsigned int dev_addr, int write,
|
||||
int count)
|
||||
{
|
||||
struct mmc_test_req *rq1, *rq2;
|
||||
struct mmc_request *mrq, *prev_mrq;
|
||||
int i;
|
||||
int ret = RESULT_OK;
|
||||
struct mmc_test_area *t = &test->area;
|
||||
struct scatterlist *sg = t->sg;
|
||||
struct scatterlist *sg_areq = t->sg_areq;
|
||||
|
||||
rq1 = mmc_test_req_alloc();
|
||||
rq2 = mmc_test_req_alloc();
|
||||
|
@ -857,8 +861,8 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
|
|||
|
||||
for (i = 0; i < count; i++) {
|
||||
mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
|
||||
mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
|
||||
blksz, write);
|
||||
mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
|
||||
t->blocks, 512, write);
|
||||
ret = mmc_test_start_areq(test, mrq, prev_mrq);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
@ -867,7 +871,8 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
|
|||
prev_mrq = &rq2->mrq;
|
||||
|
||||
swap(mrq, prev_mrq);
|
||||
dev_addr += blocks;
|
||||
swap(sg, sg_areq);
|
||||
dev_addr += t->blocks;
|
||||
}
|
||||
|
||||
ret = mmc_test_start_areq(test, NULL, prev_mrq);
|
||||
|
@ -1396,10 +1401,11 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
|
|||
* Map sz bytes so that it can be transferred.
|
||||
*/
|
||||
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
|
||||
int max_scatter, int min_sg_len)
|
||||
int max_scatter, int min_sg_len, bool nonblock)
|
||||
{
|
||||
struct mmc_test_area *t = &test->area;
|
||||
int err;
|
||||
unsigned int sg_len = 0;
|
||||
|
||||
t->blocks = sz >> 9;
|
||||
|
||||
|
@ -1411,6 +1417,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
|
|||
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
|
||||
t->max_seg_sz, &t->sg_len, min_sg_len);
|
||||
}
|
||||
|
||||
if (err || !nonblock)
|
||||
goto err;
|
||||
|
||||
if (max_scatter) {
|
||||
err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
|
||||
t->max_segs, t->max_seg_sz,
|
||||
&sg_len);
|
||||
} else {
|
||||
err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
|
||||
t->max_seg_sz, &sg_len, min_sg_len);
|
||||
}
|
||||
if (!err && sg_len != t->sg_len)
|
||||
err = -EINVAL;
|
||||
|
||||
err:
|
||||
if (err)
|
||||
pr_info("%s: Failed to map sg list\n",
|
||||
mmc_hostname(test->card->host));
|
||||
|
@ -1440,7 +1462,6 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
|
|||
struct timespec64 ts1, ts2;
|
||||
int ret = 0;
|
||||
int i;
|
||||
struct mmc_test_area *t = &test->area;
|
||||
|
||||
/*
|
||||
* In the case of a maximally scattered transfer, the maximum transfer
|
||||
|
@ -1458,15 +1479,14 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
|
|||
sz = max_tfr;
|
||||
}
|
||||
|
||||
ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
|
||||
ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (timed)
|
||||
ktime_get_ts64(&ts1);
|
||||
if (nonblock)
|
||||
ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
|
||||
dev_addr, t->blocks, 512, write, count);
|
||||
ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
|
||||
else
|
||||
for (i = 0; i < count && ret == 0; i++) {
|
||||
ret = mmc_test_area_transfer(test, dev_addr, write);
|
||||
|
@ -1525,6 +1545,7 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
|
|||
struct mmc_test_area *t = &test->area;
|
||||
|
||||
kfree(t->sg);
|
||||
kfree(t->sg_areq);
|
||||
mmc_test_free_mem(t->mem);
|
||||
|
||||
return 0;
|
||||
|
@ -1584,6 +1605,13 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
|
|||
goto out_free;
|
||||
}
|
||||
|
||||
t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
|
||||
GFP_KERNEL);
|
||||
if (!t->sg_areq) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
t->dev_addr = mmc_test_capacity(test->card) / 2;
|
||||
t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
|
||||
|
||||
|
@ -2468,7 +2496,7 @@ static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
|
|||
if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
|
||||
return RESULT_UNSUP_HOST;
|
||||
|
||||
ret = mmc_test_area_map(test, sz, 0, 0);
|
||||
ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -62,7 +62,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
|
|||
{
|
||||
struct mmc_host *host = mq->card->host;
|
||||
|
||||
if (mq->use_cqe)
|
||||
if (mq->use_cqe && !host->hsq_enabled)
|
||||
return mmc_cqe_issue_type(host, req);
|
||||
|
||||
if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
|
||||
|
@ -124,12 +124,14 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
|
|||
{
|
||||
struct request_queue *q = req->q;
|
||||
struct mmc_queue *mq = q->queuedata;
|
||||
struct mmc_card *card = mq->card;
|
||||
struct mmc_host *host = card->host;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&mq->lock, flags);
|
||||
|
||||
if (mq->recovery_needed || !mq->use_cqe)
|
||||
if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled)
|
||||
ret = BLK_EH_RESET_TIMER;
|
||||
else
|
||||
ret = mmc_cqe_timed_out(req);
|
||||
|
@ -144,12 +146,13 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
|
|||
struct mmc_queue *mq = container_of(work, struct mmc_queue,
|
||||
recovery_work);
|
||||
struct request_queue *q = mq->queue;
|
||||
struct mmc_host *host = mq->card->host;
|
||||
|
||||
mmc_get_card(mq->card, &mq->ctx);
|
||||
|
||||
mq->in_recovery = true;
|
||||
|
||||
if (mq->use_cqe)
|
||||
if (mq->use_cqe && !host->hsq_enabled)
|
||||
mmc_blk_cqe_recovery(mq);
|
||||
else
|
||||
mmc_blk_mq_recovery(mq);
|
||||
|
@ -160,6 +163,9 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
|
|||
mq->recovery_needed = false;
|
||||
spin_unlock_irq(&mq->lock);
|
||||
|
||||
if (host->hsq_enabled)
|
||||
host->cqe_ops->cqe_recovery_finish(host);
|
||||
|
||||
mmc_put_card(mq->card, &mq->ctx);
|
||||
|
||||
blk_mq_run_hw_queues(q, true);
|
||||
|
@ -279,6 +285,14 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
}
|
||||
break;
|
||||
case MMC_ISSUE_ASYNC:
|
||||
/*
|
||||
* For MMC host software queue, we only allow 2 requests in
|
||||
* flight to avoid a long latency.
|
||||
*/
|
||||
if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
|
||||
spin_unlock_irq(&mq->lock);
|
||||
return BLK_STS_RESOURCE;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
|
@ -430,7 +444,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
|
|||
* The queue depth for CQE must match the hardware because the request
|
||||
* tag is used to index the hardware queue.
|
||||
*/
|
||||
if (mq->use_cqe)
|
||||
if (mq->use_cqe && !host->hsq_enabled)
|
||||
mq->tag_set.queue_depth =
|
||||
min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
|
||||
else
|
||||
|
|
|
@ -1082,6 +1082,16 @@ retry:
|
|||
}
|
||||
}
|
||||
|
||||
if (host->cqe_ops && !host->cqe_enabled) {
|
||||
err = host->cqe_ops->cqe_enable(host, card);
|
||||
if (!err) {
|
||||
host->cqe_enabled = true;
|
||||
host->hsq_enabled = true;
|
||||
pr_info("%s: Host Software Queue enabled\n",
|
||||
mmc_hostname(host));
|
||||
}
|
||||
}
|
||||
|
||||
if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
|
||||
host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
|
||||
pr_err("%s: Host failed to negotiate down from 3.3V\n",
|
||||
|
|
|
@ -276,14 +276,15 @@ static void sdio_single_irq_set(struct mmc_card *card)
|
|||
|
||||
card->sdio_single_irq = NULL;
|
||||
if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
|
||||
card->host->sdio_irqs == 1)
|
||||
card->host->sdio_irqs == 1) {
|
||||
for (i = 0; i < card->sdio_funcs; i++) {
|
||||
func = card->sdio_func[i];
|
||||
if (func && func->irq_handler) {
|
||||
card->sdio_single_irq = func;
|
||||
break;
|
||||
}
|
||||
}
|
||||
func = card->sdio_func[i];
|
||||
if (func && func->irq_handler) {
|
||||
card->sdio_single_irq = func;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -645,6 +645,7 @@ config MMC_SDHCI_SPRD
|
|||
depends on ARCH_SPRD
|
||||
depends on MMC_SDHCI_PLTFM
|
||||
select MMC_SDHCI_IO_ACCESSORS
|
||||
select MMC_HSQ
|
||||
help
|
||||
This selects the SDIO Host Controller in Spreadtrum
|
||||
SoCs, this driver supports R11(IP version: R11P0).
|
||||
|
@ -949,6 +950,17 @@ config MMC_CQHCI
|
|||
|
||||
If unsure, say N.
|
||||
|
||||
config MMC_HSQ
|
||||
tristate "MMC Host Software Queue support"
|
||||
help
|
||||
This selects the MMC Host Software Queue support. This may increase
|
||||
performance, if the host controller and its driver support it.
|
||||
|
||||
If you have a controller/driver supporting this interface, say Y or M
|
||||
here.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config MMC_TOSHIBA_PCI
|
||||
tristate "Toshiba Type A SD/MMC Card Interface Driver"
|
||||
depends on PCI
|
||||
|
|
|
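The new MMC_HSQ entry above builds the host software queue as a library that a host driver opts into from its probe path (sdhci-sprd does so via the "select MMC_HSQ" hunk earlier in this series). A rough sketch of such an opt-in follows; mmc_hsq_init() is provided by the new mmc_hsq module but is not visible in the hunks shown here, so treat the exact call and error handling as assumptions rather than any driver's actual code.

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

#include "mmc_hsq.h"

/* Hypothetical excerpt from a host driver probe: allocate the HSQ context
 * and hand it to the core, which then dispatches requests through the
 * cqe_ops that mmc_hsq.c registers on the mmc_host. */
static int example_enable_hsq(struct platform_device *pdev, struct mmc_host *mmc)
{
	struct mmc_hsq *hsq;
	int ret;

	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	/* Assumed helper: sets up the hsq context and installs its cqe_ops. */
	ret = mmc_hsq_init(hsq, mmc);
	if (ret)
		return ret;

	/* The core enables HSQ later, when card init finds cqe_ops present
	 * but eMMC command queueing unused (see the mmc.c and sd.c hunks
	 * earlier in this diff). */
	return 0;
}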
@ -100,6 +100,7 @@ obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
|
|||
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
|
||||
obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o
|
||||
obj-$(CONFIG_MMC_CQHCI) += cqhci.o
|
||||
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
|
||||
|
||||
ifeq ($(CONFIG_CB710_DEBUG),y)
|
||||
CFLAGS-cb710-mmc += -DDEBUG
|
||||
|
|
|
@ -207,13 +207,13 @@ static int octeon_mmc_probe(struct platform_device *pdev)
|
|||
base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(base))
|
||||
return PTR_ERR(base);
|
||||
host->base = (void __iomem *)base;
|
||||
host->base = base;
|
||||
host->reg_off = 0;
|
||||
|
||||
base = devm_platform_ioremap_resource(pdev, 1);
|
||||
if (IS_ERR(base))
|
||||
return PTR_ERR(base);
|
||||
host->dma_base = (void __iomem *)base;
|
||||
host->dma_base = base;
|
||||
/*
|
||||
* To keep the register addresses shared we intentionally use
|
||||
* a negative offset here, first register used on Octeon therefore
|
||||
|
|
|
@ -298,16 +298,16 @@ static void __cqhci_disable(struct cqhci_host *cq_host)
|
|||
cq_host->activated = false;
|
||||
}
|
||||
|
||||
int cqhci_suspend(struct mmc_host *mmc)
|
||||
int cqhci_deactivate(struct mmc_host *mmc)
|
||||
{
|
||||
struct cqhci_host *cq_host = mmc->cqe_private;
|
||||
|
||||
if (cq_host->enabled)
|
||||
if (cq_host->enabled && cq_host->activated)
|
||||
__cqhci_disable(cq_host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(cqhci_suspend);
|
||||
EXPORT_SYMBOL(cqhci_deactivate);
|
||||
|
||||
int cqhci_resume(struct mmc_host *mmc)
|
||||
{
|
||||
|
@ -321,14 +321,20 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
|
|||
struct cqhci_host *cq_host = mmc->cqe_private;
|
||||
int err;
|
||||
|
||||
if (!card->ext_csd.cmdq_en)
|
||||
return -EINVAL;
|
||||
|
||||
if (cq_host->enabled)
|
||||
return 0;
|
||||
|
||||
cq_host->rca = card->rca;
|
||||
|
||||
err = cqhci_host_alloc_tdl(cq_host);
|
||||
if (err)
|
||||
if (err) {
|
||||
pr_err("%s: Failed to enable CQE, error %d\n",
|
||||
mmc_hostname(mmc), err);
|
||||
return err;
|
||||
}
|
||||
|
||||
__cqhci_enable(cq_host);
|
||||
|
||||
|
@ -1071,7 +1077,7 @@ struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
|
|||
|
||||
/* check and setup CMDQ interface */
|
||||
cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
|
||||
"cqhci_mem");
|
||||
"cqhci");
|
||||
if (!cqhci_memres) {
|
||||
dev_dbg(&pdev->dev, "CMDQ not supported\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
|
|
@ -230,7 +230,11 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
|
|||
int data_error);
|
||||
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
|
||||
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
|
||||
int cqhci_suspend(struct mmc_host *mmc);
|
||||
int cqhci_deactivate(struct mmc_host *mmc);
|
||||
static inline int cqhci_suspend(struct mmc_host *mmc)
|
||||
{
|
||||
return cqhci_deactivate(mmc);
|
||||
}
|
||||
int cqhci_resume(struct mmc_host *mmc);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,348 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
*
|
||||
* MMC software queue support based on command queue interfaces
|
||||
*
|
||||
* Copyright (C) 2019 Linaro, Inc.
|
||||
* Author: Baolin Wang <baolin.wang@linaro.org>
|
||||
*/
|
||||
|
||||
#include <linux/mmc/card.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "mmc_hsq.h"
|
||||
|
||||
#define HSQ_NUM_SLOTS 64
|
||||
#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
|
||||
|
||||
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
|
||||
{
|
||||
struct mmc_host *mmc = hsq->mmc;
|
||||
struct hsq_slot *slot;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&hsq->lock, flags);
|
||||
|
||||
/* Make sure we are not already running a request now */
|
||||
if (hsq->mrq) {
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Make sure there are remaining requests that need to be pumped */
|
||||
if (!hsq->qcnt || !hsq->enabled) {
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
slot = &hsq->slot[hsq->next_tag];
|
||||
hsq->mrq = slot->mrq;
|
||||
hsq->qcnt--;
|
||||
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
|
||||
mmc->ops->request(mmc, hsq->mrq);
|
||||
}
|
||||
|
||||
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
|
||||
{
|
||||
struct hsq_slot *slot;
|
||||
int tag;
|
||||
|
||||
/*
|
||||
* If there are no remaining requests in the software queue, then set an invalid
|
||||
* tag.
|
||||
*/
|
||||
if (!remains) {
|
||||
hsq->next_tag = HSQ_INVALID_TAG;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Increase the next tag and check whether the corresponding request is
|
||||
* available, if yes, then we found a candidate request.
|
||||
*/
|
||||
if (++hsq->next_tag != HSQ_INVALID_TAG) {
|
||||
slot = &hsq->slot[hsq->next_tag];
|
||||
if (slot->mrq)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Otherwise we should iterate over all slots to find an available tag. */
|
||||
for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
|
||||
slot = &hsq->slot[tag];
|
||||
if (slot->mrq)
|
||||
break;
|
||||
}
|
||||
|
||||
if (tag == HSQ_NUM_SLOTS)
|
||||
tag = HSQ_INVALID_TAG;
|
||||
|
||||
hsq->next_tag = tag;
|
||||
}
|
||||
|
||||
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
|
||||
{
|
||||
unsigned long flags;
|
||||
int remains;
|
||||
|
||||
spin_lock_irqsave(&hsq->lock, flags);
|
||||
|
||||
remains = hsq->qcnt;
|
||||
hsq->mrq = NULL;
|
||||
|
||||
/* Update the next available tag to be queued. */
|
||||
mmc_hsq_update_next_tag(hsq, remains);
|
||||
|
||||
if (hsq->waiting_for_idle && !remains) {
|
||||
hsq->waiting_for_idle = false;
|
||||
wake_up(&hsq->wait_queue);
|
||||
}
|
||||
|
||||
/* Do not pump new request in recovery mode. */
|
||||
if (hsq->recovery_halt) {
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
|
||||
/*
|
||||
* Try to pump a new request to the host controller as fast as possible,
|
||||
* after completing previous request.
|
||||
*/
|
||||
if (remains > 0)
|
||||
mmc_hsq_pump_requests(hsq);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_hsq_finalize_request - finalize one request if the request is done
|
||||
* @mmc: the host controller
|
||||
* @mrq: the request that needs to be finalized
|
||||
*
|
||||
* Return true if we finalized the corresponding request in software queue,
|
||||
* otherwise return false.
|
||||
*/
|
||||
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
||||
{
|
||||
struct mmc_hsq *hsq = mmc->cqe_private;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&hsq->lock, flags);
|
||||
|
||||
if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear the current completed slot request to make room for a new request.
|
||||
*/
|
||||
hsq->slot[hsq->next_tag].mrq = NULL;
|
||||
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
|
||||
mmc_cqe_request_done(mmc, hsq->mrq);
|
||||
|
||||
mmc_hsq_post_request(hsq);
|
||||
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
|
||||
|
||||
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
|
||||
{
|
||||
struct mmc_hsq *hsq = mmc->cqe_private;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&hsq->lock, flags);
|
||||
|
||||
hsq->recovery_halt = true;
|
||||
|
||||
spin_unlock_irqrestore(&hsq->lock, flags);
|
||||
}
|
||||
|
||||
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
|
||||
{
|
||||
struct mmc_hsq *hsq = mmc->cqe_private;
|
||||
int remains;
|
||||
|
||||
spin_lock_irq(&hsq->lock);
|
||||
|
||||
hsq->recovery_halt = false;
|
||||
remains = hsq->qcnt;
|
||||
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
|
||||
/*
|
||||
* Try to pump new request if there are request pending in software
|
||||
* queue after finishing recovery.
|
||||
*/
|
||||
if (remains > 0)
|
||||
mmc_hsq_pump_requests(hsq);
|
||||
}
|
||||
|
||||
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
||||
{
|
||||
struct mmc_hsq *hsq = mmc->cqe_private;
|
||||
int tag = mrq->tag;
|
||||
|
||||
spin_lock_irq(&hsq->lock);
|
||||
|
||||
if (!hsq->enabled) {
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
return -ESHUTDOWN;
|
||||
}
|
||||
|
||||
/* Do not queue any new requests in recovery mode. */
|
||||
if (hsq->recovery_halt) {
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
hsq->slot[tag].mrq = mrq;
|
||||
|
||||
/*
|
||||
* Set the next tag as current request tag if no available
|
||||
* next tag.
|
||||
*/
|
||||
if (hsq->next_tag == HSQ_INVALID_TAG)
|
||||
hsq->next_tag = tag;
|
||||
|
||||
hsq->qcnt++;
|
||||
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
|
||||
mmc_hsq_pump_requests(hsq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
|
||||
{
|
||||
if (mmc->ops->post_req)
|
||||
mmc->ops->post_req(mmc, mrq, 0);
|
||||
}
|
||||
|
||||
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
|
||||
{
|
||||
bool is_idle;
|
||||
|
||||
spin_lock_irq(&hsq->lock);
|
||||
|
||||
is_idle = (!hsq->mrq && !hsq->qcnt) ||
|
||||
hsq->recovery_halt;
|
||||
|
||||
*ret = hsq->recovery_halt ? -EBUSY : 0;
|
||||
hsq->waiting_for_idle = !is_idle;
|
||||
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
|
||||
return is_idle;
|
||||
}
|
||||
|
||||
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
|
||||
{
|
||||
struct mmc_hsq *hsq = mmc->cqe_private;
|
||||
int ret;
|
||||
|
||||
wait_event(hsq->wait_queue,
|
||||
mmc_hsq_queue_is_idle(hsq, &ret));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mmc_hsq_disable(struct mmc_host *mmc)
|
||||
{
|
||||
struct mmc_hsq *hsq = mmc->cqe_private;
|
||||
u32 timeout = 500;
|
||||
int ret;
|
||||
|
||||
spin_lock_irq(&hsq->lock);
|
||||
|
||||
if (!hsq->enabled) {
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
|
||||
ret = wait_event_timeout(hsq->wait_queue,
|
||||
mmc_hsq_queue_is_idle(hsq, &ret),
|
||||
msecs_to_jiffies(timeout));
|
||||
if (ret == 0) {
|
||||
pr_warn("could not stop mmc software queue\n");
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irq(&hsq->lock);
|
||||
|
||||
hsq->enabled = false;
|
||||
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
}
|
||||
|
||||
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
|
||||
{
|
||||
struct mmc_hsq *hsq = mmc->cqe_private;
|
||||
|
||||
spin_lock_irq(&hsq->lock);
|
||||
|
||||
if (hsq->enabled) {
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
hsq->enabled = true;
|
||||
|
||||
spin_unlock_irq(&hsq->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct mmc_cqe_ops mmc_hsq_ops = {
|
||||
.cqe_enable = mmc_hsq_enable,
|
||||
.cqe_disable = mmc_hsq_disable,
|
||||
.cqe_request = mmc_hsq_request,
|
||||
.cqe_post_req = mmc_hsq_post_req,
|
||||
.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
|
||||
.cqe_recovery_start = mmc_hsq_recovery_start,
|
||||
.cqe_recovery_finish = mmc_hsq_recovery_finish,
|
||||
};
|
||||
|
||||
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
|
||||
{
|
||||
hsq->num_slots = HSQ_NUM_SLOTS;
|
||||
hsq->next_tag = HSQ_INVALID_TAG;
|
||||
|
||||
hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
|
||||
sizeof(struct hsq_slot), GFP_KERNEL);
|
||||
if (!hsq->slot)
|
||||
return -ENOMEM;
|
||||
|
||||
hsq->mmc = mmc;
|
||||
hsq->mmc->cqe_private = hsq;
|
||||
mmc->cqe_ops = &mmc_hsq_ops;
|
||||
|
||||
spin_lock_init(&hsq->lock);
|
||||
init_waitqueue_head(&hsq->wait_queue);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_hsq_init);
|
||||
|
||||
void mmc_hsq_suspend(struct mmc_host *mmc)
|
||||
{
|
||||
mmc_hsq_disable(mmc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
|
||||
|
||||
int mmc_hsq_resume(struct mmc_host *mmc)
|
||||
{
|
||||
return mmc_hsq_enable(mmc, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
|
||||
|
||||
MODULE_DESCRIPTION("MMC Host Software Queue support");
|
||||
MODULE_LICENSE("GPL v2");
|
|
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MMC_HSQ_H
#define LINUX_MMC_HSQ_H

struct hsq_slot {
	struct mmc_request *mrq;
};

struct mmc_hsq {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	wait_queue_head_t wait_queue;
	struct hsq_slot *slot;
	spinlock_t lock;

	int next_tag;
	int num_slots;
	int qcnt;

	bool enabled;
	bool waiting_for_idle;
	bool recovery_halt;
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc);
void mmc_hsq_suspend(struct mmc_host *mmc);
int mmc_hsq_resume(struct mmc_host *mmc);
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq);

#endif
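For reference, a host driver opts into the software queue by allocating a struct mmc_hsq and handing it to mmc_hsq_init() from its probe path; its PM hooks then call mmc_hsq_suspend()/mmc_hsq_resume(), and its completion path reports finished requests with mmc_hsq_finalize_request(). The fragment below is only a minimal sketch built from the API shown above: the helper name, the device pointer, the header file name and the devm allocation are illustrative assumptions, not part of this series.

#include <linux/device.h>
#include <linux/mmc/host.h>
#include <linux/slab.h>

#include "mmc_hsq.h"	/* header shown above; file name assumed */

/* Hypothetical probe helper wiring up the host software queue. */
static int example_host_probe_hsq(struct device *dev, struct mmc_host *mmc)
{
	struct mmc_hsq *hsq;
	int ret;

	hsq = devm_kzalloc(dev, sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	/*
	 * Registers mmc_hsq_ops as mmc->cqe_ops and stores hsq in
	 * mmc->cqe_private, so later completions can be finalized with
	 * mmc_hsq_finalize_request().
	 */
	ret = mmc_hsq_init(hsq, mmc);
	if (ret)
		return ret;

	return 0;
}

A real user could equally embed the structure in its own host private data; the devm allocation here is just one possible choice.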
@@ -22,6 +22,7 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
@@ -274,6 +275,32 @@ static struct variant_data variant_stm32_sdmmc = {
	.init = sdmmc_variant_init,
};

static struct variant_data variant_stm32_sdmmcv2 = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.f_max = 208000000,
	.stm32_clkdiv = true,
	.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
	.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
	.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first = true,
	.datacnt_useless = true,
	.datalength_bits = 25,
	.datactrl_blocksz = 14,
	.datactrl_any_blocksz = true,
	.stm32_idmabsize_mask = GENMASK(16, 5),
	.dma_lli = true,
	.busy_timeout = true,
	.busy_detect = true,
	.busy_detect_flag = MCI_STM32_BUSYD0,
	.busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
	.init = sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
@@ -1217,6 +1244,9 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
		writel_relaxed(clks, host->base + MMCIDATATIMER);
	}

	if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
		host->ops->pre_sig_volt_switch(host);

	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

@@ -1830,6 +1860,7 @@ static int mmci_get_cd(struct mmc_host *mmc)

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

@@ -1849,6 +1880,9 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
			break;
		}

		if (!ret && host->ops && host->ops->post_sig_volt_switch)
			ret = host->ops->post_sig_volt_switch(host, ios);

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}
@@ -1933,6 +1967,8 @@ static int mmci_probe(struct amba_device *dev,

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_ops = &mmci_ops;
	mmc->ops = &mmci_ops;

	/*
	 * Some variant (STM32) doesn't have opendrain bit, nevertheless

@@ -2072,8 +2108,6 @@ static int mmci_probe(struct amba_device *dev,
	host->stop_abort.arg = 0;
	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

@@ -2335,6 +2369,11 @@ static const struct amba_id mmci_ids[] = {
		.mask = 0xf0ffffff,
		.data = &variant_stm32_sdmmc,
	},
	{
		.id = 0x00253180,
		.mask = 0xf0ffffff,
		.data = &variant_stm32_sdmmcv2,
	},
	/* Qualcomm variants */
	{
		.id = 0x00051180,
@@ -165,6 +165,7 @@
/* Extended status bits for the STM32 variants */
#define MCI_STM32_BUSYD0 BIT(20)
#define MCI_STM32_BUSYD0END BIT(21)
#define MCI_STM32_VSWEND BIT(25)

#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)

@@ -182,6 +183,9 @@
#define MCI_ST_SDIOITC (1 << 22)
#define MCI_ST_CEATAENDC (1 << 23)
#define MCI_ST_BUSYENDC (1 << 24)
/* Extended clear bits for the STM32 variants */
#define MCI_STM32_VSWENDC BIT(25)
#define MCI_STM32_CKSTOPC BIT(26)

#define MMCIMASK0 0x03c
#define MCI_CMDCRCFAILMASK (1 << 0)

@@ -377,6 +381,8 @@ struct mmci_host_ops {
	void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
	void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
	bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
	void (*pre_sig_volt_switch)(struct mmci_host *host);
	int (*post_sig_volt_switch)(struct mmci_host *host, struct mmc_ios *ios);
};

struct mmci_host {

@@ -407,8 +413,10 @@ struct mmci_host {
	u32 mask1_reg;
	u8 vqmmc_enabled:1;
	struct mmci_platform_data *plat;
	struct mmc_host_ops *mmc_ops;
	struct mmci_host_ops *ops;
	struct variant_data *variant;
	void *variant_priv;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_opendrain;

@@ -3,10 +3,13 @@
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic.barre@st.com for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include "mmci.h"
@@ -14,17 +17,40 @@
#define SDMMC_LLI_BUF_LEN PAGE_SIZE
#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT)

#define DLYB_CR 0x0
#define DLYB_CR_DEN BIT(0)
#define DLYB_CR_SEN BIT(1)

#define DLYB_CFGR 0x4
#define DLYB_CFGR_SEL_MASK GENMASK(3, 0)
#define DLYB_CFGR_UNIT_MASK GENMASK(14, 8)
#define DLYB_CFGR_LNG_MASK GENMASK(27, 16)
#define DLYB_CFGR_LNGF BIT(31)

#define DLYB_NB_DELAY 11
#define DLYB_CFGR_SEL_MAX (DLYB_NB_DELAY + 1)
#define DLYB_CFGR_UNIT_MAX 127

#define DLYB_LNG_TIMEOUT_US 1000
#define SDMMC_VSWEND_TIMEOUT_US 10000

struct sdmmc_lli_desc {
	u32 idmalar;
	u32 idmabase;
	u32 idmasize;
};

struct sdmmc_priv {
struct sdmmc_idma {
	dma_addr_t sg_dma;
	void *sg_cpu;
};

struct sdmmc_dlyb {
	void __iomem *base;
	u32 unit;
	u32 max;
};

static int sdmmc_idma_validate_data(struct mmci_host *host,
				    struct mmc_data *data)
{
@ -36,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
|
|||
* excepted the last element which has no constraint on idmasize
|
||||
*/
|
||||
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
|
||||
if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32)) ||
|
||||
!IS_ALIGNED(sg_dma_len(data->sg), SDMMC_IDMA_BURST)) {
|
||||
if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
|
||||
!IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
|
||||
dev_err(mmc_dev(host->mmc),
|
||||
"unaligned scatterlist: ofst:%x length:%d\n",
|
||||
data->sg->offset, data->sg->length);
|
||||
|
@ -45,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
|
|||
}
|
||||
}
|
||||
|
||||
if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32))) {
|
||||
if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
|
||||
dev_err(mmc_dev(host->mmc),
|
||||
"unaligned last scatterlist: ofst:%x length:%d\n",
|
||||
data->sg->offset, data->sg->length);
|
||||
|
@ -92,7 +118,7 @@ static void sdmmc_idma_unprep_data(struct mmci_host *host,
|
|||
|
||||
static int sdmmc_idma_setup(struct mmci_host *host)
|
||||
{
|
||||
struct sdmmc_priv *idma;
|
||||
struct sdmmc_idma *idma;
|
||||
|
||||
idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL);
|
||||
if (!idma)
|
||||
|
@ -123,7 +149,7 @@ static int sdmmc_idma_setup(struct mmci_host *host)
|
|||
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
|
||||
|
||||
{
|
||||
struct sdmmc_priv *idma = host->dma_priv;
|
||||
struct sdmmc_idma *idma = host->dma_priv;
|
||||
struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
|
||||
struct mmc_data *data = host->data;
|
||||
struct scatterlist *sg;
|
||||
|
@ -226,12 +252,25 @@ static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
|
|||
mmci_write_clkreg(host, clk);
|
||||
}
|
||||
|
||||
static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb)
|
||||
{
|
||||
if (!dlyb || !dlyb->base)
|
||||
return;
|
||||
|
||||
/* Output clock = Input clock */
|
||||
writel_relaxed(0, dlyb->base + DLYB_CR);
|
||||
}
|
||||
|
||||
static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
|
||||
{
|
||||
struct mmc_ios ios = host->mmc->ios;
|
||||
struct sdmmc_dlyb *dlyb = host->variant_priv;
|
||||
|
||||
/* adds OF options */
|
||||
pwr = host->pwr_reg_add;
|
||||
|
||||
sdmmc_dlyb_input_ck(dlyb);
|
||||
|
||||
if (ios.power_mode == MMC_POWER_OFF) {
|
||||
/* Only a reset could power-off sdmmc */
|
||||
reset_control_assert(host->rst);
|
||||
|
@ -254,6 +293,10 @@ static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
|
|||
writel(MCI_IRQENABLE | host->variant->start_err,
|
||||
host->base + MMCIMASK0);
|
||||
|
||||
/* preserves voltage switch bits */
|
||||
pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
|
||||
MCI_STM32_VSWITCH);
|
||||
|
||||
/*
|
||||
* After a power-cycle state, we must set the SDMMC in
|
||||
* Power-off. The SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
|
||||
|
@ -315,14 +358,145 @@ complete:
|
|||
if (host->busy_status) {
|
||||
writel_relaxed(mask & ~host->variant->busy_detect_mask,
|
||||
base + MMCIMASK0);
|
||||
writel_relaxed(host->variant->busy_detect_mask,
|
||||
base + MMCICLEAR);
|
||||
host->busy_status = 0;
|
||||
}
|
||||
|
||||
writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
|
||||
int unit, int phase, bool sampler)
|
||||
{
|
||||
u32 cfgr;
|
||||
|
||||
writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);
|
||||
|
||||
cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
|
||||
FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
|
||||
writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);
|
||||
|
||||
if (!sampler)
|
||||
writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
|
||||
}
|
||||
|
||||
static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
|
||||
{
|
||||
struct sdmmc_dlyb *dlyb = host->variant_priv;
|
||||
u32 cfgr;
|
||||
int i, lng, ret;
|
||||
|
||||
for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
|
||||
sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true);
|
||||
|
||||
ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
|
||||
(cfgr & DLYB_CFGR_LNGF),
|
||||
1, DLYB_LNG_TIMEOUT_US);
|
||||
if (ret) {
|
||||
dev_warn(mmc_dev(host->mmc),
|
||||
"delay line cfg timeout unit:%d cfgr:%d\n",
|
||||
i, cfgr);
|
||||
continue;
|
||||
}
|
||||
|
||||
lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
|
||||
if (lng < BIT(DLYB_NB_DELAY) && lng > 0)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i > DLYB_CFGR_UNIT_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
dlyb->unit = i;
|
||||
dlyb->max = __fls(lng);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
|
||||
{
|
||||
struct sdmmc_dlyb *dlyb = host->variant_priv;
|
||||
int cur_len = 0, max_len = 0, end_of_len = 0;
|
||||
int phase;
|
||||
|
||||
for (phase = 0; phase <= dlyb->max; phase++) {
|
||||
sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
|
||||
|
||||
if (mmc_send_tuning(host->mmc, opcode, NULL)) {
|
||||
cur_len = 0;
|
||||
} else {
|
||||
cur_len++;
|
||||
if (cur_len > max_len) {
|
||||
max_len = cur_len;
|
||||
end_of_len = phase;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!max_len) {
|
||||
dev_err(mmc_dev(host->mmc), "no tuning point found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
phase = end_of_len - max_len / 2;
|
||||
sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
|
||||
|
||||
dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
|
||||
dlyb->unit, dlyb->max, phase);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct mmci_host *host = mmc_priv(mmc);
|
||||
struct sdmmc_dlyb *dlyb = host->variant_priv;
|
||||
|
||||
if (!dlyb || !dlyb->base)
|
||||
return -EINVAL;
|
||||
|
||||
if (sdmmc_dlyb_lng_tuning(host))
|
||||
return -EINVAL;
|
||||
|
||||
return sdmmc_dlyb_phase_tuning(host, opcode);
|
||||
}
|
||||
|
||||
static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host)
|
||||
{
|
||||
/* clear the voltage switch completion flag */
|
||||
writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);
|
||||
/* enable Voltage switch procedure */
|
||||
mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
|
||||
}
|
||||
|
||||
static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
|
||||
struct mmc_ios *ios)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 status;
|
||||
int ret = 0;
|
||||
|
||||
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
|
||||
spin_lock_irqsave(&host->lock, flags);
|
||||
mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
||||
		/* wait up to 10ms for the voltage switch to complete */
|
||||
ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
|
||||
status,
|
||||
(status & MCI_STM32_VSWEND),
|
||||
10, SDMMC_VSWEND_TIMEOUT_US);
|
||||
|
||||
writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
|
||||
host->base + MMCICLEAR);
|
||||
mmci_write_pwrreg(host, host->pwr_reg &
|
||||
~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct mmci_host_ops sdmmc_variant_ops = {
|
||||
.validate_data = sdmmc_idma_validate_data,
|
||||
.prep_data = sdmmc_idma_prep_data,
|
||||
|
@ -334,9 +508,27 @@ static struct mmci_host_ops sdmmc_variant_ops = {
|
|||
.set_clkreg = mmci_sdmmc_set_clkreg,
|
||||
.set_pwrreg = mmci_sdmmc_set_pwrreg,
|
||||
.busy_complete = sdmmc_busy_complete,
|
||||
.pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
|
||||
.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
|
||||
};
|
||||
|
||||
void sdmmc_variant_init(struct mmci_host *host)
|
||||
{
|
||||
struct device_node *np = host->mmc->parent->of_node;
|
||||
void __iomem *base_dlyb;
|
||||
struct sdmmc_dlyb *dlyb;
|
||||
|
||||
host->ops = &sdmmc_variant_ops;
|
||||
|
||||
base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
|
||||
if (IS_ERR(base_dlyb))
|
||||
return;
|
||||
|
||||
dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
|
||||
if (!dlyb)
|
||||
return;
|
||||
|
||||
dlyb->base = base_dlyb;
|
||||
host->variant_priv = dlyb;
|
||||
host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
|
||||
}
|
||||
|
|
|
@ -128,6 +128,7 @@
|
|||
#define MSDC_PS_CDSTS (0x1 << 1) /* R */
|
||||
#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
|
||||
#define MSDC_PS_DAT (0xff << 16) /* R */
|
||||
#define MSDC_PS_DATA1 (0x1 << 17) /* R */
|
||||
#define MSDC_PS_CMD (0x1 << 24) /* R */
|
||||
#define MSDC_PS_WP (0x1 << 31) /* R */
|
||||
|
||||
|
@ -361,6 +362,7 @@ struct msdc_save_para {
|
|||
|
||||
struct mtk_mmc_compatible {
|
||||
u8 clk_div_bits;
|
||||
bool recheck_sdio_irq;
|
||||
bool hs400_tune; /* only used for MT8173 */
|
||||
u32 pad_tune_reg;
|
||||
bool async_fifo;
|
||||
|
@ -436,6 +438,7 @@ struct msdc_host {
|
|||
|
||||
static const struct mtk_mmc_compatible mt8135_compat = {
|
||||
.clk_div_bits = 8,
|
||||
.recheck_sdio_irq = false,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE,
|
||||
.async_fifo = false,
|
||||
|
@ -448,6 +451,7 @@ static const struct mtk_mmc_compatible mt8135_compat = {
|
|||
|
||||
static const struct mtk_mmc_compatible mt8173_compat = {
|
||||
.clk_div_bits = 8,
|
||||
.recheck_sdio_irq = true,
|
||||
.hs400_tune = true,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE,
|
||||
.async_fifo = false,
|
||||
|
@ -460,6 +464,7 @@ static const struct mtk_mmc_compatible mt8173_compat = {
|
|||
|
||||
static const struct mtk_mmc_compatible mt8183_compat = {
|
||||
.clk_div_bits = 12,
|
||||
.recheck_sdio_irq = false,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE0,
|
||||
.async_fifo = true,
|
||||
|
@ -472,6 +477,7 @@ static const struct mtk_mmc_compatible mt8183_compat = {
|
|||
|
||||
static const struct mtk_mmc_compatible mt2701_compat = {
|
||||
.clk_div_bits = 12,
|
||||
.recheck_sdio_irq = false,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE0,
|
||||
.async_fifo = true,
|
||||
|
@ -484,6 +490,7 @@ static const struct mtk_mmc_compatible mt2701_compat = {
|
|||
|
||||
static const struct mtk_mmc_compatible mt2712_compat = {
|
||||
.clk_div_bits = 12,
|
||||
.recheck_sdio_irq = false,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE0,
|
||||
.async_fifo = true,
|
||||
|
@ -496,6 +503,7 @@ static const struct mtk_mmc_compatible mt2712_compat = {
|
|||
|
||||
static const struct mtk_mmc_compatible mt7622_compat = {
|
||||
.clk_div_bits = 12,
|
||||
.recheck_sdio_irq = false,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE0,
|
||||
.async_fifo = true,
|
||||
|
@ -508,6 +516,7 @@ static const struct mtk_mmc_compatible mt7622_compat = {
|
|||
|
||||
static const struct mtk_mmc_compatible mt8516_compat = {
|
||||
.clk_div_bits = 12,
|
||||
.recheck_sdio_irq = false,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE0,
|
||||
.async_fifo = true,
|
||||
|
@ -518,6 +527,7 @@ static const struct mtk_mmc_compatible mt8516_compat = {
|
|||
|
||||
static const struct mtk_mmc_compatible mt7620_compat = {
|
||||
.clk_div_bits = 8,
|
||||
.recheck_sdio_irq = false,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE,
|
||||
.async_fifo = false,
|
||||
|
@ -591,6 +601,7 @@ static void msdc_reset_hw(struct msdc_host *host)
|
|||
|
||||
static void msdc_cmd_next(struct msdc_host *host,
|
||||
struct mmc_request *mrq, struct mmc_command *cmd);
|
||||
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb);
|
||||
|
||||
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
|
||||
MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
|
||||
|
@ -1007,6 +1018,32 @@ static int msdc_auto_cmd_done(struct msdc_host *host, int events,
|
|||
return cmd->error;
|
||||
}
|
||||
|
||||
/**
 * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost
 *
 * The host controller may lose an interrupt in some special cases.
 * Add an SDIO irq recheck mechanism to make sure all interrupts
 * can be processed immediately.
 */
|
||||
static void msdc_recheck_sdio_irq(struct msdc_host *host)
|
||||
{
|
||||
u32 reg_int, reg_inten, reg_ps;
|
||||
|
||||
if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
|
||||
reg_inten = readl(host->base + MSDC_INTEN);
|
||||
if (reg_inten & MSDC_INTEN_SDIOIRQ) {
|
||||
reg_int = readl(host->base + MSDC_INT);
|
||||
reg_ps = readl(host->base + MSDC_PS);
|
||||
if (!(reg_int & MSDC_INT_SDIOIRQ ||
|
||||
reg_ps & MSDC_PS_DATA1)) {
|
||||
__msdc_enable_sdio_irq(host, 0);
|
||||
sdio_signal_irq(host->mmc);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void msdc_track_cmd_data(struct msdc_host *host,
|
||||
struct mmc_command *cmd, struct mmc_data *data)
|
||||
{
|
||||
|
@ -1035,6 +1072,8 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
|
|||
if (host->error)
|
||||
msdc_reset_hw(host);
|
||||
mmc_request_done(host->mmc, mrq);
|
||||
if (host->dev_comp->recheck_sdio_irq)
|
||||
msdc_recheck_sdio_irq(host);
|
||||
}
|
||||
|
||||
/* returns true if command is fully handled; returns false otherwise */
|
||||
|
@ -1393,6 +1432,8 @@ static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
|
|||
if (enb) {
|
||||
sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
|
||||
sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
|
||||
if (host->dev_comp->recheck_sdio_irq)
|
||||
msdc_recheck_sdio_irq(host);
|
||||
} else {
|
||||
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
|
||||
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
|
||||
|
|
|
@ -57,6 +57,12 @@ struct renesas_sdhi {
|
|||
void __iomem *scc_ctl;
|
||||
u32 scc_tappos;
|
||||
u32 scc_tappos_hs400;
|
||||
bool doing_tune;
|
||||
|
||||
/* Tuning values: 1 for success, 0 for failure */
|
||||
DECLARE_BITMAP(taps, BITS_PER_LONG);
|
||||
unsigned int tap_num;
|
||||
unsigned long tap_set;
|
||||
};
|
||||
|
||||
#define host_to_priv(host) \
|
||||
|
|
|
@ -250,20 +250,25 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
|
|||
#define SH_MOBILE_SDHI_SCC_CKSEL 0x006
|
||||
#define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008
|
||||
#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A
|
||||
#define SH_MOBILE_SDHI_SCC_SMPCMP 0x00C
|
||||
#define SH_MOBILE_SDHI_SCC_TMPPORT2 0x00E
|
||||
|
||||
/* Definitions for values the SH_MOBILE_SDHI_SCC_DTCNTL register */
|
||||
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0)
|
||||
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16
|
||||
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK 0xff
|
||||
|
||||
/* Definitions for values the SH_MOBILE_SDHI_SCC_CKSEL register */
|
||||
#define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL BIT(0)
|
||||
/* Definitions for values the SH_MOBILE_SDHI_SCC_RVSCNTL register */
|
||||
|
||||
#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0)
|
||||
/* Definitions for values the SH_MOBILE_SDHI_SCC_RVSREQ register */
|
||||
|
||||
#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN BIT(0)
|
||||
#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP BIT(1)
|
||||
#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2)
|
||||
/* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT2 register */
|
||||
|
||||
#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN BIT(8)
|
||||
#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP BIT(24)
|
||||
#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR (BIT(8) | BIT(24))
|
||||
|
||||
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4)
|
||||
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN BIT(31)
|
||||
|
||||
|
@ -316,17 +321,9 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
|
|||
SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
|
||||
}
|
||||
|
||||
static void renesas_sdhi_prepare_tuning(struct tmio_mmc_host *host,
|
||||
unsigned long tap)
|
||||
{
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
||||
/* Set sampling clock position */
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
|
||||
}
|
||||
|
||||
static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
||||
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
|
||||
|
@ -339,6 +336,12 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
|
|||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
|
||||
priv->scc_tappos_hs400);
|
||||
|
||||
/* Gen3 can't do automatic tap correction with HS400, so disable it */
|
||||
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC)
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
|
||||
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
|
||||
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
|
||||
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
|
||||
(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
|
||||
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
|
||||
|
@ -352,7 +355,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
|
|||
|
||||
if (priv->quirks && priv->quirks->hs400_4taps)
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
|
||||
host->tap_set / 2);
|
||||
priv->tap_set / 2);
|
||||
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
|
||||
SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
|
||||
|
@ -374,8 +377,9 @@ static void renesas_sdhi_reset_scc(struct tmio_mmc_host *host,
|
|||
SH_MOBILE_SDHI_SCC_CKSEL));
|
||||
}
|
||||
|
||||
static void renesas_sdhi_disable_scc(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_disable_scc(struct mmc_host *mmc)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
||||
renesas_sdhi_reset_scc(host, priv);
|
||||
|
@ -410,9 +414,12 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
|
|||
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
|
||||
}
|
||||
|
||||
static void renesas_sdhi_prepare_hs400_tuning(struct tmio_mmc_host *host)
|
||||
static int renesas_sdhi_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
|
||||
renesas_sdhi_reset_hs400_mode(host, host_to_priv(host));
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define SH_MOBILE_SDHI_MAX_TAP 3
|
||||
|
@ -426,6 +433,8 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
|
|||
unsigned long ntap; /* temporary counter of tuning success */
|
||||
unsigned long i;
|
||||
|
||||
priv->doing_tune = false;
|
||||
|
||||
/* Clear SCC_RVSREQ */
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
|
||||
|
||||
|
@ -434,11 +443,11 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
|
|||
* result requiring the tap to be good in both runs before
|
||||
* considering it for tuning selection.
|
||||
*/
|
||||
for (i = 0; i < host->tap_num * 2; i++) {
|
||||
int offset = host->tap_num * (i < host->tap_num ? 1 : -1);
|
||||
for (i = 0; i < priv->tap_num * 2; i++) {
|
||||
int offset = priv->tap_num * (i < priv->tap_num ? 1 : -1);
|
||||
|
||||
if (!test_bit(i, host->taps))
|
||||
clear_bit(i + offset, host->taps);
|
||||
if (!test_bit(i, priv->taps))
|
||||
clear_bit(i + offset, priv->taps);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -450,8 +459,8 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
|
|||
ntap = 0;
|
||||
tap_start = 0;
|
||||
tap_end = 0;
|
||||
for (i = 0; i < host->tap_num * 2; i++) {
|
||||
if (test_bit(i, host->taps)) {
|
||||
for (i = 0; i < priv->tap_num * 2; i++) {
|
||||
if (test_bit(i, priv->taps)) {
|
||||
ntap++;
|
||||
} else {
|
||||
if (ntap > tap_cnt) {
|
||||
|
@ -470,12 +479,12 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
|
|||
}
|
||||
|
||||
if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP)
|
||||
host->tap_set = (tap_start + tap_end) / 2 % host->tap_num;
|
||||
priv->tap_set = (tap_start + tap_end) / 2 % priv->tap_num;
|
||||
else
|
||||
return -EIO;
|
||||
|
||||
/* Set SCC */
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set);
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, priv->tap_set);
|
||||
|
||||
/* Enable auto re-tuning */
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
|
||||
|
@ -485,6 +494,97 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
|
||||
{
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
int i;
|
||||
|
||||
priv->tap_num = renesas_sdhi_init_tuning(host);
|
||||
if (!priv->tap_num)
|
||||
return 0; /* Tuning is not supported */
|
||||
|
||||
if (priv->tap_num * 2 >= sizeof(priv->taps) * BITS_PER_BYTE) {
|
||||
dev_err(&host->pdev->dev,
|
||||
"Too many taps, please update 'taps' in tmio_mmc_host!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
priv->doing_tune = true;
|
||||
bitmap_zero(priv->taps, priv->tap_num * 2);
|
||||
|
||||
/* Issue CMD19 twice for each tap */
|
||||
for (i = 0; i < 2 * priv->tap_num; i++) {
|
||||
/* Set sampling clock position */
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
|
||||
|
||||
if (mmc_send_tuning(host->mmc, opcode, NULL) == 0)
|
||||
set_bit(i, priv->taps);
|
||||
}
|
||||
|
||||
return renesas_sdhi_select_tuning(host);
|
||||
}
|
||||
|
||||
static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_4tap)
|
||||
{
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
unsigned long new_tap = priv->tap_set;
|
||||
u32 val;
|
||||
|
||||
val = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ);
|
||||
if (!val)
|
||||
return false;
|
||||
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
|
||||
|
||||
/* Change TAP position according to correction status */
|
||||
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC &&
|
||||
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
|
||||
/*
|
||||
* With HS400, the DAT signal is based on DS, not CLK.
|
||||
* Therefore, use only CMD status.
|
||||
*/
|
||||
u32 smpcmp = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) &
|
||||
SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR;
|
||||
if (!smpcmp)
|
||||
return false; /* no error in CMD signal */
|
||||
else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP)
|
||||
new_tap++;
|
||||
else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN)
|
||||
new_tap--;
|
||||
else
|
||||
return true; /* need retune */
|
||||
} else {
|
||||
if (val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR)
|
||||
return true; /* need retune */
|
||||
else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP)
|
||||
new_tap++;
|
||||
else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN)
|
||||
new_tap--;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
priv->tap_set = (new_tap % priv->tap_num);
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
|
||||
priv->tap_set / (use_4tap ? 2 : 1));
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool renesas_sdhi_auto_correction(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
||||
/* Check SCC error */
|
||||
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
|
||||
SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
@ -499,20 +599,14 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
|
|||
!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap))
|
||||
return false;
|
||||
|
||||
if (mmc_doing_retune(host->mmc))
|
||||
if (mmc_doing_retune(host->mmc) || priv->doing_tune)
|
||||
return false;
|
||||
|
||||
/* Check SCC error */
|
||||
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
|
||||
SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &&
|
||||
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
|
||||
SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
|
||||
/* Clear SCC error */
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
|
||||
return true;
|
||||
}
|
||||
SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN)
|
||||
return renesas_sdhi_auto_correction(host);
|
||||
|
||||
return false;
|
||||
return renesas_sdhi_manual_correction(host, use_4tap);
|
||||
}
|
||||
|
||||
static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
|
||||
|
@ -531,10 +625,6 @@ static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
|
|||
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
|
||||
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
|
||||
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
|
||||
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
|
||||
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
|
||||
|
||||
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
|
||||
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK,
|
||||
TMIO_MASK_INIT_RCAR2);
|
||||
|
@ -811,14 +901,11 @@ int renesas_sdhi_probe(struct platform_device *pdev,
|
|||
if (!hit)
|
||||
dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
|
||||
|
||||
host->init_tuning = renesas_sdhi_init_tuning;
|
||||
host->prepare_tuning = renesas_sdhi_prepare_tuning;
|
||||
host->select_tuning = renesas_sdhi_select_tuning;
|
||||
host->check_scc_error = renesas_sdhi_check_scc_error;
|
||||
host->prepare_hs400_tuning =
|
||||
renesas_sdhi_prepare_hs400_tuning;
|
||||
host->hs400_downgrade = renesas_sdhi_disable_scc;
|
||||
host->hs400_complete = renesas_sdhi_hs400_complete;
|
||||
host->execute_tuning = renesas_sdhi_execute_tuning;
|
||||
host->check_retune = renesas_sdhi_check_scc_error;
|
||||
host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
|
||||
host->ops.hs400_downgrade = renesas_sdhi_disable_scc;
|
||||
host->ops.hs400_complete = renesas_sdhi_hs400_complete;
|
||||
}
|
||||
|
||||
num_irqs = platform_irq_count(pdev);
|
||||
|
|
|
@ -75,7 +75,7 @@ struct sdhci_acpi_host {
|
|||
bool use_runtime_pm;
|
||||
bool is_intel;
|
||||
bool reset_signal_volt_on_suspend;
|
||||
unsigned long private[0] ____cacheline_aligned;
|
||||
unsigned long private[] ____cacheline_aligned;
|
||||
};
|
||||
|
||||
enum {
|
||||
|
|
|
@ -68,7 +68,7 @@ struct sdhci_cdns_priv {
|
|||
void __iomem *hrs_addr;
|
||||
bool enhanced_strobe;
|
||||
unsigned int nr_phy_params;
|
||||
struct sdhci_cdns_phy_param phy_params[0];
|
||||
struct sdhci_cdns_phy_param phy_params[];
|
||||
};
|
||||
|
||||
struct sdhci_cdns_phy_cfg {
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/clk.h>
|
||||
|
@ -73,6 +74,7 @@
|
|||
#define ESDHC_STROBE_DLL_CTRL 0x70
|
||||
#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
|
||||
#define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1)
|
||||
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT 0x7
|
||||
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3
|
||||
#define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT (4 << 20)
|
||||
|
||||
|
@ -160,6 +162,22 @@
|
|||
#define ESDHC_FLAG_CQHCI BIT(12)
|
||||
/* need request pmqos during low power */
|
||||
#define ESDHC_FLAG_PMQOS BIT(13)
|
||||
/* The IP state got lost in low power mode */
|
||||
#define ESDHC_FLAG_STATE_LOST_IN_LPMODE BIT(14)
|
||||
/* The IP lost clock rate in PM_RUNTIME */
|
||||
#define ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME BIT(15)
|
||||
/*
 * The IP does not support the ACMD23 feature completely when using ADMA mode.
 * In ADMA mode it only uses the 16-bit block count in register 0x4 (BLOCK_ATT)
 * as the CMD23 argument for ACMD23, which means it ignores the upper 16 bits
 * of the CMD23 argument. This breaks reliable writes to RPMB, because an RPMB
 * reliable write needs bit 31 of the CMD23 argument to be set.
 * imx6qpdl/imx6sx/imx6sl/imx7d have this limitation only for ADMA mode; SDMA
 * is not affected. So when these SoCs use ADMA mode, the ACMD23 feature needs
 * to be disabled.
 */
#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16)
|
||||
|
||||
struct esdhc_soc_data {
|
||||
u32 flags;
|
||||
|
@ -182,43 +200,67 @@ static const struct esdhc_soc_data esdhc_imx53_data = {
|
|||
};
|
||||
|
||||
static const struct esdhc_soc_data usdhc_imx6q_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING,
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
|
||||
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
|
||||
};
|
||||
|
||||
static const struct esdhc_soc_data usdhc_imx6sl_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
|
||||
| ESDHC_FLAG_HS200,
|
||||
| ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
|
||||
};
|
||||
|
||||
static const struct esdhc_soc_data usdhc_imx6sll_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
|
||||
};
|
||||
|
||||
static const struct esdhc_soc_data usdhc_imx6sx_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
|
||||
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
|
||||
};
|
||||
|
||||
static const struct esdhc_soc_data usdhc_imx6ull_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_ERR010450,
|
||||
| ESDHC_FLAG_ERR010450
|
||||
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
|
||||
};
|
||||
|
||||
static const struct esdhc_soc_data usdhc_imx7d_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_HS400,
|
||||
| ESDHC_FLAG_HS400
|
||||
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
|
||||
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
|
||||
};
|
||||
|
||||
static struct esdhc_soc_data usdhc_imx7ulp_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400,
|
||||
| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
|
||||
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
|
||||
};
|
||||
|
||||
static struct esdhc_soc_data usdhc_imx8qxp_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
|
||||
| ESDHC_FLAG_CQHCI,
|
||||
| ESDHC_FLAG_CQHCI
|
||||
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
|
||||
| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
|
||||
};
|
||||
|
||||
static struct esdhc_soc_data usdhc_imx8mm_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
|
||||
| ESDHC_FLAG_CQHCI
|
||||
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
|
||||
};
|
||||
|
||||
struct pltfm_imx_data {
|
||||
|
@ -264,11 +306,13 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
|
|||
{ .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
|
||||
{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
|
||||
{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
|
||||
{ .compatible = "fsl,imx6sll-usdhc", .data = &usdhc_imx6sll_data, },
|
||||
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
|
||||
{ .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
|
||||
{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
|
||||
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
|
||||
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
|
||||
{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
|
||||
|
@ -301,6 +345,17 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
|
|||
writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
|
||||
}
|
||||
|
||||
static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host)
|
||||
{
|
||||
u32 present_state;
|
||||
int ret;
|
||||
|
||||
ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, present_state,
|
||||
(present_state & ESDHC_CLOCK_GATE_OFF), 2, 100);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__);
|
||||
}
|
||||
|
||||
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
|
@ -514,6 +569,8 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
|
|||
else
|
||||
new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
|
||||
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
|
||||
if (!(new_val & ESDHC_VENDOR_SPEC_FRC_SDCLK_ON))
|
||||
esdhc_wait_for_card_clock_gate_off(host);
|
||||
return;
|
||||
case SDHCI_HOST_CONTROL2:
|
||||
new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
|
||||
|
@ -582,10 +639,24 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
|
|||
* For DMA access restore the levels to default value.
|
||||
*/
|
||||
m = readl(host->ioaddr + ESDHC_WTMK_LVL);
|
||||
if (val & SDHCI_TRNS_DMA)
|
||||
if (val & SDHCI_TRNS_DMA) {
|
||||
wml = ESDHC_WTMK_LVL_WML_VAL_DEF;
|
||||
else
|
||||
} else {
|
||||
u8 ctrl;
|
||||
wml = ESDHC_WTMK_LVL_WML_VAL_MAX;
|
||||
|
||||
			/*
			 * Since DMA mode is already disabled, we also need
			 * to clear DMASEL. Otherwise, for standard tuning,
			 * when the tuning command is sent, the usdhc will
			 * still prefetch the ADMA script from the wrong DMA
			 * address, and the IOMMU will then report errors
			 * indicating a missing TLB mapping.
			 */
|
||||
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
|
||||
ctrl &= ~SDHCI_CTRL_DMA_MASK;
|
||||
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
|
||||
}
|
||||
m &= ~(ESDHC_WTMK_LVL_RD_WML_MASK |
|
||||
ESDHC_WTMK_LVL_WR_WML_MASK);
|
||||
m |= (wml << ESDHC_WTMK_LVL_RD_WML_SHIFT) |
|
||||
|
@ -742,12 +813,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
|
|||
int ddr_pre_div = imx_data->is_ddr ? 2 : 1;
|
||||
int pre_div = 1;
|
||||
int div = 1;
|
||||
int ret;
|
||||
u32 temp, val;
|
||||
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
|
||||
writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
|
||||
host->ioaddr + ESDHC_VENDOR_SPEC);
|
||||
esdhc_wait_for_card_clock_gate_off(host);
|
||||
}
|
||||
|
||||
if (clock == 0) {
|
||||
|
@ -802,13 +875,18 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
|
|||
| (pre_div << ESDHC_PREDIV_SHIFT));
|
||||
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
|
||||
|
||||
/* need to wait the bit 3 of the PRSSTAT to be set, make sure card clock is stable */
|
||||
ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, temp,
|
||||
(temp & ESDHC_CLOCK_STABLE), 2, 100);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_warn(mmc_dev(host->mmc), "card clock still not stable in 100us!.\n");
|
||||
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
|
||||
writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
|
||||
host->ioaddr + ESDHC_VENDOR_SPEC);
|
||||
}
|
||||
|
||||
mdelay(1);
|
||||
}
|
||||
|
||||
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
|
||||
|
@ -983,12 +1061,17 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
|
|||
*/
|
||||
static void esdhc_set_strobe_dll(struct sdhci_host *host)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
u32 strobe_delay;
|
||||
u32 v;
|
||||
int ret;
|
||||
|
||||
/* disable clock before enabling strobe dll */
|
||||
writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
|
||||
~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
|
||||
host->ioaddr + ESDHC_VENDOR_SPEC);
|
||||
esdhc_wait_for_card_clock_gate_off(host);
|
||||
|
||||
/* force a reset on strobe dll */
|
||||
writel(ESDHC_STROBE_DLL_CTRL_RESET,
|
||||
|
@ -1000,19 +1083,21 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
|
|||
* enable strobe dll ctrl and adjust the delay target
|
||||
* for the uSDHC loopback read clock
|
||||
*/
|
||||
if (imx_data->boarddata.strobe_dll_delay_target)
|
||||
strobe_delay = imx_data->boarddata.strobe_dll_delay_target;
|
||||
else
|
||||
strobe_delay = ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT;
|
||||
v = ESDHC_STROBE_DLL_CTRL_ENABLE |
|
||||
ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT |
|
||||
(7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
|
||||
(strobe_delay << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
|
||||
writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
|
||||
/* wait 5us to make sure strobe dll status register stable */
|
||||
udelay(5);
|
||||
v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
|
||||
if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
|
||||
|
||||
/* wait max 50us to get the REF/SLV lock */
|
||||
ret = readl_poll_timeout(host->ioaddr + ESDHC_STROBE_DLL_STATUS, v,
|
||||
((v & ESDHC_STROBE_DLL_STS_REF_LOCK) && (v & ESDHC_STROBE_DLL_STS_SLV_LOCK)), 1, 50);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_warn(mmc_dev(host->mmc),
|
||||
"warning! HS400 strobe DLL status REF not lock!\n");
|
||||
if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
|
||||
dev_warn(mmc_dev(host->mmc),
|
||||
"warning! HS400 strobe DLL status SLV not lock!\n");
|
||||
"warning! HS400 strobe DLL status REF/SLV not lock in 50us, STROBE DLL status is %x!\n", v);
|
||||
}
|
||||
|
||||
static void esdhc_reset_tuning(struct sdhci_host *host)
|
||||
|
@ -1162,6 +1247,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
|
|||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
struct cqhci_host *cq_host = host->mmc->cqe_private;
|
||||
int tmp;
|
||||
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
|
@ -1238,6 +1324,21 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
|
|||
tmp &= ~ESDHC_STD_TUNING_EN;
|
||||
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
|
||||
}
|
||||
|
||||
		/*
		 * On i.MX8MM we run a dual-Linux setup: the 1st Linux uses the
		 * SD card as rootfs storage and the 2nd Linux uses eMMC as
		 * rootfs storage. The 1st Linux configures power/clock for the
		 * 2nd Linux.
		 *
		 * When the 2nd Linux was booting into the rootfs stage and the
		 * 1st Linux destroyed and then restarted it, we hit an SDHCI
		 * dump. After clearing the pending interrupts and halting the
		 * CQE via CQHCI_CTL, the issue is gone.
		 */
|
||||
if (cq_host) {
|
||||
tmp = cqhci_readl(cq_host, CQHCI_IS);
|
||||
cqhci_writel(cq_host, tmp, CQHCI_IS);
|
||||
cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1328,6 +1429,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
|
|||
of_property_read_u32(np, "fsl,tuning-start-tap",
|
||||
&boarddata->tuning_start_tap);
|
||||
|
||||
of_property_read_u32(np, "fsl,strobe-dll-delay-target",
|
||||
&boarddata->strobe_dll_delay_target);
|
||||
if (of_find_property(np, "no-1-8-v", NULL))
|
||||
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
|
||||
|
||||
|
@ -1487,7 +1590,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
|
||||
if (IS_ERR(imx_data->pinctrl)) {
|
||||
err = PTR_ERR(imx_data->pinctrl);
|
||||
goto disable_ahb_clk;
|
||||
dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n");
|
||||
}
|
||||
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
|
@ -1518,6 +1621,9 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
|
||||
host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
|
||||
host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
|
||||
host->mmc->caps2 |= MMC_CAP2_HS400_ES;
|
||||
host->mmc_host_ops.hs400_enhanced_strobe =
|
||||
|
@ -1605,6 +1711,8 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
|
|||
static int sdhci_esdhc_suspend(struct device *dev)
|
||||
{
|
||||
struct sdhci_host *host = dev_get_drvdata(dev);
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
int ret;
|
||||
|
||||
if (host->mmc->caps2 & MMC_CAP2_CQE) {
|
||||
|
@ -1613,10 +1721,20 @@ static int sdhci_esdhc_suspend(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) &&
|
||||
(host->tuning_mode != SDHCI_TUNING_MODE_1)) {
|
||||
mmc_retune_timer_stop(host->mmc);
|
||||
mmc_retune_needed(host->mmc);
|
||||
}
|
||||
 	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
 		mmc_retune_needed(host->mmc);
 
-	return sdhci_suspend_host(host);
+	ret = sdhci_suspend_host(host);
+	if (!ret)
+		return pinctrl_pm_select_sleep_state(dev);
+
+	return ret;
 }
 
 static int sdhci_esdhc_resume(struct device *dev)
@@ -1624,6 +1742,10 @@ static int sdhci_esdhc_resume(struct device *dev)
 	struct sdhci_host *host = dev_get_drvdata(dev);
 	int ret;
 
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret)
+		return ret;
+
 	/* re-initialize hw state in case it's lost in low power mode */
 	sdhci_esdhc_imx_hwinit(host);
@@ -1681,6 +1803,9 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
 	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
 		cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
 
+	if (imx_data->socdata->flags & ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME)
+		clk_set_rate(imx_data->clk_per, pltfm_host->clock);
+
 	err = clk_prepare_enable(imx_data->clk_ahb);
 	if (err)
 		goto remove_pm_qos_request;

@@ -31,6 +31,7 @@
 
 /* Present State Register */
 #define ESDHC_PRSSTAT			0x24
+#define ESDHC_CLOCK_GATE_OFF		0x00000080
 #define ESDHC_CLOCK_STABLE		0x00000008
 
 /* Protocol Control Register */

@@ -261,9 +261,24 @@ static const struct sdhci_iproc_data bcm2835_data = {
 	.mmc_caps = 0x00000000,
 };
 
+static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
+	.read_l = sdhci_iproc_readl,
+	.read_w = sdhci_iproc_readw,
+	.read_b = sdhci_iproc_readb,
+	.write_l = sdhci_iproc_writel,
+	.write_w = sdhci_iproc_writew,
+	.write_b = sdhci_iproc_writeb,
+	.set_clock = sdhci_set_clock,
+	.set_power = sdhci_set_power_and_bus_voltage,
+	.get_max_clock = sdhci_iproc_get_max_clock,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
 static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
 	.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
-	.ops = &sdhci_iproc_32only_ops,
+	.ops = &sdhci_iproc_bcm2711_ops,
 };
 
 static const struct sdhci_iproc_data bcm2711_data = {

@@ -121,17 +121,6 @@ static void sdhci_milbeaut_reset(struct sdhci_host *host, u8 mask)
 	}
 }
 
-static void sdhci_milbeaut_set_power(struct sdhci_host *host,
-				     unsigned char mode, unsigned short vdd)
-{
-	if (!IS_ERR(host->mmc->supply.vmmc)) {
-		struct mmc_host *mmc = host->mmc;
-
-		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
-	}
-	sdhci_set_power_noreg(host, mode, vdd);
-}
-
 static const struct sdhci_ops sdhci_milbeaut_ops = {
 	.voltage_switch = sdhci_milbeaut_soft_voltage_switch,
 	.get_min_clock = sdhci_milbeaut_get_min_clock,
@@ -139,7 +128,7 @@ static const struct sdhci_ops sdhci_milbeaut_ops = {
 	.set_clock = sdhci_set_clock,
 	.set_bus_width = sdhci_set_bus_width,
 	.set_uhs_signaling = sdhci_set_uhs_signaling,
-	.set_power = sdhci_milbeaut_set_power,
+	.set_power = sdhci_set_power_and_bus_voltage,
 };
 
 static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host,

@@ -977,9 +977,21 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
 		goto out;
 	}
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3);
-	config |= CORE_PWRSAVE_DLL;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3);
+	/*
+	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
+	 * When MCLK is gated OFF, it is not gated for less than 0.5us
+	 * and MCLK must be switched on for at-least 1us before DATA
+	 * starts coming. Controllers with 14lpp and later tech DLL cannot
+	 * guarantee above requirement. So PWRSAVE_DLL should not be
+	 * turned on for host controllers using this DLL.
+	 */
+	if (!msm_host->use_14lpp_dll_reset) {
+		config = readl_relaxed(host->ioaddr +
+				       msm_offset->core_vendor_spec3);
+		config |= CORE_PWRSAVE_DLL;
+		writel_relaxed(config, host->ioaddr +
+			       msm_offset->core_vendor_spec3);
+	}
 
 	/*
 	 * Drain writebuffer to ensure above DLL calibration
@@ -1811,6 +1823,13 @@ static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
 	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
 }
 
+static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
+{
+	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
+		cqhci_deactivate(host->mmc);
+	sdhci_reset(host, mask);
+}
+
 static const struct sdhci_msm_variant_ops mci_var_ops = {
 	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
 	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
@@ -1849,7 +1868,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
 
 static const struct sdhci_ops sdhci_msm_ops = {
-	.reset = sdhci_reset,
+	.reset = sdhci_msm_reset,
 	.set_clock = sdhci_msm_set_clock,
 	.get_min_clock = sdhci_msm_get_min_clock,
 	.get_max_clock = sdhci_msm_get_max_clock,
@ -325,17 +325,6 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
|
||||
unsigned short vdd)
|
||||
{
|
||||
if (!IS_ERR(host->mmc->supply.vmmc)) {
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
|
||||
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
|
||||
}
|
||||
sdhci_set_power_noreg(host, mode, vdd);
|
||||
}
|
||||
|
||||
static const struct sdhci_ops sdhci_arasan_ops = {
|
||||
.set_clock = sdhci_arasan_set_clock,
|
||||
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
|
||||
|
@ -343,7 +332,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
|
|||
.set_bus_width = sdhci_set_bus_width,
|
||||
.reset = sdhci_arasan_reset,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
.set_power = sdhci_arasan_set_power,
|
||||
.set_power = sdhci_set_power_and_bus_voltage,
|
||||
};
|
||||
|
||||
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
|
||||
|
@ -358,6 +347,17 @@ static struct sdhci_arasan_of_data sdhci_arasan_data = {
|
|||
.pdata = &sdhci_arasan_pdata,
|
||||
};
|
||||
|
||||
static const struct sdhci_pltfm_data sdhci_arasan_zynqmp_pdata = {
|
||||
.ops = &sdhci_arasan_ops,
|
||||
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
|
||||
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
|
||||
SDHCI_QUIRK2_STOP_WITH_TC,
|
||||
};
|
||||
|
||||
static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
|
||||
.pdata = &sdhci_arasan_zynqmp_pdata,
|
||||
};
|
||||
|
||||
static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
|
||||
{
|
||||
int cmd_error = 0;
|
||||
|
@ -403,7 +403,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = {
|
|||
.set_bus_width = sdhci_set_bus_width,
|
||||
.reset = sdhci_arasan_reset,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
.set_power = sdhci_arasan_set_power,
|
||||
.set_power = sdhci_set_power_and_bus_voltage,
|
||||
.irq = sdhci_arasan_cqhci_irq,
|
||||
};
|
||||
|
||||
|
@ -553,7 +553,7 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
|
|||
},
|
||||
{
|
||||
.compatible = "xlnx,zynqmp-8.9a",
|
||||
.data = &sdhci_arasan_data,
|
||||
.data = &sdhci_arasan_zynqmp_data,
|
||||
},
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
@ -757,6 +757,50 @@ static const struct clk_ops zynqmp_sampleclk_ops = {
|
|||
.set_phase = sdhci_zynqmp_sampleclk_set_phase,
|
||||
};
|
||||
|
||||
static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
|
||||
struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
|
||||
sdhci_arasan->clk_data.clk_of_data;
|
||||
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
|
||||
u16 clk;
|
||||
|
||||
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
|
||||
clk &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
|
||||
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
|
||||
|
||||
/* Issue DLL Reset */
|
||||
eemi_ops->ioctl(deviceid, IOCTL_SD_DLL_RESET,
|
||||
PM_DLL_RESET_PULSE, 0, NULL);
|
||||
|
||||
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
|
||||
|
||||
sdhci_enable_clk(host, clk);
|
||||
}
|
||||
|
||||
static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct sdhci_host *host = mmc_priv(mmc);
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
|
||||
struct clk_hw *hw = &sdhci_arasan->clk_data.sdcardclk_hw;
|
||||
const char *clk_name = clk_hw_get_name(hw);
|
||||
u32 device_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 :
|
||||
NODE_SD_1;
|
||||
int err;
|
||||
|
||||
arasan_zynqmp_dll_reset(host, device_id);
|
||||
|
||||
err = sdhci_execute_tuning(mmc, opcode);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
arasan_zynqmp_dll_reset(host, device_id);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
|
||||
*
|
||||
|
@ -1247,6 +1291,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
|
|||
|
||||
zynqmp_clk_data->eemi_ops = eemi_ops;
|
||||
sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
|
||||
host->mmc_host_ops.execute_tuning =
|
||||
arasan_zynqmp_execute_tuning;
|
||||
}
|
||||
|
||||
arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
|
||||
|
|
|
@@ -101,22 +101,6 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
 
-/*
- * In this specific implementation of the SDHCI controller, the power register
- * needs to have a valid voltage set even when the power supply is managed by
- * an external regulator.
- */
-static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
-				 unsigned short vdd)
-{
-	if (!IS_ERR(host->mmc->supply.vmmc)) {
-		struct mmc_host *mmc = host->mmc;
-
-		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
-	}
-	sdhci_set_power_noreg(host, mode, vdd);
-}
-
 static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
 					 unsigned int timing)
 {
@@ -146,7 +130,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 	.set_bus_width = sdhci_set_bus_width,
 	.reset = sdhci_at91_reset,
 	.set_uhs_signaling = sdhci_at91_set_uhs_signaling,
-	.set_power = sdhci_at91_set_power,
+	.set_power = sdhci_set_power_and_bus_voltage,
 };
 
 static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
@@ -205,8 +189,8 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
 	/* Set capabilities in ro mode. */
 	writel(0, host->ioaddr + SDMMC_CACR);
 
-	dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
-		 clk_mul, gck_rate, clk_base_rate);
+	dev_dbg(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+		clk_mul, gck_rate, clk_base_rate);
 
 	/*
 	 * We have to set preset values because it depends on the clk_mul

@@ -108,6 +108,11 @@ struct sdhci_omap_host {
 	struct pinctrl *pinctrl;
 	struct pinctrl_state **pinctrl_state;
 	bool is_tuning;
+	/* Omap specific context save */
+	u32 con;
+	u32 hctl;
+	u32 sysctl;
+	u32 capa;
 };
 
 static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
@@ -1235,12 +1240,64 @@ static int sdhci_omap_remove(struct platform_device *pdev)
 
 	return 0;
 }
+#ifdef CONFIG_PM_SLEEP
+static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
+{
+	omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+	omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+	omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+}
+
+static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
+{
+	sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
+	sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
+	sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
+}
+
+static int __maybe_unused sdhci_omap_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+	sdhci_suspend_host(host);
+
+	sdhci_omap_context_save(omap_host);
+
+	pinctrl_pm_select_idle_state(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+static int __maybe_unused sdhci_omap_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+	pm_runtime_force_resume(dev);
+
+	pinctrl_pm_select_default_state(dev);
+
+	sdhci_omap_context_restore(omap_host);
+
+	sdhci_resume_host(host);
+
+	return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend,
+			 sdhci_omap_resume);
 
 static struct platform_driver sdhci_omap_driver = {
 	.probe = sdhci_omap_probe,
 	.remove = sdhci_omap_remove,
 	.driver = {
 		   .name = "sdhci-omap",
+		   .pm = &sdhci_omap_dev_pm_ops,
 		   .of_match_table = omap_sdhci_match,
 		   },
 };

@@ -163,7 +163,7 @@ struct sdhci_pci_slot {
 	bool cd_override_level;
 
 	void (*hw_reset)(struct sdhci_host *host);
-	unsigned long private[0] ____cacheline_aligned;
+	unsigned long private[] ____cacheline_aligned;
 };
 
 struct sdhci_pci_chip {

@@ -25,7 +25,7 @@ struct sdhci_pltfm_host {
 	unsigned int clock;
 	u16 xfer_mode_shadow;
 
-	unsigned long private[0] ____cacheline_aligned;
+	unsigned long private[] ____cacheline_aligned;
 };
 
 #ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
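Several hunks in this series replace the old one-element `private[0]` arrays with C99 flexible array members. As a rough sketch of why that layout works (all names here are illustrative, not the kernel's): the core allocates one buffer large enough for its own struct plus the driver's private data, and the trailing member gives a typed handle to the extra space.

#include <linux/slab.h>

struct example_host {
	unsigned int clock;
	unsigned long private[];	/* flexible array member, no [0] hack */
};

/* Driver-private data lives directly behind the core struct. */
static inline void *example_host_priv(struct example_host *host)
{
	return host->private;
}

static struct example_host *example_host_alloc(size_t priv_size)
{
	/* One allocation covers the core struct plus the private area. */
	return kzalloc(sizeof(struct example_host) + priv_size, GFP_KERNEL);
}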
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 
 #include "sdhci-pltfm.h"
+#include "mmc_hsq.h"
 
 /* SDHCI_ARGUMENT2 register high 16bit */
 #define SDHCI_SPRD_ARG2_STUFF		GENMASK(31, 16)
@@ -379,6 +380,16 @@ static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
 	return 0;
 }
 
+static void sdhci_sprd_request_done(struct sdhci_host *host,
+				    struct mmc_request *mrq)
+{
+	/* Validate if the request was from software queue firstly. */
+	if (mmc_hsq_finalize_request(host->mmc, mrq))
+		return;
+
+	mmc_request_done(host->mmc, mrq);
+}
+
 static struct sdhci_ops sdhci_sprd_ops = {
 	.read_l = sdhci_sprd_readl,
 	.write_l = sdhci_sprd_writel,
@@ -392,6 +403,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
 	.hw_reset = sdhci_sprd_hw_reset,
 	.get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
 	.get_ro = sdhci_sprd_get_ro,
+	.request_done = sdhci_sprd_request_done,
 };
 
 static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -521,6 +533,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
 {
 	struct sdhci_host *host;
 	struct sdhci_sprd_host *sprd_host;
+	struct mmc_hsq *hsq;
 	struct clk *clk;
 	int ret = 0;
 
@@ -543,7 +556,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
 		sdhci_sprd_voltage_switch;
 
 	host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
-		MMC_CAP_ERASE | MMC_CAP_CMD23;
+		MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
 	ret = mmc_of_parse(host->mmc);
 	if (ret)
 		goto pltfm_free;
@@ -631,6 +644,18 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
 
 	sprd_host->flags = host->flags;
 
+	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
+	if (!hsq) {
+		ret = -ENOMEM;
+		goto err_cleanup_host;
+	}
+
+	ret = mmc_hsq_init(hsq, host->mmc);
+	if (ret)
+		goto err_cleanup_host;
+
+	host->always_defer_done = true;
+
 	ret = __sdhci_add_host(host);
 	if (ret)
 		goto err_cleanup_host;
@@ -689,6 +714,7 @@ static int sdhci_sprd_runtime_suspend(struct device *dev)
 	struct sdhci_host *host = dev_get_drvdata(dev);
 	struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
 
+	mmc_hsq_suspend(host->mmc);
 	sdhci_runtime_suspend_host(host);
 
 	clk_disable_unprepare(sprd_host->clk_sdio);
@@ -717,6 +743,8 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
 		goto clk_disable;
 
 	sdhci_runtime_resume_host(host, 1);
+	mmc_hsq_resume(host->mmc);
 
 	return 0;
 
 clk_disable:

@@ -45,6 +45,7 @@
 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
 
 #define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
+#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
 #define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
 #define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
@@ -1227,6 +1228,34 @@ static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
 	return 0;
 }
 
+static void tegra_sdhci_set_timeout(struct sdhci_host *host,
+				    struct mmc_command *cmd)
+{
+	u32 val;
+
+	/*
+	 * HW busy detection timeout is based on programmed data timeout
+	 * counter and maximum supported timeout is 11s which may not be
+	 * enough for long operations like cache flush, sleep awake, erase.
+	 *
+	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
+	 * host controller to wait for busy state until the card is busy
+	 * without HW timeout.
+	 *
+	 * So, use infinite busy wait mode for operations that may take
+	 * more than maximum HW busy timeout of 11s otherwise use finite
+	 * busy wait mode.
+	 */
+	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+	if (cmd && cmd->busy_timeout >= 11 * HZ)
+		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
+	else
+		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
+	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+
+	__sdhci_set_timeout(host, cmd);
+}
+
 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
 	.write_l    = tegra_cqhci_writel,
 	.enable	= sdhci_tegra_cqe_enable,
@@ -1366,6 +1395,7 @@ static const struct sdhci_ops tegra210_sdhci_ops = {
 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
 	.voltage_switch = tegra_sdhci_voltage_switch,
 	.get_max_clock = tegra_sdhci_get_max_clock,
+	.set_timeout = tegra_sdhci_set_timeout,
 };
 
 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
@@ -1403,6 +1433,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
 	.voltage_switch = tegra_sdhci_voltage_switch,
 	.get_max_clock = tegra_sdhci_get_max_clock,
 	.irq = sdhci_tegra_cqhci_irq,
+	.set_timeout = tegra_sdhci_set_timeout,
 };
 
 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
@@ -1552,8 +1583,8 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
-	/* R1B responses is required to properly manage HW busy detection. */
-	host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+	/* HW busy detection is supported, but R1B responses are required. */
+	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
 
 	tegra_sdhci_parse_dt(host);

@@ -9,6 +9,7 @@
  * - JMicron (hardware and technical support)
  */
 
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/ktime.h>
@@ -153,7 +154,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
 	u32 present;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
-	    !mmc_card_is_removable(host->mmc))
+	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
 		return;
 
 	if (enable) {
@@ -1766,10 +1767,9 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
 
 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 		pre_val = sdhci_get_preset_value(host);
-		div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
-			>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
+		div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
 		if (host->clk_mul &&
-			(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
+			(pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
 			clk = SDHCI_PROG_CLOCK_MODE;
 			real_div = div + 1;
 			clk_mul = host->clk_mul;
@@ -2010,6 +2010,25 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
 }
 EXPORT_SYMBOL_GPL(sdhci_set_power);
 
+/*
+ * Some controllers need to configure a valid bus voltage on their power
+ * register regardless of whether an external regulator is taking care of power
+ * supply. This helper function takes care of it if set as the controller's
+ * sdhci_ops.set_power callback.
+ */
+void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
+				     unsigned char mode,
+				     unsigned short vdd)
+{
+	if (!IS_ERR(host->mmc->supply.vmmc)) {
+		struct mmc_host *mmc = host->mmc;
+
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+	}
+	sdhci_set_power_noreg(host, mode, vdd);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
+
 /*****************************************************************************\
  *                                                                           *
  * MMC callbacks                                                             *
@@ -2227,8 +2246,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 			sdhci_enable_preset_value(host, true);
 			preset = sdhci_get_preset_value(host);
-			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
-				>> SDHCI_PRESET_DRV_SHIFT;
+			ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
+						  preset);
 		}
 
 		/* Re-enable SD Clock */
@@ -2944,7 +2963,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	mmc_request_done(host->mmc, mrq);
+	if (host->ops->request_done)
+		host->ops->request_done(host, mrq);
+	else
+		mmc_request_done(host->mmc, mrq);
 
 	return false;
 }
@@ -3247,7 +3269,7 @@ static inline bool sdhci_defer_done(struct sdhci_host *host,
 {
 	struct mmc_data *data = mrq->data;
 
-	return host->pending_reset ||
+	return host->pending_reset || host->always_defer_done ||
 	       ((host->flags & SDHCI_REQ_USE_DMA) && data &&
 		data->host_cookie == COOKIE_MAPPED);
 }
@@ -3372,7 +3394,12 @@ out:
 
 	/* Process mrqs ready for immediate completion */
 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
-		if (mrqs_done[i])
+		if (!mrqs_done[i])
+			continue;
+
+		if (host->ops->request_done)
+			host->ops->request_done(host, mrqs_done[i]);
+		else
 			mmc_request_done(host->mmc, mrqs_done[i]);
 	}

@@ -9,6 +9,7 @@
 #ifndef __SDHCI_HW_H
 #define __SDHCI_HW_H
 
+#include <linux/bits.h>
 #include <linux/scatterlist.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
@@ -267,12 +268,9 @@
 #define SDHCI_PRESET_FOR_SDR104	0x6C
 #define SDHCI_PRESET_FOR_DDR50	0x6E
 #define SDHCI_PRESET_FOR_HS400	0x74 /* Non-standard */
-#define SDHCI_PRESET_DRV_MASK	0xC000
-#define SDHCI_PRESET_DRV_SHIFT	14
-#define SDHCI_PRESET_CLKGEN_SEL_MASK	0x400
-#define SDHCI_PRESET_CLKGEN_SEL_SHIFT	10
-#define SDHCI_PRESET_SDCLK_FREQ_MASK	0x3FF
-#define SDHCI_PRESET_SDCLK_FREQ_SHIFT	0
+#define SDHCI_PRESET_DRV_MASK	GENMASK(15, 14)
+#define SDHCI_PRESET_CLKGEN_SEL	BIT(10)
+#define SDHCI_PRESET_SDCLK_FREQ_MASK	GENMASK(9, 0)
 
 #define SDHCI_SLOT_INT_STATUS	0xFC
 
@@ -537,6 +535,7 @@ struct sdhci_host {
 	bool irq_wake_enabled;	/* IRQ wakeup is enabled */
 	bool v4_mode;		/* Host Version 4 Enable */
 	bool use_external_dma;	/* Host selects to use external DMA */
+	bool always_defer_done;	/* Always defer to complete requests */
 
 	struct mmc_request *mrqs_done[SDHCI_MAX_MRQS];	/* Requests done */
 	struct mmc_command *cmd;	/* Current command */
@@ -613,7 +612,7 @@ struct sdhci_host {
 
 	u64			data_timeout;
 
-	unsigned long private[0] ____cacheline_aligned;
+	unsigned long private[] ____cacheline_aligned;
 };
 
 struct sdhci_ops {
@@ -654,6 +653,8 @@ struct sdhci_ops {
 	void	(*voltage_switch)(struct sdhci_host *host);
 	void	(*adma_write_desc)(struct sdhci_host *host, void **desc,
 				   dma_addr_t addr, int len, unsigned int cmd);
+	void	(*request_done)(struct sdhci_host *host,
+				struct mmc_request *mrq);
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -769,6 +770,9 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
 void sdhci_enable_clk(struct sdhci_host *host, u16 clk);
 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd);
+void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
+				     unsigned char mode,
+				     unsigned short vdd);
 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd);
 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
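The sdhci.c and sdhci.h hunks above convert open-coded mask-and-shift arithmetic to GENMASK()/FIELD_GET(). A small hedged sketch of the equivalence (the EXAMPLE_ macro simply mirrors the shape of SDHCI_PRESET_SDCLK_FREQ_MASK; the functions are illustrative only):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_SDCLK_FREQ_MASK		GENMASK(9, 0)

/* Old style: mask the field, then shift by its LSB position. */
static u16 sdclk_freq_old(u16 pre_val)
{
	return (pre_val & 0x3FF) >> 0;
}

/* New style: FIELD_GET() derives the shift from the mask at compile time. */
static u16 sdclk_freq_new(u16 pre_val)
{
	return FIELD_GET(EXAMPLE_SDCLK_FREQ_MASK, pre_val);
}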
|
@ -81,7 +81,8 @@ static struct regmap_config sdhci_am654_regmap_config = {
|
|||
|
||||
struct sdhci_am654_data {
|
||||
struct regmap *base;
|
||||
int otap_del_sel;
|
||||
bool legacy_otapdly;
|
||||
int otap_del_sel[11];
|
||||
int trm_icp;
|
||||
int drv_strength;
|
||||
bool dll_on;
|
||||
|
@ -98,7 +99,27 @@ struct sdhci_am654_driver_data {
|
|||
#define DLL_PRESENT (1 << 3)
|
||||
};
|
||||
|
||||
static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
struct timing_data {
|
||||
const char *binding;
|
||||
u32 capability;
|
||||
};
|
||||
|
||||
static const struct timing_data td[] = {
|
||||
[MMC_TIMING_LEGACY] = {"ti,otap-del-sel-legacy", 0},
|
||||
[MMC_TIMING_MMC_HS] = {"ti,otap-del-sel-mmc-hs", MMC_CAP_MMC_HIGHSPEED},
|
||||
[MMC_TIMING_SD_HS] = {"ti,otap-del-sel-sd-hs", MMC_CAP_SD_HIGHSPEED},
|
||||
[MMC_TIMING_UHS_SDR12] = {"ti,otap-del-sel-sdr12", MMC_CAP_UHS_SDR12},
|
||||
[MMC_TIMING_UHS_SDR25] = {"ti,otap-del-sel-sdr25", MMC_CAP_UHS_SDR25},
|
||||
[MMC_TIMING_UHS_SDR50] = {"ti,otap-del-sel-sdr50", MMC_CAP_UHS_SDR50},
|
||||
[MMC_TIMING_UHS_SDR104] = {"ti,otap-del-sel-sdr104",
|
||||
MMC_CAP_UHS_SDR104},
|
||||
[MMC_TIMING_UHS_DDR50] = {"ti,otap-del-sel-ddr50", MMC_CAP_UHS_DDR50},
|
||||
[MMC_TIMING_MMC_DDR52] = {"ti,otap-del-sel-ddr52", MMC_CAP_DDR},
|
||||
[MMC_TIMING_MMC_HS200] = {"ti,otap-del-sel-hs200", MMC_CAP2_HS200},
|
||||
[MMC_TIMING_MMC_HS400] = {"ti,otap-del-sel-hs400", MMC_CAP2_HS400},
|
||||
};
|
||||
|
||||
static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
|
||||
|
@ -106,6 +127,73 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
|
|||
u32 mask, val;
|
||||
int ret;
|
||||
|
||||
if (sdhci_am654->flags & FREQSEL_2_BIT) {
|
||||
switch (clock) {
|
||||
case 200000000:
|
||||
sel50 = 0;
|
||||
sel100 = 0;
|
||||
break;
|
||||
case 100000000:
|
||||
sel50 = 0;
|
||||
sel100 = 1;
|
||||
break;
|
||||
default:
|
||||
sel50 = 1;
|
||||
sel100 = 0;
|
||||
}
|
||||
|
||||
/* Configure PHY DLL frequency */
|
||||
mask = SEL50_MASK | SEL100_MASK;
|
||||
val = (sel50 << SEL50_SHIFT) | (sel100 << SEL100_SHIFT);
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
|
||||
|
||||
} else {
|
||||
switch (clock) {
|
||||
case 200000000:
|
||||
freqsel = 0x0;
|
||||
break;
|
||||
default:
|
||||
freqsel = 0x4;
|
||||
}
|
||||
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, FREQSEL_MASK,
|
||||
freqsel << FREQSEL_SHIFT);
|
||||
}
|
||||
/* Configure DLL TRIM */
|
||||
mask = DLL_TRIM_ICP_MASK;
|
||||
val = sdhci_am654->trm_icp << DLL_TRIM_ICP_SHIFT;
|
||||
|
||||
/* Configure DLL driver strength */
|
||||
mask |= DR_TY_MASK;
|
||||
val |= sdhci_am654->drv_strength << DR_TY_SHIFT;
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, mask, val);
|
||||
|
||||
/* Enable DLL */
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK,
|
||||
0x1 << ENDLL_SHIFT);
|
||||
/*
|
||||
* Poll for DLL ready. Use a one second timeout.
|
||||
* Works in all experiments done so far
|
||||
*/
|
||||
ret = regmap_read_poll_timeout(sdhci_am654->base, PHY_STAT1, val,
|
||||
val & DLLRDY_MASK, 1000, 1000000);
|
||||
if (ret) {
|
||||
dev_err(mmc_dev(host->mmc), "DLL failed to relock\n");
|
||||
return;
|
||||
}
|
||||
|
||||
sdhci_am654->dll_on = true;
|
||||
}
|
||||
|
||||
static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
|
||||
unsigned char timing = host->mmc->ios.timing;
|
||||
u32 otap_del_sel;
|
||||
u32 otap_del_ena;
|
||||
u32 mask, val;
|
||||
|
||||
if (sdhci_am654->dll_on) {
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
|
||||
|
||||
|
@ -116,80 +204,31 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
|
|||
|
||||
if (clock > CLOCK_TOO_SLOW_HZ) {
|
||||
/* Setup DLL Output TAP delay */
|
||||
if (sdhci_am654->legacy_otapdly)
|
||||
otap_del_sel = sdhci_am654->otap_del_sel[0];
|
||||
else
|
||||
otap_del_sel = sdhci_am654->otap_del_sel[timing];
|
||||
|
||||
otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
|
||||
|
||||
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
|
||||
val = (1 << OTAPDLYENA_SHIFT) |
|
||||
(sdhci_am654->otap_del_sel << OTAPDLYSEL_SHIFT);
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
|
||||
val = (otap_del_ena << OTAPDLYENA_SHIFT) |
|
||||
(otap_del_sel << OTAPDLYSEL_SHIFT);
|
||||
|
||||
/* Write to STRBSEL for HS400 speed mode */
|
||||
if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
|
||||
if (timing == MMC_TIMING_MMC_HS400) {
|
||||
if (sdhci_am654->flags & STRBSEL_4_BIT)
|
||||
mask = STRBSEL_4BIT_MASK;
|
||||
mask |= STRBSEL_4BIT_MASK;
|
||||
else
|
||||
mask = STRBSEL_8BIT_MASK;
|
||||
mask |= STRBSEL_8BIT_MASK;
|
||||
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask,
|
||||
sdhci_am654->strb_sel <<
|
||||
STRBSEL_SHIFT);
|
||||
val |= sdhci_am654->strb_sel << STRBSEL_SHIFT;
|
||||
}
|
||||
|
||||
if (sdhci_am654->flags & FREQSEL_2_BIT) {
|
||||
switch (clock) {
|
||||
case 200000000:
|
||||
sel50 = 0;
|
||||
sel100 = 0;
|
||||
break;
|
||||
case 100000000:
|
||||
sel50 = 0;
|
||||
sel100 = 1;
|
||||
break;
|
||||
default:
|
||||
sel50 = 1;
|
||||
sel100 = 0;
|
||||
}
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
|
||||
|
||||
/* Configure PHY DLL frequency */
|
||||
mask = SEL50_MASK | SEL100_MASK;
|
||||
val = (sel50 << SEL50_SHIFT) | (sel100 << SEL100_SHIFT);
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask,
|
||||
val);
|
||||
} else {
|
||||
switch (clock) {
|
||||
case 200000000:
|
||||
freqsel = 0x0;
|
||||
break;
|
||||
default:
|
||||
freqsel = 0x4;
|
||||
}
|
||||
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL5,
|
||||
FREQSEL_MASK,
|
||||
freqsel << FREQSEL_SHIFT);
|
||||
}
|
||||
|
||||
/* Configure DLL TRIM */
|
||||
mask = DLL_TRIM_ICP_MASK;
|
||||
val = sdhci_am654->trm_icp << DLL_TRIM_ICP_SHIFT;
|
||||
|
||||
/* Configure DLL driver strength */
|
||||
mask |= DR_TY_MASK;
|
||||
val |= sdhci_am654->drv_strength << DR_TY_SHIFT;
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, mask, val);
|
||||
/* Enable DLL */
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK,
|
||||
0x1 << ENDLL_SHIFT);
|
||||
/*
|
||||
* Poll for DLL ready. Use a one second timeout.
|
||||
* Works in all experiments done so far
|
||||
*/
|
||||
ret = regmap_read_poll_timeout(sdhci_am654->base, PHY_STAT1,
|
||||
val, val & DLLRDY_MASK, 1000,
|
||||
1000000);
|
||||
if (ret) {
|
||||
dev_err(mmc_dev(host->mmc), "DLL failed to relock\n");
|
||||
return;
|
||||
}
|
||||
|
||||
sdhci_am654->dll_on = true;
|
||||
if (timing > MMC_TIMING_UHS_SDR25)
|
||||
sdhci_am654_setup_dll(host, clock);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -198,27 +237,24 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
|
|||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
|
||||
int val, mask;
|
||||
unsigned char timing = host->mmc->ios.timing;
|
||||
u32 otap_del_sel;
|
||||
u32 mask, val;
|
||||
|
||||
/* Setup DLL Output TAP delay */
|
||||
if (sdhci_am654->legacy_otapdly)
|
||||
otap_del_sel = sdhci_am654->otap_del_sel[0];
|
||||
else
|
||||
otap_del_sel = sdhci_am654->otap_del_sel[timing];
|
||||
|
||||
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
|
||||
val = (1 << OTAPDLYENA_SHIFT) |
|
||||
(sdhci_am654->otap_del_sel << OTAPDLYSEL_SHIFT);
|
||||
val = (0x1 << OTAPDLYENA_SHIFT) |
|
||||
(otap_del_sel << OTAPDLYSEL_SHIFT);
|
||||
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
|
||||
|
||||
sdhci_set_clock(host, clock);
|
||||
}
|
||||
|
||||
static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode,
|
||||
unsigned short vdd)
|
||||
{
|
||||
if (!IS_ERR(host->mmc->supply.vmmc)) {
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
|
||||
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
|
||||
}
|
||||
sdhci_set_power_noreg(host, mode, vdd);
|
||||
}
|
||||
|
||||
static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
|
||||
{
|
||||
unsigned char timing = host->mmc->ios.timing;
|
||||
|
@ -274,7 +310,7 @@ static struct sdhci_ops sdhci_am654_ops = {
|
|||
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
.set_bus_width = sdhci_set_bus_width,
|
||||
.set_power = sdhci_am654_set_power,
|
||||
.set_power = sdhci_set_power_and_bus_voltage,
|
||||
.set_clock = sdhci_am654_set_clock,
|
||||
.write_b = sdhci_am654_write_b,
|
||||
.irq = sdhci_am654_cqhci_irq,
|
||||
|
@ -297,7 +333,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
|
|||
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
.set_bus_width = sdhci_set_bus_width,
|
||||
.set_power = sdhci_am654_set_power,
|
||||
.set_power = sdhci_set_power_and_bus_voltage,
|
||||
.set_clock = sdhci_am654_set_clock,
|
||||
.write_b = sdhci_am654_write_b,
|
||||
.irq = sdhci_am654_cqhci_irq,
|
||||
|
@ -320,7 +356,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
|
|||
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
.set_bus_width = sdhci_set_bus_width,
|
||||
.set_power = sdhci_am654_set_power,
|
||||
.set_power = sdhci_set_power_and_bus_voltage,
|
||||
.set_clock = sdhci_j721e_4bit_set_clock,
|
||||
.write_b = sdhci_am654_write_b,
|
||||
.irq = sdhci_am654_cqhci_irq,
|
||||
|
@ -371,6 +407,55 @@ static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
|
||||
struct sdhci_am654_data *sdhci_am654)
|
||||
{
|
||||
struct device *dev = mmc_dev(host->mmc);
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].binding,
|
||||
&sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]);
|
||||
if (ret) {
|
||||
/*
|
||||
* ti,otap-del-sel-legacy is mandatory, look for old binding
|
||||
* if not found.
|
||||
*/
|
||||
ret = device_property_read_u32(dev, "ti,otap-del-sel",
|
||||
&sdhci_am654->otap_del_sel[0]);
|
||||
if (ret) {
|
||||
dev_err(dev, "Couldn't find otap-del-sel\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_info(dev, "Using legacy binding ti,otap-del-sel\n");
|
||||
sdhci_am654->legacy_otapdly = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
|
||||
|
||||
ret = device_property_read_u32(dev, td[i].binding,
|
||||
&sdhci_am654->otap_del_sel[i]);
|
||||
if (ret) {
|
||||
dev_dbg(dev, "Couldn't find %s\n",
|
||||
td[i].binding);
|
||||
/*
|
||||
* Remove the corresponding capability
|
||||
* if an otap-del-sel value is not found
|
||||
*/
|
||||
if (i <= MMC_TIMING_MMC_DDR52)
|
||||
host->mmc->caps &= ~td[i].capability;
|
||||
else
|
||||
host->mmc->caps2 &= ~td[i].capability;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdhci_am654_init(struct sdhci_host *host)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
|
@ -419,6 +504,10 @@ static int sdhci_am654_init(struct sdhci_host *host)
|
|||
if (ret)
|
||||
goto err_cleanup_host;
|
||||
|
||||
ret = sdhci_am654_get_otap_delay(host, sdhci_am654);
|
||||
if (ret)
|
||||
goto err_cleanup_host;
|
||||
|
||||
ret = __sdhci_add_host(host);
|
||||
if (ret)
|
||||
goto err_cleanup_host;
|
||||
|
@ -437,11 +526,6 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
|
|||
int drv_strength;
|
||||
int ret;
|
||||
|
||||
ret = device_property_read_u32(dev, "ti,otap-del-sel",
|
||||
&sdhci_am654->otap_del_sel);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (sdhci_am654->flags & DLL_PRESENT) {
|
||||
ret = device_property_read_u32(dev, "ti,trm-icp",
|
||||
&sdhci_am654->trm_icp);
|
||||
|
|
|
@ -176,20 +176,13 @@ struct tmio_mmc_host {
|
|||
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
|
||||
void (*reset)(struct tmio_mmc_host *host);
|
||||
void (*hw_reset)(struct tmio_mmc_host *host);
|
||||
void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
|
||||
bool (*check_scc_error)(struct tmio_mmc_host *host);
|
||||
bool (*check_retune)(struct tmio_mmc_host *host);
|
||||
|
||||
/*
|
||||
* Mandatory callback for tuning to occur which is optional for SDR50
|
||||
* and mandatory for SDR104.
|
||||
*/
|
||||
unsigned int (*init_tuning)(struct tmio_mmc_host *host);
|
||||
int (*select_tuning)(struct tmio_mmc_host *host);
|
||||
|
||||
/* Tuning values: 1 for success, 0 for failure */
|
||||
DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
|
||||
unsigned int tap_num;
|
||||
unsigned long tap_set;
|
||||
int (*execute_tuning)(struct tmio_mmc_host *host, u32 opcode);
|
||||
|
||||
void (*prepare_hs400_tuning)(struct tmio_mmc_host *host);
|
||||
void (*hs400_downgrade)(struct tmio_mmc_host *host);
|
||||
|
|
|
@ -718,38 +718,13 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
|
|||
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
int i, ret = 0;
|
||||
int ret;
|
||||
|
||||
if (!host->init_tuning || !host->select_tuning)
|
||||
/* Tuning is not supported */
|
||||
goto out;
|
||||
if (!host->execute_tuning)
|
||||
return 0;
|
||||
|
||||
host->tap_num = host->init_tuning(host);
|
||||
if (!host->tap_num)
|
||||
/* Tuning is not supported */
|
||||
goto out;
|
||||
ret = host->execute_tuning(host, opcode);
|
||||
|
||||
if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
|
||||
dev_warn_once(&host->pdev->dev,
|
||||
"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
bitmap_zero(host->taps, host->tap_num * 2);
|
||||
|
||||
/* Issue CMD19 twice for each tap */
|
||||
for (i = 0; i < 2 * host->tap_num; i++) {
|
||||
if (host->prepare_tuning)
|
||||
host->prepare_tuning(host, i % host->tap_num);
|
||||
|
||||
ret = mmc_send_tuning(mmc, opcode, NULL);
|
||||
if (ret == 0)
|
||||
set_bit(i, host->taps);
|
||||
}
|
||||
|
||||
ret = host->select_tuning(host);
|
||||
|
||||
out:
|
||||
if (ret < 0) {
|
||||
dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
|
||||
tmio_mmc_hw_reset(mmc);
|
||||
|
@ -843,8 +818,8 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
|
|||
if (mrq->cmd->error || (mrq->data && mrq->data->error))
|
||||
tmio_mmc_abort_dma(host);
|
||||
|
||||
/* SCC error means retune, but executed command was still successful */
|
||||
if (host->check_scc_error && host->check_scc_error(host))
|
||||
/* Error means retune, but executed command was still successful */
|
||||
if (host->check_retune && host->check_retune(host))
|
||||
mmc_retune_needed(host->mmc);
|
||||
|
||||
/* If SET_BLOCK_COUNT, continue with main command */
|
||||
|
@ -1022,34 +997,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
|
|||
return blk_size;
|
||||
}
|
||||
|
||||
static int tmio_mmc_prepare_hs400_tuning(struct mmc_host *mmc,
|
||||
struct mmc_ios *ios)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
|
||||
if (host->prepare_hs400_tuning)
|
||||
host->prepare_hs400_tuning(host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tmio_mmc_hs400_downgrade(struct mmc_host *mmc)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
|
||||
if (host->hs400_downgrade)
|
||||
host->hs400_downgrade(host);
|
||||
}
|
||||
|
||||
static void tmio_mmc_hs400_complete(struct mmc_host *mmc)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
|
||||
if (host->hs400_complete)
|
||||
host->hs400_complete(host);
|
||||
}
|
||||
|
||||
static const struct mmc_host_ops tmio_mmc_ops = {
|
||||
static struct mmc_host_ops tmio_mmc_ops = {
|
||||
.request = tmio_mmc_request,
|
||||
.set_ios = tmio_mmc_set_ios,
|
||||
.get_ro = tmio_mmc_get_ro,
|
||||
|
@ -1058,9 +1006,6 @@ static const struct mmc_host_ops tmio_mmc_ops = {
|
|||
.multi_io_quirk = tmio_multi_io_quirk,
|
||||
.hw_reset = tmio_mmc_hw_reset,
|
||||
.execute_tuning = tmio_mmc_execute_tuning,
|
||||
.prepare_hs400_tuning = tmio_mmc_prepare_hs400_tuning,
|
||||
.hs400_downgrade = tmio_mmc_hs400_downgrade,
|
||||
.hs400_complete = tmio_mmc_hs400_complete,
|
||||
};
|
||||
|
||||
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
|
||||
|
@@ -1325,11 +1270,6 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);
 
-static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
-{
-	return host->tap_num && mmc_can_retune(host->mmc);
-}
-
 int tmio_mmc_host_runtime_resume(struct device *dev)
 {
 	struct tmio_mmc_host *host = dev_get_drvdata(dev);
@@ -1346,8 +1286,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
 
 	tmio_mmc_enable_dma(host, true);
 
-	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
-		dev_warn(&host->pdev->dev, "Tuning selection failed\n");
+	mmc_retune_needed(host->mmc);
 
 	return 0;
 }
@@ -95,7 +95,7 @@ struct sd_response_header {
 	u8 port_number;
 	u8 command_type;
 	u8 command_index;
-	u8 command_response[0];
+	u8 command_response[];
 } __packed;
 
 struct sd_status_header {
@@ -1363,7 +1363,7 @@ static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
 	int retval;
 	for (n = 0; n < sdio_funcs; n++) {
 		struct sdio_func *sf = card->sdio_func[n];
-		l += snprintf(vub300->vub_name + l,
+		l += scnprintf(vub300->vub_name + l,
 			      sizeof(vub300->vub_name) - l, "_%04X%04X",
 			      sf->vendor, sf->device);
 	}
@@ -100,7 +100,8 @@ enum pm_ret_status {
 };
 
 enum pm_ioctl_id {
-	IOCTL_SET_SD_TAPDELAY = 7,
+	IOCTL_SD_DLL_RESET = 6,
+	IOCTL_SET_SD_TAPDELAY,
 	IOCTL_SET_PLL_FRAC_MODE,
 	IOCTL_GET_PLL_FRAC_MODE,
 	IOCTL_SET_PLL_FRAC_DATA,
@@ -271,6 +272,12 @@ enum tap_delay_type {
 	PM_TAPDELAY_OUTPUT,
 };
 
+enum dll_reset_type {
+	PM_DLL_RESET_ASSERT,
+	PM_DLL_RESET_RELEASE,
+	PM_DLL_RESET_PULSE,
+};
+
 /**
  * struct zynqmp_pm_query_data - PM query data
  * @qid: query ID
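The new IOCTL_SD_DLL_RESET id and dll_reset_type values above are consumed by the Arasan/ZynqMP auto-tuning changes earlier in this series. A hedged sketch of the call shape only (the wrapper function name is illustrative, not a kernel API; the EEMI ioctl call itself matches what the driver does):

#include <linux/firmware/xlnx-zynqmp.h>

/* Pulse the SD DLL reset for the given SD node via the EEMI ioctl. */
static int example_sd_dll_reset_pulse(const struct zynqmp_eemi_ops *eemi_ops,
				      u32 node_id)
{
	return eemi_ops->ioctl(node_id, IOCTL_SD_DLL_RESET,
			       PM_DLL_RESET_PULSE, 0, NULL);
}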
@@ -107,9 +107,6 @@ struct mmc_command {
 	 */
 
 	unsigned int		busy_timeout;	/* busy detect timeout in ms */
-	/* Set this flag only for blocking sanitize request */
-	bool			sanitize_busy;
 
 	struct mmc_data		*data;		/* data segment associated with cmd */
 	struct mmc_request	*mrq;		/* associated request */
 };
@@ -322,6 +322,8 @@ struct mmc_host {
 #define MMC_CAP_3_3V_DDR	(1 << 11)	/* Host supports eMMC DDR 3.3V */
 #define MMC_CAP_1_8V_DDR	(1 << 12)	/* Host supports eMMC DDR 1.8V */
 #define MMC_CAP_1_2V_DDR	(1 << 13)	/* Host supports eMMC DDR 1.2V */
+#define MMC_CAP_DDR		(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \
+				 MMC_CAP_1_2V_DDR)
 #define MMC_CAP_POWER_OFF_CARD	(1 << 14)	/* Can power off after boot */
 #define MMC_CAP_BUS_WIDTH_TEST	(1 << 15)	/* CMD14/CMD19 bus width ok */
 #define MMC_CAP_UHS_SDR12	(1 << 16)	/* Host supports UHS SDR12 mode */
@@ -463,7 +465,10 @@ struct mmc_host {
 	bool			cqe_enabled;
 	bool			cqe_on;
 
-	unsigned long		private[0] ____cacheline_aligned;
+	/* Host Software Queue support */
+	bool			hsq_enabled;
+
+	unsigned long		private[] ____cacheline_aligned;
 };
 
 struct device_node;
@@ -161,6 +161,16 @@ static inline bool mmc_op_multi(u32 opcode)
 #define R1_STATE_PRG	7
 #define R1_STATE_DIS	8
 
+static inline bool mmc_ready_for_data(u32 status)
+{
+	/*
+	 * Some cards mishandle the status bits, so make sure to check both the
+	 * busy indication and the card state.
+	 */
+	return status & R1_READY_FOR_DATA &&
+	       R1_CURRENT_STATE(status) == R1_STATE_TRAN;
+}
+
 /*
  * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS
  * R1 is the low order byte; R2 is the next highest byte, when present.
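The new mmc_ready_for_data() helper above centralizes the CMD13 busy check that the polling improvements in this series rely on. A minimal, hedged sketch of a polling loop of that kind (the loop and its name are illustrative; mmc_send_status() is the existing helper declared in the core's private mmc_ops.h):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

static int example_poll_until_ready(struct mmc_card *card, unsigned int retries)
{
	u32 status;
	int err;

	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;

		/* Both the busy bit and the TRAN state must agree. */
		if (mmc_ready_for_data(status))
			return 0;

		usleep_range(1000, 2000);
	} while (retries--);

	return -ETIMEDOUT;
}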
@@ -25,7 +25,7 @@ struct sdio_func_tuple {
 	struct sdio_func_tuple *next;
 	unsigned char code;
 	unsigned char size;
-	unsigned char data[0];
+	unsigned char data[];
 };
 
 /*
@@ -37,5 +37,6 @@ struct esdhc_platform_data {
 	unsigned int delay_line;
 	unsigned int tuning_step;	/* The delay cell steps in tuning procedure */
 	unsigned int tuning_start_tap;	/* The start delay cell point in tuning procedure */
+	unsigned int strobe_dll_delay_target;	/* The delay cell for strobe pad (read clock) */
 };
 #endif  /* __ASM_ARCH_IMX_ESDHC_H */
@@ -57,7 +57,7 @@ struct mmc_ioc_cmd {
  */
 struct mmc_ioc_multi_cmd {
 	__u64 num_of_cmds;
-	struct mmc_ioc_cmd cmds[0];
+	struct mmc_ioc_cmd cmds[];
 };
 
 #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
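With cmds[] now a true flexible array member, a userspace user of MMC_IOC_MULTI_CMD sizes the ioctl buffer explicitly. A hedged sketch under that assumption (the helper name is illustrative):

#include <stdlib.h>
#include <linux/mmc/ioctl.h>

struct mmc_ioc_multi_cmd *example_alloc_multi_cmd(unsigned int num)
{
	struct mmc_ioc_multi_cmd *mc;

	/* Space for the header plus 'num' trailing mmc_ioc_cmd entries. */
	mc = calloc(1, sizeof(*mc) + num * sizeof(mc->cmds[0]));
	if (mc)
		mc->num_of_cmds = num;
	return mc;
}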