2011-01-02 14:11:59 +08:00
|
|
|
/*
|
|
|
|
* Synopsys DesignWare Multimedia Card Interface driver
|
|
|
|
* (Based on NXP driver for lpc 31xx)
|
|
|
|
*
|
|
|
|
* Copyright (C) 2009 NXP Semiconductors
|
|
|
|
* Copyright (C) 2009, 2010 Imagination Technologies Ltd.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/blkdev.h>
|
|
|
|
#include <linux/clk.h>
|
|
|
|
#include <linux/debugfs.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/interrupt.h>
|
2017-02-17 10:56:39 +08:00
|
|
|
#include <linux/iopoll.h>
|
2011-01-02 14:11:59 +08:00
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/platform_device.h>
|
2017-04-12 06:55:43 +08:00
|
|
|
#include <linux/pm_runtime.h>
|
2011-01-02 14:11:59 +08:00
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/stat.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/irq.h>
|
2014-12-03 07:42:46 +08:00
|
|
|
#include <linux/mmc/card.h>
|
2011-01-02 14:11:59 +08:00
|
|
|
#include <linux/mmc/host.h>
|
|
|
|
#include <linux/mmc/mmc.h>
|
2014-08-22 21:47:51 +08:00
|
|
|
#include <linux/mmc/sd.h>
|
2013-08-30 23:14:05 +08:00
|
|
|
#include <linux/mmc/sdio.h>
|
2011-01-02 14:11:59 +08:00
|
|
|
#include <linux/bitops.h>
|
2011-02-25 10:08:14 +08:00
|
|
|
#include <linux/regulator/consumer.h>
|
2012-09-18 02:16:40 +08:00
|
|
|
#include <linux/of.h>
|
2013-01-12 01:03:53 +08:00
|
|
|
#include <linux/of_gpio.h>
|
2014-01-09 22:35:10 +08:00
|
|
|
#include <linux/mmc/slot-gpio.h>
|
2011-01-02 14:11:59 +08:00
|
|
|
|
|
|
|
#include "dw_mmc.h"
|
|
|
|
|
|
|
|
/* Common flag combinations */
|
2013-05-27 12:47:57 +08:00
|
|
|
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
|
2011-01-02 14:11:59 +08:00
|
|
|
SDMMC_INT_HTO | SDMMC_INT_SBE | \
|
2015-03-10 23:48:10 +08:00
|
|
|
SDMMC_INT_EBE | SDMMC_INT_HLE)
|
2011-01-02 14:11:59 +08:00
|
|
|
#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
|
2015-03-10 23:48:10 +08:00
|
|
|
SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
|
2011-01-02 14:11:59 +08:00
|
|
|
#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
|
2015-03-10 23:48:10 +08:00
|
|
|
DW_MCI_CMD_ERROR_FLAGS)
|
2011-01-02 14:11:59 +08:00
|
|
|
#define DW_MCI_SEND_STATUS 1
|
|
|
|
#define DW_MCI_RECV_STATUS 2
|
|
|
|
#define DW_MCI_DMA_THRESHOLD 16
|
|
|
|
|
2013-08-30 23:13:31 +08:00
|
|
|
#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
|
2016-11-17 15:40:35 +08:00
|
|
|
#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
|
2013-08-30 23:13:31 +08:00
|
|
|
|
2013-04-26 14:35:22 +08:00
|
|
|
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
|
|
|
|
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
|
|
|
|
SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
|
|
|
|
SDMMC_IDMAC_INT_TI)
|
|
|
|
|
2016-09-02 12:14:39 +08:00
|
|
|
#define DESC_RING_BUF_SZ PAGE_SIZE
|
|
|
|
|
2014-10-20 15:12:33 +08:00
|
|
|
struct idmac_desc_64addr {
|
|
|
|
u32 des0; /* Control Descriptor */
|
2017-02-17 10:56:39 +08:00
|
|
|
#define IDMAC_OWN_CLR64(x) \
|
|
|
|
!((x) & cpu_to_le32(IDMAC_DES0_OWN))
|
2014-10-20 15:12:33 +08:00
|
|
|
|
|
|
|
u32 des1; /* Reserved */
|
|
|
|
|
|
|
|
u32 des2; /*Buffer sizes */
|
|
|
|
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
|
2015-03-25 19:27:51 +08:00
|
|
|
((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
|
|
|
|
((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
|
2014-10-20 15:12:33 +08:00
|
|
|
|
|
|
|
u32 des3; /* Reserved */
|
|
|
|
|
|
|
|
u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
|
|
|
|
u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
|
|
|
|
|
|
|
|
u32 des6; /* Lower 32-bits of Next Descriptor Address */
|
|
|
|
u32 des7; /* Upper 32-bits of Next Descriptor Address */
|
|
|
|
};
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
struct idmac_desc {
|
2015-03-25 19:27:51 +08:00
|
|
|
__le32 des0; /* Control Descriptor */
|
2011-01-02 14:11:59 +08:00
|
|
|
#define IDMAC_DES0_DIC BIT(1)
|
|
|
|
#define IDMAC_DES0_LD BIT(2)
|
|
|
|
#define IDMAC_DES0_FD BIT(3)
|
|
|
|
#define IDMAC_DES0_CH BIT(4)
|
|
|
|
#define IDMAC_DES0_ER BIT(5)
|
|
|
|
#define IDMAC_DES0_CES BIT(30)
|
|
|
|
#define IDMAC_DES0_OWN BIT(31)
|
|
|
|
|
2015-03-25 19:27:51 +08:00
|
|
|
__le32 des1; /* Buffer sizes */
|
2011-01-02 14:11:59 +08:00
|
|
|
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
|
2016-06-07 21:37:19 +08:00
|
|
|
((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2015-03-25 19:27:51 +08:00
|
|
|
__le32 des2; /* buffer 1 physical address */
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2015-03-25 19:27:51 +08:00
|
|
|
__le32 des3; /* buffer 2 physical address */
|
2011-01-02 14:11:59 +08:00
|
|
|
};
|
2015-06-25 16:25:07 +08:00
|
|
|
|
|
|
|
/* Each descriptor can transfer up to 4KB of data in chained mode */
|
|
|
|
#define DW_MCI_DESC_DATA_LENGTH 0x1000
|
2011-01-02 14:11:59 +08:00
|
|
|
|
|
|
|
#if defined(CONFIG_DEBUG_FS)
|
|
|
|
static int dw_mci_req_show(struct seq_file *s, void *v)
|
|
|
|
{
|
|
|
|
struct dw_mci_slot *slot = s->private;
|
|
|
|
struct mmc_request *mrq;
|
|
|
|
struct mmc_command *cmd;
|
|
|
|
struct mmc_command *stop;
|
|
|
|
struct mmc_data *data;
|
|
|
|
|
|
|
|
/* Make sure we get a consistent snapshot */
|
|
|
|
spin_lock_bh(&slot->host->lock);
|
|
|
|
mrq = slot->mrq;
|
|
|
|
|
|
|
|
if (mrq) {
|
|
|
|
cmd = mrq->cmd;
|
|
|
|
data = mrq->data;
|
|
|
|
stop = mrq->stop;
|
|
|
|
|
|
|
|
if (cmd)
|
|
|
|
seq_printf(s,
|
|
|
|
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
|
|
|
|
cmd->opcode, cmd->arg, cmd->flags,
|
|
|
|
cmd->resp[0], cmd->resp[1], cmd->resp[2],
|
|
|
|
cmd->resp[2], cmd->error);
|
|
|
|
if (data)
|
|
|
|
seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
|
|
|
|
data->bytes_xfered, data->blocks,
|
|
|
|
data->blksz, data->flags, data->error);
|
|
|
|
if (stop)
|
|
|
|
seq_printf(s,
|
|
|
|
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
|
|
|
|
stop->opcode, stop->arg, stop->flags,
|
|
|
|
stop->resp[0], stop->resp[1], stop->resp[2],
|
|
|
|
stop->resp[2], stop->error);
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_bh(&slot->host->lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2018-02-23 16:47:26 +08:00
|
|
|
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
|
2011-01-02 14:11:59 +08:00
|
|
|
|
|
|
|
/*
 * debugfs "regs" file: dump a snapshot of the key controller registers.
 *
 * Takes a runtime-PM reference around the register reads so the block is
 * powered while we touch the MMIO space.
 */
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	/* Keep the controller powered while reading registers. */
	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
|
2018-02-23 16:47:26 +08:00
|
|
|
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
|
2011-01-02 14:11:59 +08:00
|
|
|
|
|
|
|
/*
 * Create the per-slot debugfs entries ("regs", "req", "state",
 * "pending_events", "completed_events") under the mmc host's debugfs root.
 *
 * Failure is non-fatal: we only log an error, since debugfs is an
 * optional debugging aid.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
|
|
|
|
#endif /* defined(CONFIG_DEBUG_FS) */
|
|
|
|
|
2017-02-17 10:56:41 +08:00
|
|
|
/*
 * Assert the requested reset bit(s) in the CTRL register and poll until
 * the hardware clears them again (self-clearing reset bits).
 *
 * @reset: mask of CTRL reset bits to assert.
 *
 * Returns true on success, false if the bits did not clear within
 * 500 ms (polled in atomic context, 1 us interval).
 */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}
|
2014-08-22 21:47:51 +08:00
|
|
|
|
2017-02-17 10:59:44 +08:00
|
|
|
/*
 * Before issuing a data transfer command, wait (up to 500 ms) for the
 * card to deassert its busy indication in the STATUS register.
 *
 * On timeout we only log and proceed anyway; the command itself will
 * then fail and be handled by the normal error paths.
 */
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}
|
|
|
|
|
|
|
|
/*
 * Issue a "non-request" controller command (e.g. clock update) and poll
 * until the hardware clears the START bit, signalling it has taken the
 * command. Logs an error on a 500 ms timeout.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	/* START is self-clearing once the controller accepts the command. */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/*
 * Translate an mmc_command into the controller's CMD register flags.
 *
 * Sets STOP for abort-class commands, PRV_DAT_WAIT for data commands,
 * response/CRC expectation bits from cmd->flags, and data direction bits
 * from cmd->data. CMD11 (voltage switch) gets special handling: the state
 * machine is moved to STATE_SENDING_CMD11 and low-power clock gating is
 * disabled for the slot before the switch.
 *
 * Returns the assembled CMD register value (without SDMMC_CMD_START).
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Hold-register timing unless the slot opted out. */
	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
|
|
|
|
|
2013-08-30 23:14:05 +08:00
|
|
|
/*
 * Prepare host->stop_abort with the appropriate stop/abort command for a
 * failed data command: CMD12 (STOP_TRANSMISSION) for block read/write and
 * tuning commands, or an SDIO CCCR abort (CMD52) for SD_IO_RW_EXTENDED.
 *
 * Returns the CMD register value for that stop command, or 0 when @cmd
 * has no data or needs no stop.
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		/* Write to the CCCR ABORT register, same function as cmd. */
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
|
|
|
|
|
2017-07-11 17:38:37 +08:00
|
|
|
/*
 * Arm the software command-timeout (cto) timer as a backstop for the
 * hardware response timeout: computes the hardware timeout in ms from
 * TMOUT/CLKDIV and bus_hz, adds slack, and schedules host->cto_timer
 * unless the command has already completed.
 */
static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	/* 64-bit math: clks * div * 1000 can overflow 32 bits. */
	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/*
 * Kick off @cmd on the controller: program CMDARG, wait for any card-busy
 * condition, write CMD with the START bit, and (for commands expecting a
 * response) arm the software command-timeout timer.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}
|
|
|
|
|
2013-08-30 23:14:05 +08:00
|
|
|
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
|
2011-01-02 14:11:59 +08:00
|
|
|
{
|
2016-11-17 15:40:37 +08:00
|
|
|
struct mmc_command *stop = &host->stop_abort;
|
2015-08-03 15:07:21 +08:00
|
|
|
|
2013-08-30 23:14:05 +08:00
|
|
|
dw_mci_start_command(host, stop, host->stop_cmdr);
|
2011-01-02 14:11:59 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* DMA interface functions */
|
|
|
|
/* DMA interface functions */
/*
 * Abort any in-flight DMA and mark the transfer as complete so the
 * state-machine tasklet can move on.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
|
|
|
|
|
|
|
|
/*
 * Unmap the current data's scatterlist if this driver mapped it
 * (COOKIE_MAPPED); transfers pre-mapped by the block layer are left alone.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}
|
|
|
|
|
2013-08-30 23:14:33 +08:00
|
|
|
static void dw_mci_idmac_reset(struct dw_mci *host)
|
|
|
|
{
|
|
|
|
u32 bmod = mci_readl(host, BMOD);
|
|
|
|
/* Software reset of DMA */
|
|
|
|
bmod |= SDMMC_IDMAC_SWRESET;
|
|
|
|
mci_writel(host, BMOD, bmod);
|
|
|
|
}
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/*
 * Halt the internal DMAC: deselect it in CTRL (with a DMA reset), then
 * disable it in BMOD and trigger a software reset.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
|
|
|
|
|
2015-09-16 14:41:23 +08:00
|
|
|
/*
 * DMA completion callback (shared by IDMAC and external-DMA paths).
 * For EDMAC reads, sync the scatterlist back to the CPU, then clean up
 * the mapping and notify the state-machine tasklet.
 */
static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
|
|
|
|
|
2016-09-02 12:14:37 +08:00
|
|
|
/*
 * Initialize the IDMAC descriptor ring in host->sg_cpu/host->sg_dma:
 * forward-link all descriptors, mark the last one end-of-ring, reset the
 * DMAC, unmask only the TX/RX-complete interrupts, and program the
 * descriptor base address. Handles both the 64-bit and 32-bit address
 * descriptor layouts.
 *
 * Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Fill the 64-bit-address IDMAC descriptor ring for @data's scatterlist.
 * Each sg entry is split into chunks of at most DW_MCI_DESC_DATA_LENGTH.
 * For every descriptor we first wait for the hardware to release its OWN
 * bit, then hand it back with OWN set and per-descriptor interrupts
 * disabled; the first/last descriptors get the FD/LD markers.
 *
 * Returns 0 on success, -EINVAL if a descriptor stays owned by the IDMAC
 * (the ring is then wiped and re-initialized).
 */
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
|
2015-06-25 16:25:07 +08:00
|
|
|
|
2014-10-20 15:12:33 +08:00
|
|
|
|
2016-09-02 12:14:37 +08:00
|
|
|
/*
 * 32-bit-address counterpart of dw_mci_prepare_desc64(): fill the
 * little-endian idmac_desc ring for @data's scatterlist, chunking each sg
 * entry at DW_MCI_DESC_DATA_LENGTH, waiting for the OWN bit to clear
 * before reusing each descriptor, and marking first/last with FD/LD.
 *
 * Returns 0 on success, -EINVAL on an OWN-bit timeout (ring is wiped and
 * re-initialized).
 */
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
|
|
|
|
|
2015-09-16 14:41:23 +08:00
|
|
|
/*
 * Prepare the descriptor ring for the current host->data and start the
 * internal DMAC: reset DMA (in case PIO was used before), select IDMAC in
 * CTRL, enable it in BMOD, and poke the poll-demand register.
 *
 * Returns 0 on success or the error from descriptor preparation.
 */
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}
|
|
|
|
|
2012-11-07 05:55:31 +08:00
|
|
|
/* DMA operations backed by the controller's internal DMAC (IDMAC) */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
|
|
|
|
|
|
|
|
/*
 * Abort any in-flight transfer on the external DMA channel.
 * Uses the async terminate variant, so this is safe in atomic context;
 * the dmaengine driver completes the teardown later.
 */
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}
|
|
|
|
|
|
|
|
static int dw_mci_edmac_start_dma(struct dw_mci *host,
|
|
|
|
unsigned int sg_len)
|
|
|
|
{
|
|
|
|
struct dma_slave_config cfg;
|
|
|
|
struct dma_async_tx_descriptor *desc = NULL;
|
|
|
|
struct scatterlist *sgl = host->data->sg;
|
2017-09-03 21:39:50 +08:00
|
|
|
static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
|
2015-09-16 14:41:23 +08:00
|
|
|
u32 sg_elems = host->data->sg_len;
|
|
|
|
u32 fifoth_val;
|
|
|
|
u32 fifo_offset = host->fifo_reg - host->regs;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* Set external dma config: burst size, burst width */
|
2015-11-12 22:14:23 +08:00
|
|
|
cfg.dst_addr = host->phy_regs + fifo_offset;
|
2015-09-16 14:41:23 +08:00
|
|
|
cfg.src_addr = cfg.dst_addr;
|
|
|
|
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
|
|
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
|
|
|
|
|
|
/* Match burst msize with external dma config */
|
|
|
|
fifoth_val = mci_readl(host, FIFOTH);
|
|
|
|
cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
|
|
|
|
cfg.src_maxburst = cfg.dst_maxburst;
|
|
|
|
|
|
|
|
if (host->data->flags & MMC_DATA_WRITE)
|
|
|
|
cfg.direction = DMA_MEM_TO_DEV;
|
|
|
|
else
|
|
|
|
cfg.direction = DMA_DEV_TO_MEM;
|
|
|
|
|
|
|
|
ret = dmaengine_slave_config(host->dms->ch, &cfg);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(host->dev, "Failed to config edmac.\n");
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
|
|
|
|
sg_len, cfg.direction,
|
|
|
|
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
|
|
|
if (!desc) {
|
|
|
|
dev_err(host->dev, "Can't prepare slave sg.\n");
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set dw_mci_dmac_complete_dma as callback */
|
|
|
|
desc->callback = dw_mci_dmac_complete_dma;
|
|
|
|
desc->callback_param = (void *)host;
|
|
|
|
dmaengine_submit(desc);
|
|
|
|
|
|
|
|
/* Flush cache before write */
|
|
|
|
if (host->data->flags & MMC_DATA_WRITE)
|
2017-06-05 12:41:34 +08:00
|
|
|
dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
|
2015-09-16 14:41:23 +08:00
|
|
|
sg_elems, DMA_TO_DEVICE);
|
|
|
|
|
|
|
|
dma_async_issue_pending(host->dms->ch);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dw_mci_edmac_init(struct dw_mci *host)
|
|
|
|
{
|
|
|
|
/* Request external dma channel */
|
|
|
|
host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
|
|
|
|
if (!host->dms)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
|
|
|
|
if (!host->dms->ch) {
|
2015-10-23 03:53:46 +08:00
|
|
|
dev_err(host->dev, "Failed to get external DMA channel.\n");
|
2015-09-16 14:41:23 +08:00
|
|
|
kfree(host->dms);
|
|
|
|
host->dms = NULL;
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dw_mci_edmac_exit(struct dw_mci *host)
|
|
|
|
{
|
|
|
|
if (host->dms) {
|
|
|
|
if (host->dms->ch) {
|
|
|
|
dma_release_channel(host->dms->ch);
|
|
|
|
host->dms->ch = NULL;
|
|
|
|
}
|
|
|
|
kfree(host->dms);
|
|
|
|
host->dms = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* DMA operations backed by an external dmaengine channel (EDMAC) */
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
|
|
|
|
|
2012-02-06 15:55:07 +08:00
|
|
|
/*
 * Validate a data transfer for DMA and map its scatterlist.
 *
 * @host:   controller instance
 * @data:   transfer to check/map
 * @cookie: COOKIE_MAPPED or COOKIE_PRE_MAPPED, recorded in data->host_cookie
 *          so dw_mci_post_req() knows whether an unmap is needed
 *
 * Returns the number of mapped sg entries, or -EINVAL if the transfer is
 * unsuitable for DMA (too short, non-word-aligned, or mapping failed) —
 * the caller then falls back to PIO.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Already mapped by dw_mci_pre_req(); reuse that mapping. */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	/* Block size must be a multiple of 4 bytes. */
	if (data->blksz & 3)
		return -EINVAL;

	/* Every sg segment must be word-aligned in offset and length. */
	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}
|
|
|
|
|
|
|
|
/*
 * mmc_host_ops.pre_req hook: pre-map the next request's data for DMA while
 * the previous request is still in flight, so the mapping cost is hidden.
 * On any failure the cookie is left as COOKIE_UNMAPPED and the request-time
 * path maps (or falls back to PIO) instead.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}
|
|
|
|
|
|
|
|
/*
 * mmc_host_ops.post_req hook: undo the DMA mapping made by dw_mci_pre_req()
 * or by the request path, and reset the cookie. A COOKIE_UNMAPPED cookie
 * means the data was never mapped (e.g. PIO fallback), so nothing to do.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}
|
|
|
|
|
2017-02-17 10:56:42 +08:00
|
|
|
/*
 * mmc_host_ops.get_cd hook: report whether a card is present.
 *
 * Detection order:
 *   1. Polling or non-removable hosts are always "present" (logged once).
 *   2. A card-detect GPIO, if one is wired up.
 *   3. The controller's CDETECT register (active-low per slot bit).
 *
 * Also keeps the DW_MMC_CARD_PRESENT flag in sync, under host->lock so
 * insertion/removal transitions are logged exactly once.
 *
 * Returns 1 if a card is present, 0 otherwise.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					"card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					"card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		/* CDETECT bit is low when the card is inserted. */
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}
|
|
|
|
|
2013-08-30 23:13:42 +08:00
|
|
|
/*
 * Program FIFOTH (burst MSIZE and RX/TX watermarks) to suit this transfer's
 * block size, picking the largest burst size that evenly divides both the
 * block depth and the TX watermark complement. Only meaningful for DMA;
 * PIO paths return early.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	/* Candidate burst (MSIZE) values supported by the controller. */
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	/* Search from the largest burst size down for one that fits. */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}
|
|
|
|
|
2016-06-21 13:35:38 +08:00
|
|
|
/*
 * Configure the card read/write threshold (CDTHRCTL) for this transfer,
 * or disable it when the transfer/timing combination doesn't qualify.
 * Read threshold requires IP version >= 2.40a; write threshold requires
 * >= 2.80a and is only used in HS400 mode.
 */
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
		(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * Card write Threshold is introduced since 2.80a
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
		host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	/* Thresholding only pays off at high-speed timings. */
	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	/* A whole block must fit in the FIFO for thresholding to work. */
	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}
|
|
|
|
|
2012-02-06 15:55:07 +08:00
|
|
|
/*
 * Try to start the data phase via DMA.
 *
 * Maps the scatterlist, tunes FIFOTH if the block size changed, enables the
 * controller's DMA interface, masks the PIO (RXDR/TXDR) interrupts, and
 * hands the transfer to the active dma_ops backend.
 *
 * Returns 0 on success; a negative error (caller falls back to PIO) when
 * DMA is unavailable, the transfer is unsuitable, or the backend refuses it.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Set up the data phase for a request: record direction, program the card
 * threshold, and try DMA first. If DMA setup fails, fall back to PIO —
 * start the sg_miter, unmask the RXDR/TXDR interrupts, and disable the
 * controller's DMA interface.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	/* Marked in-progress; overwritten with the real status on completion. */
	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		/* DMA refused the transfer: drive it by PIO instead. */
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set watermark same as data size.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
|
|
|
|
|
2012-11-19 12:56:21 +08:00
|
|
|
/*
 * Program the card clock and bus width for a slot.
 *
 * @slot:          slot whose cached clock/ctype should be applied
 * @force_clkinit: reprogram the divider even if the speed is unchanged
 *                 (used from PM paths; also silences the info log)
 *
 * The divider update follows the databook sequence: disable CLKENA, inform
 * the CIU with an update-clock command, write CLKDIV, inform the CIU again,
 * re-enable the clock (low-power unless the slot forbids it), and inform
 * the CIU a final time. While a CMD11 voltage switch is pending, every
 * update-clock command carries SDMMC_CMD_VOLT_SWITCH.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		/* Clock gated: just disable and tell the CIU. */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*n; 0 means bypass (bus_hz == clock). */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			/* Silent the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
|
|
|
|
|
2011-12-22 17:01:29 +08:00
|
|
|
/*
 * Issue one command (with optional data phase) from slot->mrq on the
 * controller. Resets per-request host state, programs the data registers,
 * submits the data phase, then starts the command. For CMD11 (voltage
 * switch) a 500ms safety timer is armed in case the interrupt never comes.
 * Must be called with host->lock held (see dw_mci_queue_request()).
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	/* Fresh per-request bookkeeping. */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	/* Precompute the stop/abort command in case the transfer fails. */
	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
|
|
|
|
|
2011-12-22 17:01:29 +08:00
|
|
|
static void dw_mci_start_request(struct dw_mci *host,
|
|
|
|
struct dw_mci_slot *slot)
|
|
|
|
{
|
|
|
|
struct mmc_request *mrq = slot->mrq;
|
|
|
|
struct mmc_command *cmd;
|
|
|
|
|
|
|
|
cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
|
|
|
|
__dw_mci_start_request(host, slot, cmd);
|
|
|
|
}
|
|
|
|
|
2011-06-24 20:55:10 +08:00
|
|
|
/* must be called with host->lock held */
|
2011-01-02 14:11:59 +08:00
|
|
|
/*
 * Attach @mrq to @slot and either start it immediately (host idle) or park
 * the slot on host->queue for the tasklet to pick up later. A stuck
 * CMD11 voltage switch is recovered by forcing the state machine back to
 * idle first.
 */
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
|
|
|
|
|
|
|
|
/*
 * mmc_host_ops.request hook: entry point for a new request from the core.
 * Fails fast with -ENOMEDIUM when no card is present, otherwise queues the
 * request under host->lock.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 *
	 * NOTE(review): the presence check below actually runs before the
	 * lock is taken (dw_mci_get_cd takes host->lock internally), so the
	 * two steps are no longer one atomic section — presumably removal
	 * after the check is handled elsewhere; verify against the state
	 * machine.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
|
|
|
|
|
|
|
|
/*
 * mmc_host_ops.set_ios hook: apply bus width, timing (DDR bit in UHS_REG),
 * clock, and power state requested by the core.
 *
 * Power transitions:
 *   MMC_POWER_UP  - enable vmmc and assert the slot's PWREN bit; flag the
 *                   slot so the next command carries SDMMC_CMD_INIT.
 *   MMC_POWER_ON  - enable vqmmc once, reset the controller after power-up,
 *                   then program clock/bus width.
 *   MMC_POWER_OFF - gate the clock first, then drop vmmc/vqmmc and PWREN.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the platform glue a chance to tweak things (e.g. exynos). */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	/* A running clock after CMD11 means the voltage switch finished. */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}
|
|
|
|
|
2014-08-22 21:47:51 +08:00
|
|
|
static int dw_mci_card_busy(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct dw_mci_slot *slot = mmc_priv(mmc);
|
|
|
|
u32 status;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check the busy bit which is low when DAT[3:0]
|
|
|
|
* (the data lines) are 0000
|
|
|
|
*/
|
|
|
|
status = mci_readl(slot->host, STATUS);
|
|
|
|
|
|
|
|
return !!(status & SDMMC_STATUS_BUSY);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * mmc_host_ops.start_signal_voltage_switch hook: switch the I/O signalling
 * voltage between 3.3V and 1.8V. Platform glue may override the whole
 * sequence via drv_data->switch_voltage. Returns 0 on success or the
 * regulator error.
 */
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
					 "Regulator set error %d - %s V\n",
					 ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	/* Only commit UHS_REG once the regulator change succeeded. */
	mci_writel(host, UHS_REG, uhs);

	return 0;
}
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
static int dw_mci_get_ro(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
int read_only;
|
|
|
|
struct dw_mci_slot *slot = mmc_priv(mmc);
|
2014-03-03 10:36:46 +08:00
|
|
|
int gpio_ro = mmc_gpio_get_ro(mmc);
|
2011-01-02 14:11:59 +08:00
|
|
|
|
|
|
|
/* Use platform get_ro function, else try on board write protect */
|
remove lots of IS_ERR_VALUE abuses
Most users of IS_ERR_VALUE() in the kernel are wrong, as they
pass an 'int' into a function that takes an 'unsigned long'
argument. This happens to work because the type is sign-extended
on 64-bit architectures before it gets converted into an
unsigned type.
However, anything that passes an 'unsigned short' or 'unsigned int'
argument into IS_ERR_VALUE() is guaranteed to be broken, as are
8-bit integers and types that are wider than 'unsigned long'.
Andrzej Hajda has already fixed a lot of the worst abusers that
were causing actual bugs, but it would be nice to prevent any
users that are not passing 'unsigned long' arguments.
This patch changes all users of IS_ERR_VALUE() that I could find
on 32-bit ARM randconfig builds and x86 allmodconfig. For the
moment, this doesn't change the definition of IS_ERR_VALUE()
because there are probably still architecture specific users
elsewhere.
Almost all the warnings I got are for files that are better off
using 'if (err)' or 'if (err < 0)'.
The only legitimate user I could find that we get a warning for
is the (32-bit only) freescale fman driver, so I did not remove
the IS_ERR_VALUE() there but changed the type to 'unsigned long'.
For 9pfs, I just worked around one user whose calling conventions
are so obscure that I did not dare change the behavior.
I was using this definition for testing:
#define IS_ERR_VALUE(x) ((unsigned long*)NULL == (typeof (x)*)NULL && \
unlikely((unsigned long long)(x) >= (unsigned long long)(typeof(x))-MAX_ERRNO))
which ends up making all 16-bit or wider types work correctly with
the most plausible interpretation of what IS_ERR_VALUE() was supposed
to return according to its users, but also causes a compile-time
warning for any users that do not pass an 'unsigned long' argument.
I suggested this approach earlier this year, but back then we ended
up deciding to just fix the users that are obviously broken. After
the initial warning that caused me to get involved in the discussion
(fs/gfs2/dir.c) showed up again in the mainline kernel, Linus
asked me to send the whole thing again.
[ Updated the 9p parts as per Al Viro - Linus ]
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Andrzej Hajda <a.hajda@samsung.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.org/lkml/2016/1/7/363
Link: https://lkml.org/lkml/2016/5/27/486
Acked-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org> # For nvmem part
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-05-28 05:23:25 +08:00
|
|
|
if (gpio_ro >= 0)
|
2014-03-03 10:36:46 +08:00
|
|
|
read_only = gpio_ro;
|
2011-01-02 14:11:59 +08:00
|
|
|
else
|
|
|
|
read_only =
|
|
|
|
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
|
|
|
|
|
|
|
|
dev_dbg(&mmc->class_dev, "card is %s\n",
|
|
|
|
read_only ? "read-only" : "read-write");
|
|
|
|
|
|
|
|
return read_only;
|
|
|
|
}
|
|
|
|
|
2016-01-14 09:08:02 +08:00
|
|
|
/*
 * mmc_host_ops .hw_reset callback: pulse the card's RST_n line.
 *
 * Resets the IDMAC (when in use) and the controller DMA/FIFO state
 * first, then toggles the per-slot hardware-reset bit in RST_N with
 * the delays the eMMC spec requires.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	/* IDMAC state is stale across a card reset; reset it too. */
	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	/* If the controller-side reset fails, leave RST_n untouched. */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);	/* assert RST_n */
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);				/* >= tRstW */
	reset |= SDMMC_RST_HWACTIVE << slot->id;	/* release RST_n */
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);				/* >= tRSCA */
}
|
|
|
|
|
2014-12-03 07:42:46 +08:00
|
|
|
/*
 * mmc_host_ops .init_card callback: tune per-card clock gating.
 */
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			/* Record the decision so other paths can honour it. */
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			/*
			 * A clock-update command makes the CIU latch the
			 * new CLKENA value.
			 */
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}
|
|
|
|
|
2017-04-18 19:29:20 +08:00
|
|
|
static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
|
2011-08-29 15:41:46 +08:00
|
|
|
{
|
|
|
|
struct dw_mci *host = slot->host;
|
2014-12-03 07:42:47 +08:00
|
|
|
unsigned long irqflags;
|
2011-08-29 15:41:46 +08:00
|
|
|
u32 int_mask;
|
|
|
|
|
2014-12-03 07:42:47 +08:00
|
|
|
spin_lock_irqsave(&host->irq_lock, irqflags);
|
|
|
|
|
2011-08-29 15:41:46 +08:00
|
|
|
/* Enable/disable Slot Specific SDIO interrupt */
|
|
|
|
int_mask = mci_readl(host, INTMASK);
|
2014-12-03 07:42:46 +08:00
|
|
|
if (enb)
|
|
|
|
int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
|
|
|
|
else
|
|
|
|
int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
|
|
|
|
mci_writel(host, INTMASK, int_mask);
|
2014-12-03 07:42:47 +08:00
|
|
|
|
|
|
|
spin_unlock_irqrestore(&host->irq_lock, irqflags);
|
2011-08-29 15:41:46 +08:00
|
|
|
}
|
|
|
|
|
2017-04-18 19:29:20 +08:00
|
|
|
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
|
|
|
|
{
|
|
|
|
struct dw_mci_slot *slot = mmc_priv(mmc);
|
2017-04-18 19:37:32 +08:00
|
|
|
struct dw_mci *host = slot->host;
|
2017-04-18 19:29:20 +08:00
|
|
|
|
|
|
|
__dw_mci_enable_sdio_irq(slot, enb);
|
2017-04-18 19:37:32 +08:00
|
|
|
|
|
|
|
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
|
|
|
|
if (enb)
|
|
|
|
pm_runtime_get_noresume(host->dev);
|
|
|
|
else
|
|
|
|
pm_runtime_put_noidle(host->dev);
|
2017-04-18 19:29:20 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	/* Re-arm the slot's SDIO interrupt once the core has handled it. */
	__dw_mci_enable_sdio_irq(mmc_priv(mmc), 1);
}
|
|
|
|
|
2013-08-30 23:12:42 +08:00
|
|
|
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	/* Tuning is variant-specific; without a hook there is nothing to do. */
	if (!drv_data || !drv_data->execute_tuning)
		return -EINVAL;

	return drv_data->execute_tuning(slot, opcode);
}
|
|
|
|
|
2015-08-03 15:07:21 +08:00
|
|
|
static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
|
|
|
|
struct mmc_ios *ios)
|
2015-01-29 10:41:57 +08:00
|
|
|
{
|
|
|
|
struct dw_mci_slot *slot = mmc_priv(mmc);
|
|
|
|
struct dw_mci *host = slot->host;
|
|
|
|
const struct dw_mci_drv_data *drv_data = host->drv_data;
|
|
|
|
|
|
|
|
if (drv_data && drv_data->prepare_hs400_tuning)
|
|
|
|
return drv_data->prepare_hs400_tuning(host, ios);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-17 10:56:40 +08:00
|
|
|
/*
 * Full controller reset used for error recovery.
 *
 * Resets the controller/FIFO (and DMA when in use), clears all raw
 * interrupt status, waits for any outstanding DMA request to drain,
 * and reinitializes the IDMAC.  Always finishes with a clock-update
 * command so the CIU re-latches its clock registers.
 *
 * Returns true on success, false if a reset step timed out or failed.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* PIO mode has no DMA request to wait out; we're done. */
		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/* Host controller operations handed to the MMC core. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset               = dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};
|
|
|
|
|
|
|
|
/*
 * Finish the current request, start the next queued one (if any), and
 * report completion to the MMC core.
 *
 * Called with host->lock held.  The lock is dropped around
 * mmc_request_done() (hence __releases/__acquires) since the core's
 * completion callback runs without it.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting; kick off its request now. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		/* Keep waiting for CMD11 completion if one is in flight. */
		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
|
|
|
|
|
2013-08-30 23:14:17 +08:00
|
|
|
/*
 * Consume a completed command: copy the card's response out of the
 * RESP0..RESP3 registers and translate the latched interrupt status
 * into a Linux error code.  Returns cmd->error (0 on success).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	/* Status was latched by the IRQ handler; clear for the next cmd. */
	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response: resp[] is filled in reverse
			 * register order (resp[3] from RESP0). */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Response timeout takes priority over CRC and generic errors. */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}
|
|
|
|
|
|
|
|
/*
 * Consume a completed data transfer: map the latched data-phase status
 * bits to a Linux error code, reset the controller on error, and set
 * bytes_xfered on success.  Returns data->error (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			/* Data read timeout */
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			/* Data CRC error */
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on direction. */
			if (host->dir_status ==
			    DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
				   DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
|
|
|
|
|
2015-08-11 00:27:18 +08:00
|
|
|
static void dw_mci_set_drto(struct dw_mci *host)
|
|
|
|
{
|
|
|
|
unsigned int drto_clks;
|
2017-10-13 04:11:17 +08:00
|
|
|
unsigned int drto_div;
|
2015-08-11 00:27:18 +08:00
|
|
|
unsigned int drto_ms;
|
2017-10-13 04:11:18 +08:00
|
|
|
unsigned long irqflags;
|
2015-08-11 00:27:18 +08:00
|
|
|
|
|
|
|
drto_clks = mci_readl(host, TMOUT) >> 8;
|
2017-10-13 04:11:17 +08:00
|
|
|
drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
|
|
|
|
if (drto_div == 0)
|
|
|
|
drto_div = 1;
|
2018-02-28 19:53:18 +08:00
|
|
|
|
|
|
|
drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
|
|
|
|
host->bus_hz);
|
2015-08-11 00:27:18 +08:00
|
|
|
|
|
|
|
/* add a bit spare time */
|
|
|
|
drto_ms += 10;
|
|
|
|
|
2017-10-13 04:11:18 +08:00
|
|
|
spin_lock_irqsave(&host->irq_lock, irqflags);
|
|
|
|
if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
|
|
|
|
mod_timer(&host->dto_timer,
|
|
|
|
jiffies + msecs_to_jiffies(drto_ms));
|
|
|
|
spin_unlock_irqrestore(&host->irq_lock, irqflags);
|
2015-08-11 00:27:18 +08:00
|
|
|
}
|
|
|
|
|
2017-10-13 04:11:16 +08:00
|
|
|
/*
 * Atomically consume a pending CMD_COMPLETE event.  Returns false when
 * none was pending; otherwise stops the command timeout timer, clears
 * the event bit, and returns true.
 */
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return false;

	/*
	 * Really be certain that the timer has stopped.  This is a bit of
	 * paranoia and could only really happen if we had really bad
	 * interrupt latency and the interrupt routine and timeout were
	 * running concurrently so that the del_timer() in the interrupt
	 * handler couldn't run.
	 */
	WARN_ON(del_timer_sync(&host->cto_timer));
	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	return true;
}
|
|
|
|
|
2017-10-13 04:11:18 +08:00
|
|
|
/*
 * Atomically consume a pending DATA_COMPLETE event.  Returns false when
 * none was pending; otherwise stops the data timeout timer, clears the
 * event bit, and returns true.
 */
static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		return false;

	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
	WARN_ON(del_timer_sync(&host->dto_timer));
	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);

	return true;
}
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/*
 * Bottom-half state machine that drives a request through its phases
 * (command -> data -> busy -> stop).  Runs under host->lock and loops
 * until the state stops changing, then stores the new state back into
 * host->state.  Several cases deliberately fall through to the next
 * phase when it can make immediate progress.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			/* Nothing in flight; nothing to advance. */
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			/* After a successful SBC (CMD23), issue the real cmd. */
			if (cmd == mrq->sbc && !err) {
				__dw_mci_start_request(host, host->slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case controller will move into a data
				 * transfer state after a response error or
				 * response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if ((err != -ETIMEDOUT) &&
				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			/* No data phase (or command error): end the request. */
			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!dw_mci_clear_pending_data_complete(host)) {
				/*
				 * If data error interrupt comes but data over
				 * interrupt doesn't come within the given time.
				 * in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				/* With an SBC the controller auto-stops. */
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (!mrq->sbc && mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the (possibly errored) transfer to drain. */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
|
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
/* push final bytes to part_buf, only use during push */
|
|
|
|
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
|
2011-01-02 14:11:59 +08:00
|
|
|
{
|
2011-06-24 20:57:56 +08:00
|
|
|
memcpy((void *)&host->part_buf, buf, cnt);
|
|
|
|
host->part_buf_count = cnt;
|
|
|
|
}
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
/* append bytes to part_buf, only use during push */
|
|
|
|
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
|
|
|
|
{
|
|
|
|
cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
|
|
|
|
memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
|
|
|
|
host->part_buf_count += cnt;
|
|
|
|
return cnt;
|
|
|
|
}
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
/* pull first bytes from part_buf, only use during pull */
|
|
|
|
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
|
|
|
|
{
|
2015-08-03 15:07:21 +08:00
|
|
|
cnt = min_t(int, cnt, host->part_buf_count);
|
2011-06-24 20:57:56 +08:00
|
|
|
if (cnt) {
|
|
|
|
memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
|
|
|
|
cnt);
|
|
|
|
host->part_buf_count -= cnt;
|
|
|
|
host->part_buf_start += cnt;
|
2011-01-02 14:11:59 +08:00
|
|
|
}
|
2011-06-24 20:57:56 +08:00
|
|
|
return cnt;
|
2011-01-02 14:11:59 +08:00
|
|
|
}
|
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
/* pull final bytes from the part_buf, assuming it's just been filled */
|
|
|
|
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
|
2011-01-02 14:11:59 +08:00
|
|
|
{
|
2011-06-24 20:57:56 +08:00
|
|
|
memcpy(buf, &host->part_buf, cnt);
|
|
|
|
host->part_buf_start = cnt;
|
|
|
|
host->part_buf_count = (1 << host->data_shift) - cnt;
|
|
|
|
}
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
/*
 * Push @cnt bytes from @buf into the controller FIFO using 16-bit writes.
 * A trailing odd byte is held in host->part_buf until the next call, or
 * flushed immediately when this push completes the whole transfer.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		/* part_buf now holds a full 16-bit word: write it out */
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf is not 2-byte aligned: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
/*
 * Pull @cnt bytes from the controller FIFO into @buf using 16-bit reads.
 * If cnt is odd, one extra word is read and its leftover byte is parked
 * in host->part_buf via dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf is not 2-byte aligned: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* read one more word; surplus bytes stay in part_buf */
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
|
|
|
|
|
|
|
|
/*
 * Push @cnt bytes from @buf into the controller FIFO using 32-bit writes.
 * Up to 3 trailing bytes are held in host->part_buf until the next call,
 * or flushed immediately when this push completes the whole transfer.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		/* part_buf now holds a full 32-bit word: write it out */
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf is not 4-byte aligned: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}
|
|
|
|
|
|
|
|
/*
 * Pull @cnt bytes from the controller FIFO into @buf using 32-bit reads.
 * If cnt is not a multiple of 4, one extra word is read and its leftover
 * bytes are parked in host->part_buf via dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf is not 4-byte aligned: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* read one more word; surplus bytes stay in part_buf */
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
|
|
|
|
|
|
|
|
/*
 * Push @cnt bytes from @buf into the controller FIFO using 64-bit writes.
 * Up to 7 trailing bytes are held in host->part_buf until the next call,
 * or flushed immediately when this push completes the whole transfer.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		/* part_buf now holds a full 64-bit word: write it out */
		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf is not 8-byte aligned: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}
|
|
|
|
|
|
|
|
/*
 * Pull @cnt bytes from the controller FIFO into @buf using 64-bit reads.
 * If cnt is not a multiple of 8, one extra word is read and its leftover
 * bytes are parked in host->part_buf via dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf is not 8-byte aligned: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* read one more word; surplus bytes stay in part_buf */
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
|
|
|
|
{
|
|
|
|
int len;
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2011-06-24 20:57:56 +08:00
|
|
|
/* get remaining partial bytes */
|
|
|
|
len = dw_mci_pull_part_bytes(host, buf, cnt);
|
|
|
|
if (unlikely(len == cnt))
|
|
|
|
return;
|
|
|
|
buf += len;
|
|
|
|
cnt -= len;
|
|
|
|
|
|
|
|
/* get the rest of the data */
|
|
|
|
host->pull_data(host, buf, cnt);
|
2011-01-02 14:11:59 +08:00
|
|
|
}
|
|
|
|
|
2013-01-22 15:46:30 +08:00
|
|
|
/*
 * PIO read path: drain the controller FIFO into the request's scatterlist
 * via the sg_miter iterator.  @dto indicates this call is servicing a
 * data-transfer-over condition, in which case we also keep draining while
 * the FIFO count register reports residual data.  When the scatterlist is
 * exhausted, EVENT_XFER_COMPLETE is signalled to the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO words scaled by the
			 * data width, plus what part_buf already holds */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* current segment fully consumed: peek at the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
|
|
|
|
|
|
|
|
/*
 * PIO write path: feed the controller FIFO from the request's scatterlist
 * via the sg_miter iterator, writing as much as the FIFO free space
 * allows on each pass.  When the scatterlist is exhausted,
 * EVENT_XFER_COMPLETE is signalled to the tasklet.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free bytes = empty FIFO words scaled by the data
			 * width, minus bytes already queued in part_buf */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* current segment fully consumed: peek at the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
|
|
|
|
|
|
|
|
/*
 * Record a command-done/command-error interrupt: cancel the command
 * timeout timer, latch the first non-zero status, and hand completion
 * off to the tasklet via EVENT_CMD_COMPLETE.
 * NOTE(review): callers appear to hold host->irq_lock around this --
 * confirm against the interrupt handler before relying on it.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	del_timer(&host->cto_timer);

	/* keep the first status seen; don't overwrite an earlier error */
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
|
|
|
|
|
2014-10-15 00:33:09 +08:00
|
|
|
static void dw_mci_handle_cd(struct dw_mci *host)
|
|
|
|
{
|
2017-06-05 12:41:32 +08:00
|
|
|
struct dw_mci_slot *slot = host->slot;
|
2014-10-15 00:33:09 +08:00
|
|
|
|
2017-06-05 12:41:31 +08:00
|
|
|
if (slot->mmc->ops->card_event)
|
|
|
|
slot->mmc->ops->card_event(slot->mmc);
|
|
|
|
mmc_detect_change(slot->mmc,
|
|
|
|
msecs_to_jiffies(host->pdata->detect_delay_ms));
|
2014-10-15 00:33:09 +08:00
|
|
|
}
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/*
 * Top-level interrupt handler.  Reads the masked interrupt status
 * (MINTSTS), acknowledges each pending source in RINTSTS, and dispatches:
 * voltage-switch, command errors, data errors, data-transfer-over,
 * RX/TX FIFO thresholds (PIO), command-done, card-detect and SDIO IRQs.
 * Finally services internal-DMAC (IDMAC) interrupts when that transfer
 * mode is in use.  host->irq_lock guards the command/data completion
 * bookkeeping shared with the timeout timers.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	struct dw_mci_slot *slot = host->slot;
	unsigned long irqflags;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			/* command failed; the response timeout is moot */
			del_timer(&host->cto_timer);
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			/* keep an earlier error status if one was latched */
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* drain any residual FIFO data via PIO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
			mci_writel(host, RINTSTS,
				   SDMMC_INT_SDIO(slot->sdio_id));
			/* mask the SDIO IRQ until sdio_irq_work re-enables */
			__dw_mci_enable_sdio_irq(slot, 0);
			sdio_signal_irq(slot->mmc);
		}

	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			/* on a data error the tasklet handles teardown */
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
|
|
|
|
|
2018-02-24 14:17:22 +08:00
|
|
|
/*
 * Populate the mmc_host capability fields for the slot from platform
 * data, per-controller driver data and sensible defaults.
 * Returns 0 on success or -EINVAL if the controller id is out of range
 * for the driver-data capability table.
 */
static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct mmc_host *mmc = slot->mmc;
	int ctrl_id;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	/*
	 * Support MMC_CAP_ERASE by default.
	 * It needs to use trim/discard/erase commands.
	 */
	mmc->caps |= MMC_CAP_ERASE;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	/* controller id: DT "mshc" alias, else platform device id */
	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}

	if (drv_data && drv_data->caps) {
		/* bounds-check before indexing the caps table */
		if (ctrl_id >= drv_data->num_caps) {
			dev_err(host->dev, "invalid controller id %d\n",
				ctrl_id);
			return -EINVAL;
		}
		mmc->caps |= drv_data->caps[ctrl_id];
	}

	/*
	 * NOTE(review): plain assignment -- any caps2 bits set earlier
	 * (e.g. by mmc_of_parse() in the caller) are overwritten when
	 * pdata->caps2 is non-zero.  Confirm this is intended.
	 */
	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	mmc->f_min = DW_MCI_FREQ_MIN;
	if (!mmc->f_max)
		mmc->f_max = DW_MCI_FREQ_MAX;

	/* Process SDIO IRQs through the sdio_irq_work. */
	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	return 0;
}
|
|
|
|
|
2017-06-05 12:41:33 +08:00
|
|
|
/*
 * Allocate and register the single mmc slot for this host: set up the
 * mmc_host, regulators, DT properties and capabilities, choose transfer
 * size limits per DMA mode, then add the host to the mmc core.
 * Returns 0 on success or a negative errno; on failure the mmc_host is
 * freed before returning.
 */
static int dw_mci_init_slot(struct dw_mci *host)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = 0;
	slot->sdio_id = host->sdio_id0 + slot->id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot = slot;

	mmc->ops = &dw_mci_ops;

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto err_host_allocated;

	/* fall back to a fixed 3.3V range if no regulator provided OCR */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	ret = dw_mci_init_slot_caps(slot);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
|
|
|
|
|
2017-06-05 12:41:33 +08:00
|
|
|
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
|
2011-01-02 14:11:59 +08:00
|
|
|
{
|
|
|
|
/* Debugfs stuff is cleaned up by mmc core */
|
|
|
|
mmc_remove_host(slot->mmc);
|
2017-06-05 12:41:32 +08:00
|
|
|
slot->host->slot = NULL;
|
2011-01-02 14:11:59 +08:00
|
|
|
mmc_free_host(slot->mmc);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Probe the controller's DMA capabilities from HCON and set up either
 * the internal DMAC (IDMAC) or an external DMA engine (EDMAC).  Falls
 * back to PIO mode whenever DMA cannot be used.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((device_property_read_string_array(dev, "dma-names",
						       NULL, 0) < 0) ||
		    !device_property_present(dev, "dmas")) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	/* all four ops must be provided before DMA can be trusted */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
|
|
|
|
|
2017-10-31 05:45:00 +08:00
|
|
|
static void dw_mci_cmd11_timer(struct timer_list *t)
|
2015-03-10 07:18:21 +08:00
|
|
|
{
|
2017-10-31 05:45:00 +08:00
|
|
|
struct dw_mci *host = from_timer(host, t, cmd11_timer);
|
2015-03-10 07:18:21 +08:00
|
|
|
|
2015-04-04 02:13:06 +08:00
|
|
|
if (host->state != STATE_SENDING_CMD11) {
|
|
|
|
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
|
|
|
|
return;
|
|
|
|
}
|
2015-03-10 07:18:21 +08:00
|
|
|
|
|
|
|
host->cmd_status = SDMMC_INT_RTO;
|
|
|
|
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
|
|
|
|
tasklet_schedule(&host->tasklet);
|
|
|
|
}
|
|
|
|
|
2017-10-31 05:45:00 +08:00
|
|
|
/*
 * Command-timeout (CTO) timer callback: fires when no command-done interrupt
 * arrived within the expected window.  Runs under host->irq_lock so it is
 * serialized against the interrupt handler; on a genuine timeout it fakes a
 * response-timeout status and schedules the tasklet.
 */
static void dw_mci_cto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * If somehow we have very bad interrupt latency it's remotely possible
	 * that the timer could fire while the interrupt is still pending or
	 * while the interrupt is midway through running.  Let's be paranoid
	 * and detect those two cases.  Note that this paranoia is somewhat
	 * justified because in this function we don't actually cancel the
	 * pending command in the controller--we just assume it will never come.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "CTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_CMD11:
	case STATE_SENDING_CMD:
	case STATE_SENDING_STOP:
		/*
		 * If CMD_DONE interrupt does NOT come in sending command
		 * state, we should notify the driver to terminate current
		 * transfer and report a command timeout to the core.
		 */
		host->cmd_status = SDMMC_INT_RTO;
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
|
|
|
|
|
2017-10-31 05:45:00 +08:00
|
|
|
/*
 * Data-timeout (DTO) timer callback: fires when the data-over interrupt
 * never arrived for an in-flight transfer.  Mirrors dw_mci_cto_timer() but
 * for the data phase: on a genuine timeout it fakes a data-read-timeout
 * status, flags a data error, and schedules the tasklet.
 */
static void dw_mci_dto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, dto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * The DTO timer is much longer than the CTO timer, so it's even less
	 * likely that we'll hit these cases, but it pays to be paranoid.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & SDMMC_INT_DATA_OVER) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected data interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "DTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
|
|
|
|
|
2012-09-18 02:16:40 +08:00
|
|
|
#ifdef CONFIG_OF
/*
 * Build a dw_mci_board platform-data structure from device properties
 * (DT/ACPI).  Returns the devm-allocated pdata on success, or an ERR_PTR:
 * -ENOMEM on allocation failure, -EPROBE_DEFER if the optional reset
 * controller is not ready yet, or the variant driver's parse_dt() error.
 * Also writes host->data_addr_override and host->wm_aligned as side effects.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find reset controller when exist */
	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		/* Only defer-probe is propagated; other errors leave the
		 * ERR_PTR in pdata->rstc for callers to test with IS_ERR(). */
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	/* Missing fifo-depth is not fatal: probe reads FIFOTH instead. */
	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	device_property_read_u32(dev, "card-detect-delay",
				 &pdata->detect_delay_ms);

	/* Optional override for the DATA register offset (see probe). */
	device_property_read_u32(dev, "data-addr", &host->data_addr_override);

	if (device_property_present(dev, "fifo-watermark-aligned"))
		host->wm_aligned = true;

	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* Give the SoC-specific variant driver a chance to parse its props. */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
/* Without OF support there is no property source; callers must pass pdata. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
|
|
|
|
|
2015-02-26 02:11:51 +08:00
|
|
|
static void dw_mci_enable_cd(struct dw_mci *host)
|
|
|
|
{
|
|
|
|
unsigned long irqflags;
|
|
|
|
u32 temp;
|
|
|
|
|
2016-01-21 14:52:52 +08:00
|
|
|
/*
|
|
|
|
* No need for CD if all slots have a non-error GPIO
|
|
|
|
* as well as broken card detection is found.
|
|
|
|
*/
|
2017-06-05 12:41:35 +08:00
|
|
|
if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
|
2015-02-26 02:11:51 +08:00
|
|
|
return;
|
|
|
|
|
2017-06-05 12:41:35 +08:00
|
|
|
if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
|
2017-06-05 12:41:31 +08:00
|
|
|
spin_lock_irqsave(&host->irq_lock, irqflags);
|
|
|
|
temp = mci_readl(host, INTMASK);
|
|
|
|
temp |= SDMMC_INT_CD;
|
|
|
|
mci_writel(host, INTMASK, temp);
|
|
|
|
spin_unlock_irqrestore(&host->irq_lock, irqflags);
|
|
|
|
}
|
2015-02-26 02:11:51 +08:00
|
|
|
}
|
|
|
|
|
2012-01-13 18:34:57 +08:00
|
|
|
int dw_mci_probe(struct dw_mci *host)
|
2011-01-02 14:11:59 +08:00
|
|
|
{
|
2012-11-08 22:26:11 +08:00
|
|
|
const struct dw_mci_drv_data *drv_data = host->drv_data;
|
2012-01-13 18:34:57 +08:00
|
|
|
int width, i, ret = 0;
|
2011-01-02 14:11:59 +08:00
|
|
|
u32 fifo_size;
|
|
|
|
|
2012-09-18 02:16:40 +08:00
|
|
|
if (!host->pdata) {
|
|
|
|
host->pdata = dw_mci_parse_dt(host);
|
2016-08-12 16:51:26 +08:00
|
|
|
if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
|
|
|
|
return -EPROBE_DEFER;
|
|
|
|
} else if (IS_ERR(host->pdata)) {
|
2012-09-18 02:16:40 +08:00
|
|
|
dev_err(host->dev, "platform data not available\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2011-01-02 14:11:59 +08:00
|
|
|
}
|
|
|
|
|
2012-11-28 18:26:03 +08:00
|
|
|
host->biu_clk = devm_clk_get(host->dev, "biu");
|
2012-09-18 02:16:38 +08:00
|
|
|
if (IS_ERR(host->biu_clk)) {
|
|
|
|
dev_dbg(host->dev, "biu clock not available\n");
|
|
|
|
} else {
|
|
|
|
ret = clk_prepare_enable(host->biu_clk);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(host->dev, "failed to enable biu clock\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-11-28 18:26:03 +08:00
|
|
|
host->ciu_clk = devm_clk_get(host->dev, "ciu");
|
2012-09-18 02:16:38 +08:00
|
|
|
if (IS_ERR(host->ciu_clk)) {
|
|
|
|
dev_dbg(host->dev, "ciu clock not available\n");
|
2013-06-08 01:28:30 +08:00
|
|
|
host->bus_hz = host->pdata->bus_hz;
|
2012-09-18 02:16:38 +08:00
|
|
|
} else {
|
|
|
|
ret = clk_prepare_enable(host->ciu_clk);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(host->dev, "failed to enable ciu clock\n");
|
|
|
|
goto err_clk_biu;
|
|
|
|
}
|
|
|
|
|
2013-06-08 01:28:30 +08:00
|
|
|
if (host->pdata->bus_hz) {
|
|
|
|
ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
|
|
|
|
if (ret)
|
|
|
|
dev_warn(host->dev,
|
2014-03-03 10:36:42 +08:00
|
|
|
"Unable to set bus rate to %uHz\n",
|
2013-06-08 01:28:30 +08:00
|
|
|
host->pdata->bus_hz);
|
|
|
|
}
|
2012-09-18 02:16:38 +08:00
|
|
|
host->bus_hz = clk_get_rate(host->ciu_clk);
|
2013-06-08 01:28:30 +08:00
|
|
|
}
|
2012-09-18 02:16:38 +08:00
|
|
|
|
2014-03-03 10:36:42 +08:00
|
|
|
if (!host->bus_hz) {
|
|
|
|
dev_err(host->dev,
|
|
|
|
"Platform data must supply bus speed\n");
|
|
|
|
ret = -ENODEV;
|
|
|
|
goto err_clk_ciu;
|
|
|
|
}
|
|
|
|
|
2017-08-11 16:06:23 +08:00
|
|
|
if (!IS_ERR(host->pdata->rstc)) {
|
|
|
|
reset_control_assert(host->pdata->rstc);
|
|
|
|
usleep_range(10, 50);
|
|
|
|
reset_control_deassert(host->pdata->rstc);
|
|
|
|
}
|
|
|
|
|
2013-08-30 23:12:19 +08:00
|
|
|
if (drv_data && drv_data->init) {
|
|
|
|
ret = drv_data->init(host);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(host->dev,
|
|
|
|
"implementation specific init failed\n");
|
|
|
|
goto err_clk_ciu;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-31 05:45:00 +08:00
|
|
|
timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
|
|
|
|
timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
|
|
|
|
timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
|
2015-08-11 00:27:18 +08:00
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
spin_lock_init(&host->lock);
|
2014-12-03 07:42:47 +08:00
|
|
|
spin_lock_init(&host->irq_lock);
|
2011-01-02 14:11:59 +08:00
|
|
|
INIT_LIST_HEAD(&host->queue);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the host data width - this assumes that HCON has been set with
|
|
|
|
* the correct values.
|
|
|
|
*/
|
2015-09-16 14:41:37 +08:00
|
|
|
i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
|
2011-01-02 14:11:59 +08:00
|
|
|
if (!i) {
|
|
|
|
host->push_data = dw_mci_push_data16;
|
|
|
|
host->pull_data = dw_mci_pull_data16;
|
|
|
|
width = 16;
|
|
|
|
host->data_shift = 1;
|
|
|
|
} else if (i == 2) {
|
|
|
|
host->push_data = dw_mci_push_data64;
|
|
|
|
host->pull_data = dw_mci_pull_data64;
|
|
|
|
width = 64;
|
|
|
|
host->data_shift = 3;
|
|
|
|
} else {
|
|
|
|
/* Check for a reserved value, and warn if it is */
|
|
|
|
WARN((i != 1),
|
|
|
|
"HCON reports a reserved host data width!\n"
|
|
|
|
"Defaulting to 32-bit access.\n");
|
|
|
|
host->push_data = dw_mci_push_data32;
|
|
|
|
host->pull_data = dw_mci_pull_data32;
|
|
|
|
width = 32;
|
|
|
|
host->data_shift = 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Reset all blocks */
|
2016-01-22 15:43:12 +08:00
|
|
|
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
|
|
|
|
ret = -ENODEV;
|
|
|
|
goto err_clk_ciu;
|
|
|
|
}
|
2012-05-22 12:01:03 +08:00
|
|
|
|
|
|
|
host->dma_ops = host->pdata->dma_ops;
|
|
|
|
dw_mci_init_dma(host);
|
2011-01-02 14:11:59 +08:00
|
|
|
|
|
|
|
/* Clear the interrupts for the host controller */
|
|
|
|
mci_writel(host, RINTSTS, 0xFFFFFFFF);
|
|
|
|
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
|
|
|
|
|
|
|
|
/* Put in max timeout */
|
|
|
|
mci_writel(host, TMOUT, 0xFFFFFFFF);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* FIFO threshold settings RxMark = fifo_size / 2 - 1,
|
|
|
|
* Tx Mark = fifo_size / 2 DMA Size = 8
|
|
|
|
*/
|
2011-06-24 20:57:18 +08:00
|
|
|
if (!host->pdata->fifo_depth) {
|
|
|
|
/*
|
|
|
|
* Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
|
|
|
|
* have been overwritten by the bootloader, just like we're
|
|
|
|
* about to do, so if you know the value for your hardware, you
|
|
|
|
* should put it in the platform data.
|
|
|
|
*/
|
|
|
|
fifo_size = mci_readl(host, FIFOTH);
|
2012-01-11 17:28:21 +08:00
|
|
|
fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
|
2011-06-24 20:57:18 +08:00
|
|
|
} else {
|
|
|
|
fifo_size = host->pdata->fifo_depth;
|
|
|
|
}
|
|
|
|
host->fifo_depth = fifo_size;
|
2013-08-30 23:13:42 +08:00
|
|
|
host->fifoth_val =
|
|
|
|
SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
|
2011-03-17 19:32:33 +08:00
|
|
|
mci_writel(host, FIFOTH, host->fifoth_val);
|
2011-01-02 14:11:59 +08:00
|
|
|
|
|
|
|
/* disable clock to CIU */
|
|
|
|
mci_writel(host, CLKENA, 0);
|
|
|
|
mci_writel(host, CLKSRC, 0);
|
|
|
|
|
2013-03-12 18:43:54 +08:00
|
|
|
/*
|
|
|
|
* In 2.40a spec, Data offset is changed.
|
|
|
|
* Need to check the version-id and set data-offset for DATA register.
|
|
|
|
*/
|
|
|
|
host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
|
|
|
|
dev_info(host->dev, "Version ID is %04x\n", host->verid);
|
|
|
|
|
2017-01-11 14:35:35 +08:00
|
|
|
if (host->data_addr_override)
|
|
|
|
host->fifo_reg = host->regs + host->data_addr_override;
|
|
|
|
else if (host->verid < DW_MMC_240A)
|
2015-03-25 19:27:52 +08:00
|
|
|
host->fifo_reg = host->regs + DATA_OFFSET;
|
2013-03-12 18:43:54 +08:00
|
|
|
else
|
2015-03-25 19:27:52 +08:00
|
|
|
host->fifo_reg = host->regs + DATA_240A_OFFSET;
|
2013-03-12 18:43:54 +08:00
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
|
2012-11-28 18:26:03 +08:00
|
|
|
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
|
|
|
|
host->irq_flags, "dw-mci", host);
|
2011-01-02 14:11:59 +08:00
|
|
|
if (ret)
|
2014-10-15 00:33:09 +08:00
|
|
|
goto err_dmaunmap;
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2012-10-08 16:59:51 +08:00
|
|
|
/*
|
2015-02-26 02:11:51 +08:00
|
|
|
* Enable interrupts for command done, data over, data empty,
|
2012-10-08 16:59:51 +08:00
|
|
|
* receive ready and error such as transmit, receive timeout, crc error
|
|
|
|
*/
|
|
|
|
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
|
|
|
|
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
|
2015-02-26 02:11:51 +08:00
|
|
|
DW_MCI_ERROR_FLAGS);
|
2015-08-03 15:07:21 +08:00
|
|
|
/* Enable mci interrupt */
|
|
|
|
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
|
2012-10-08 16:59:51 +08:00
|
|
|
|
2015-08-03 15:07:21 +08:00
|
|
|
dev_info(host->dev,
|
|
|
|
"DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
|
2012-10-08 16:59:51 +08:00
|
|
|
host->irq, width, fifo_size);
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/* We need at least one slot to succeed */
|
2017-06-05 12:41:33 +08:00
|
|
|
ret = dw_mci_init_slot(host);
|
2017-06-05 12:41:31 +08:00
|
|
|
if (ret) {
|
|
|
|
dev_dbg(host->dev, "slot %d init failed\n", i);
|
2014-10-15 00:33:09 +08:00
|
|
|
goto err_dmaunmap;
|
2011-01-02 14:11:59 +08:00
|
|
|
}
|
|
|
|
|
2015-03-12 06:15:14 +08:00
|
|
|
/* Now that slots are all setup, we can enable card detect */
|
|
|
|
dw_mci_enable_cd(host);
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_dmaunmap:
|
|
|
|
if (host->use_dma && host->dma_ops->exit)
|
|
|
|
host->dma_ops->exit(host);
|
2012-09-18 02:16:38 +08:00
|
|
|
|
2016-08-12 16:51:26 +08:00
|
|
|
if (!IS_ERR(host->pdata->rstc))
|
|
|
|
reset_control_assert(host->pdata->rstc);
|
|
|
|
|
2012-09-18 02:16:38 +08:00
|
|
|
err_clk_ciu:
|
2016-07-15 09:54:08 +08:00
|
|
|
clk_disable_unprepare(host->ciu_clk);
|
2012-11-28 18:26:03 +08:00
|
|
|
|
2012-09-18 02:16:38 +08:00
|
|
|
err_clk_biu:
|
2016-07-15 09:54:08 +08:00
|
|
|
clk_disable_unprepare(host->biu_clk);
|
2012-11-28 18:26:03 +08:00
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2012-01-13 18:34:57 +08:00
|
|
|
EXPORT_SYMBOL(dw_mci_probe);
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2012-01-13 18:34:57 +08:00
|
|
|
/*
 * dw_mci_remove - tear down a host brought up by dw_mci_probe().
 *
 * Unregisters the slot, masks and clears all controller interrupts, gates
 * the card clock, shuts down DMA, asserts the optional external reset and
 * finally disables both interface clocks (reverse of probe order).
 */
void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot);

	/* Quiesce the controller before clocks go away. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	/* rstc holds an ERR_PTR when no reset controller was provided. */
	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
|
|
|
|
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2016-10-12 10:50:35 +08:00
|
|
|
#ifdef CONFIG_PM
|
2016-10-12 10:56:55 +08:00
|
|
|
int dw_mci_runtime_suspend(struct device *dev)
|
2011-01-02 14:11:59 +08:00
|
|
|
{
|
2016-10-12 10:56:55 +08:00
|
|
|
struct dw_mci *host = dev_get_drvdata(dev);
|
|
|
|
|
2015-09-16 14:41:23 +08:00
|
|
|
if (host->use_dma && host->dma_ops->exit)
|
|
|
|
host->dma_ops->exit(host);
|
|
|
|
|
2016-10-12 10:56:55 +08:00
|
|
|
clk_disable_unprepare(host->ciu_clk);
|
|
|
|
|
2017-06-05 12:41:34 +08:00
|
|
|
if (host->slot &&
|
|
|
|
(mmc_can_gpio_cd(host->slot->mmc) ||
|
|
|
|
!mmc_card_is_removable(host->slot->mmc)))
|
2016-10-12 10:56:55 +08:00
|
|
|
clk_disable_unprepare(host->biu_clk);
|
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2016-10-12 10:56:55 +08:00
|
|
|
EXPORT_SYMBOL(dw_mci_runtime_suspend);
|
2011-01-02 14:11:59 +08:00
|
|
|
|
2016-10-12 10:56:55 +08:00
|
|
|
/*
 * Runtime-PM resume: re-enable clocks, reset the controller and restore
 * all register state lost while suspended (FIFO thresholds, timeouts,
 * interrupt masks), then re-arm the bus and card detection.
 *
 * Returns 0 on success or a negative errno; on failure the biu clock is
 * re-gated under the same condition it was enabled.
 */
int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	/* Mirror of the biu-clock gating condition in runtime_suspend. */
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Re-enable the same interrupt set as dw_mci_probe(). */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	/*
	 * NOTE(review): host->slot is dereferenced unconditionally from here
	 * on, while the clock paths above NULL-check it -- presumably a
	 * resume can't happen without a registered slot; confirm.
	 */
	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(host->slot, true);

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
|
|
|
|
#endif /* CONFIG_PM */
|
2011-12-08 18:23:03 +08:00
|
|
|
|
2011-01-02 14:11:59 +08:00
|
|
|
/* Module init: the core library has nothing to register itself; just log. */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
|
|
|
|
|
|
|
|
/* Module exit: nothing to undo (probe/remove are driven by the glue drivers). */
static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");
|