/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/mmc/core.h
 */
#ifndef LINUX_MMC_CORE_H
#define LINUX_MMC_CORE_H

#include <linux/completion.h>
#include <linux/types.h>

struct mmc_data;
struct mmc_request;

enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
        MMC_BLK_PARTIAL,
        MMC_BLK_CMD_ERR,
        MMC_BLK_RETRY,
        MMC_BLK_ABORT,
        MMC_BLK_DATA_ERR,
        MMC_BLK_ECC_ERR,
        MMC_BLK_NOMEDIUM,
        MMC_BLK_NEW_REQUEST,
};

struct mmc_command {
        u32 opcode;
        u32 arg;
#define MMC_CMD23_ARG_REL_WR    (1 << 31)
#define MMC_CMD23_ARG_PACKED    ((0 << 31) | (1 << 30))
#define MMC_CMD23_ARG_TAG_REQ   (1 << 29)
        u32 resp[4];
        unsigned int flags;             /* expected response type */
#define MMC_RSP_PRESENT (1 << 0)
#define MMC_RSP_136     (1 << 1)        /* 136 bit response */
#define MMC_RSP_CRC     (1 << 2)        /* expect valid crc */
#define MMC_RSP_BUSY    (1 << 3)        /* card may send busy */
#define MMC_RSP_OPCODE  (1 << 4)        /* response contains opcode */

#define MMC_CMD_MASK    (3 << 5)        /* non-SPI command type */
#define MMC_CMD_AC      (0 << 5)
#define MMC_CMD_ADTC    (1 << 5)
#define MMC_CMD_BC      (2 << 5)
#define MMC_CMD_BCR     (3 << 5)

#define MMC_RSP_SPI_S1  (1 << 7)        /* one status byte */
#define MMC_RSP_SPI_S2  (1 << 8)        /* second byte */
#define MMC_RSP_SPI_B4  (1 << 9)        /* four data bytes */
#define MMC_RSP_SPI_BUSY (1 << 10)      /* card may send busy */

/*
 * These are the native response types, and correspond to valid bit
 * patterns of the above flags.  One additional valid pattern
 * is all zeros, which means we don't expect a response.
 */
#define MMC_RSP_NONE    (0)
#define MMC_RSP_R1      (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R1B     (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
#define MMC_RSP_R2      (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
#define MMC_RSP_R3      (MMC_RSP_PRESENT)
#define MMC_RSP_R4      (MMC_RSP_PRESENT)
#define MMC_RSP_R5      (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R6      (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R7      (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)

/* Can be used by core to poll after switch to MMC HS mode */
#define MMC_RSP_R1_NO_CRC       (MMC_RSP_PRESENT|MMC_RSP_OPCODE)

#define mmc_resp_type(cmd)      ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))

/*
 * These are the SPI response types for MMC, SD, and SDIO cards.
 * Commands return R1, with maybe more info.  Zero is an error type;
 * callers must always provide the appropriate MMC_RSP_SPI_Rx flags.
 */
#define MMC_RSP_SPI_R1  (MMC_RSP_SPI_S1)
#define MMC_RSP_SPI_R1B (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY)
#define MMC_RSP_SPI_R2  (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2)
#define MMC_RSP_SPI_R3  (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)
#define MMC_RSP_SPI_R4  (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)
#define MMC_RSP_SPI_R5  (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2)
#define MMC_RSP_SPI_R7  (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)

#define mmc_spi_resp_type(cmd)  ((cmd)->flags & \
        (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY|MMC_RSP_SPI_S2|MMC_RSP_SPI_B4))

/*
 * These are the command types.
 */
#define mmc_cmd_type(cmd)       ((cmd)->flags & MMC_CMD_MASK)

        unsigned int retries;           /* max number of retries */
        int error;                      /* command error */

/*
 * Standard errno values are used for errors, but some have specific
 * meaning in the MMC layer:
 *
 * ETIMEDOUT    Card took too long to respond
 * EILSEQ       Basic format problem with the received or sent data
 *              (e.g. CRC check failed, incorrect opcode in response
 *              or bad end bit)
 * EINVAL       Request cannot be performed because of restrictions
 *              in hardware and/or the driver
 * ENOMEDIUM    Host can determine that the slot is empty and is
 *              actively failing requests
 */

        unsigned int busy_timeout;      /* busy detect timeout in ms */

        struct mmc_data *data;          /* data segment associated with cmd */
        struct mmc_request *mrq;        /* associated request */
};
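
/*
 * Illustrative sketch (not part of the kernel API defined here): how the
 * response-type and command-type flags above are typically combined when
 * filling out a struct mmc_command.  Opcode 13 (SEND_STATUS) and the "rca"
 * variable are assumptions of the example; real opcode constants live in
 * linux/mmc/mmc.h.
 *
 *        struct mmc_command cmd = {};
 *
 *        cmd.opcode = 13;
 *        cmd.arg = rca << 16;
 *        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *        cmd.retries = 3;
 *
 * With these flags, mmc_resp_type(&cmd) evaluates to MMC_RSP_R1 and
 * mmc_cmd_type(&cmd) to MMC_CMD_AC, which is how host drivers decode what
 * kind of response and transfer the caller expects.
 */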

struct mmc_data {
        unsigned int timeout_ns;        /* data timeout (in ns, max 80ms) */
        unsigned int timeout_clks;      /* data timeout (in clocks) */
        unsigned int blksz;             /* data block size */
        unsigned int blocks;            /* number of blocks */
        unsigned int blk_addr;          /* block address */
        int error;                      /* data error */
        unsigned int flags;

#define MMC_DATA_WRITE          BIT(8)
#define MMC_DATA_READ           BIT(9)
/* Extra flags used by CQE */
#define MMC_DATA_QBR            BIT(10) /* CQE queue barrier */
#define MMC_DATA_PRIO           BIT(11) /* CQE high priority */
#define MMC_DATA_REL_WR         BIT(12) /* Reliable write */
#define MMC_DATA_DAT_TAG        BIT(13) /* Tag request */
#define MMC_DATA_FORCED_PRG     BIT(14) /* Forced programming */

        unsigned int bytes_xfered;

        struct mmc_command *stop;       /* stop command */
        struct mmc_request *mrq;        /* associated request */

        unsigned int sg_len;            /* size of scatter list */
        int sg_count;                   /* mapped sg entries */
        struct scatterlist *sg;         /* I/O scatter list */
        s32 host_cookie;                /* host private data */
};
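
/*
 * Illustrative sketch (assumptions, not a definitive recipe): a minimal
 * struct mmc_data setup for a multi-block read.  The scatterlist "sg", its
 * length "sg_len", the block count "nr_blocks" and the "card" pointer are
 * assumed to be prepared by the caller; mmc_set_data_timeout() is declared
 * at the end of this header.
 *
 *        struct mmc_data data = {};
 *
 *        data.blksz = 512;
 *        data.blocks = nr_blocks;
 *        data.flags = MMC_DATA_READ;
 *        data.sg = sg;
 *        data.sg_len = sg_len;
 *        mmc_set_data_timeout(&data, card);
 *
 * mmc_set_data_timeout() derives timeout_ns/timeout_clks from the card's
 * published timing limits, so callers rarely set those fields by hand.
 */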

struct mmc_host;
struct mmc_request {
        struct mmc_command *sbc;        /* SET_BLOCK_COUNT for multiblock */
        struct mmc_command *cmd;
        struct mmc_data *data;
        struct mmc_command *stop;

        struct completion completion;
        struct completion cmd_completion;
        void (*done)(struct mmc_request *);     /* completion function */
        /*
         * Notify upper layers (e.g. mmc block driver) that recovery is needed
         * due to an error associated with the mmc_request. Currently used only
         * by CQE.
         */
        void (*recovery_notifier)(struct mmc_request *);
        struct mmc_host *host;

        /* Allow other commands during this ongoing data transfer or busy wait */
        bool cap_cmd_during_tfr;

        int tag;
};
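
/*
 * Illustrative flow (a sketch, not a definitive implementation): issuing a
 * command on the command line while a data transfer is still in flight.
 * This relies on the host advertising MMC_CAP_CMD_DURING_TFR; the companion
 * helpers mmc_command_done() and mmc_wait_for_req_done() are part of the
 * MMC core rather than this header, so where they are declared is version
 * dependent.
 *
 *        mrq->cap_cmd_during_tfr = true;
 *        mmc_wait_for_req(host, mrq);        returns without waiting here
 *        ... send commands that do not use the data lines ...
 *        mmc_wait_for_req_done(host, mrq);   wait for the transfer itself
 *
 * The host driver signals that the command line is free again by calling
 * mmc_command_done(); with this capability it does not send a STOP command
 * automatically, so sending one (if required) becomes the caller's
 * responsibility.
 */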

struct mmc_card;

void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
                int retries);

int mmc_hw_reset(struct mmc_host *host);
int mmc_sw_reset(struct mmc_host *host);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
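
/*
 * Putting it together (a usage sketch under stated assumptions, not code
 * taken from this file): a synchronous multi-block read.  Opcode 18
 * (READ_MULTIPLE_BLOCK), "blk_addr" and a claimed "host" are assumptions of
 * the example, and "data" is filled exactly as in the struct mmc_data
 * sketch above.
 *
 *        struct mmc_request mrq = {};
 *        struct mmc_command cmd = {};
 *
 *        cmd.opcode = 18;
 *        cmd.arg = blk_addr;
 *        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *        mrq.cmd = &cmd;
 *        mrq.data = &data;
 *
 *        mmc_wait_for_req(host, &mrq);
 *        if (cmd.error || data.error)
 *                return cmd.error ? cmd.error : data.error;
 *
 * In current kernels the core fills in the back-pointers (cmd.mrq, data.mrq,
 * cmd.data) when it starts the request, so the caller only has to wire cmd
 * and data into the mmc_request itself.
 */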

#endif /* LINUX_MMC_CORE_H */