2018-05-03 03:18:27 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0+
|
|
|
|
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
|
|
|
|
// Copyright (C) 2008 Juergen Beisert
|
2009-09-23 07:46:02 +08:00
|
|
|
|
|
|
|
#include <linux/clk.h>
|
|
|
|
#include <linux/completion.h>
|
|
|
|
#include <linux/delay.h>
|
2014-09-11 09:18:44 +08:00
|
|
|
#include <linux/dmaengine.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
2009-09-23 07:46:02 +08:00
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
2020-07-27 14:33:54 +08:00
|
|
|
#include <linux/pinctrl/consumer.h>
|
2009-09-23 07:46:02 +08:00
|
|
|
#include <linux/platform_device.h>
|
2020-07-27 14:33:54 +08:00
|
|
|
#include <linux/pm_runtime.h>
|
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2009-09-23 07:46:02 +08:00
|
|
|
#include <linux/spi/spi.h>
|
|
|
|
#include <linux/types.h>
|
2011-07-10 01:16:41 +08:00
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/of_device.h>
|
2020-06-26 04:02:52 +08:00
|
|
|
#include <linux/property.h>
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2022-04-15 00:22:37 +08:00
|
|
|
#include <linux/dma/imx-dma.h>
|
2009-09-23 07:46:02 +08:00
|
|
|
|
|
|
|
#define DRIVER_NAME "spi_imx"
|
|
|
|
|
2019-03-05 07:02:36 +08:00
|
|
|
static bool use_dma = true;
|
|
|
|
module_param(use_dma, bool, 0644);
|
|
|
|
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
|
|
|
|
|
2022-05-03 01:54:56 +08:00
|
|
|
/* define polling limits */
|
|
|
|
static unsigned int polling_limit_us = 30;
|
|
|
|
module_param(polling_limit_us, uint, 0664);
|
|
|
|
MODULE_PARM_DESC(polling_limit_us,
|
|
|
|
"time in us to run a transfer in polling mode\n");
|
|
|
|
|
2020-07-27 14:33:54 +08:00
|
|
|
#define MXC_RPM_TIMEOUT 2000 /* 2000ms */
|
|
|
|
|
2009-09-23 07:46:02 +08:00
|
|
|
#define MXC_CSPIRXDATA 0x00
|
|
|
|
#define MXC_CSPITXDATA 0x04
|
|
|
|
#define MXC_CSPICTRL 0x08
|
|
|
|
#define MXC_CSPIINT 0x0c
|
|
|
|
#define MXC_RESET 0x1c
|
|
|
|
|
|
|
|
/* generic defines to abstract from the different register layouts */
|
|
|
|
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
|
|
|
|
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
|
2017-09-05 13:12:32 +08:00
|
|
|
#define MXC_INT_RDR BIT(4) /* Receive data threshold interrupt */
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2018-11-30 14:47:07 +08:00
|
|
|
/* The maximum bytes that an SDMA BD can transfer. */
|
|
|
|
#define MAX_SDMA_BD_BYTES (1 << 15)
|
2017-08-10 12:50:08 +08:00
|
|
|
#define MX51_ECSPI_CTRL_MAX_BURST 512
|
2017-09-05 13:12:32 +08:00
|
|
|
/* The maximum bytes that IMX53_ECSPI can transfer in slave mode. */
|
|
|
|
#define MX53_MAX_TRANSFER_BYTES 512
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2010-09-09 21:29:01 +08:00
|
|
|
enum spi_imx_devtype {
|
2011-07-10 01:16:39 +08:00
|
|
|
IMX1_CSPI,
|
|
|
|
IMX21_CSPI,
|
|
|
|
IMX27_CSPI,
|
|
|
|
IMX31_CSPI,
|
|
|
|
IMX35_CSPI, /* CSPI on all i.mx except above */
|
2017-06-08 13:16:01 +08:00
|
|
|
IMX51_ECSPI, /* ECSPI on i.mx51 */
|
|
|
|
IMX53_ECSPI, /* ECSPI on i.mx53 and later */
|
2010-09-09 21:29:01 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct spi_imx_data;
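/*
 * Per-SoC callbacks and feature flags. Each supported controller variant
 * provides one instance of this structure, selected at probe time from
 * the device tree compatible string (see spi_imx_dt_ids below).
 */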
|
|
|
|
|
|
|
|
struct spi_imx_devtype_data {
|
2022-05-03 01:54:49 +08:00
|
|
|
void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
|
|
|
|
int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
|
|
|
|
int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
|
|
|
|
void (*trigger)(struct spi_imx_data *spi_imx);
|
|
|
|
int (*rx_available)(struct spi_imx_data *spi_imx);
|
|
|
|
void (*reset)(struct spi_imx_data *spi_imx);
|
|
|
|
void (*setup_wml)(struct spi_imx_data *spi_imx);
|
|
|
|
void (*disable)(struct spi_imx_data *spi_imx);
|
2017-06-08 13:16:00 +08:00
|
|
|
bool has_dmamode;
|
2017-09-05 13:12:32 +08:00
|
|
|
bool has_slavemode;
|
2017-06-08 13:16:00 +08:00
|
|
|
unsigned int fifo_size;
|
2017-08-10 12:50:08 +08:00
|
|
|
bool dynamic_burst;
|
2021-07-14 18:20:48 +08:00
|
|
|
/*
|
|
|
|
* ERR009165 fixed or not:
|
|
|
|
* https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
|
|
|
|
*/
|
|
|
|
bool tx_glitch_fixed;
|
2011-07-10 01:16:39 +08:00
|
|
|
enum spi_imx_devtype devtype;
|
2010-09-09 21:29:01 +08:00
|
|
|
};
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
struct spi_imx_data {
|
2022-05-03 01:54:55 +08:00
|
|
|
struct spi_controller *controller;
|
2016-02-17 21:28:48 +08:00
|
|
|
struct device *dev;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
|
|
|
struct completion xfer_done;
|
2012-03-30 03:54:18 +08:00
|
|
|
void __iomem *base;
|
2016-02-24 16:20:29 +08:00
|
|
|
unsigned long base_phys;
|
|
|
|
|
2012-03-07 16:30:22 +08:00
|
|
|
struct clk *clk_per;
|
|
|
|
struct clk *clk_ipg;
|
2009-09-23 07:46:02 +08:00
|
|
|
unsigned long spi_clk;
|
2016-02-19 15:43:03 +08:00
|
|
|
unsigned int spi_bus_clk;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2017-06-02 13:38:01 +08:00
|
|
|
unsigned int bits_per_word;
|
2017-04-24 03:19:58 +08:00
|
|
|
unsigned int spi_drctl;
|
2016-02-24 16:20:29 +08:00
|
|
|
|
2017-08-10 12:50:08 +08:00
|
|
|
unsigned int count, remainder;
|
2022-05-03 01:54:49 +08:00
|
|
|
void (*tx)(struct spi_imx_data *spi_imx);
|
|
|
|
void (*rx)(struct spi_imx_data *spi_imx);
|
2009-09-23 07:46:02 +08:00
|
|
|
void *rx_buf;
|
|
|
|
const void *tx_buf;
|
|
|
|
unsigned int txfifo; /* number of words pushed in tx FIFO */
|
2018-07-17 22:31:54 +08:00
|
|
|
unsigned int dynamic_burst;
|
2022-04-12 02:45:29 +08:00
|
|
|
bool rx_only;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
/* Slave mode */
|
|
|
|
bool slave_mode;
|
|
|
|
bool slave_aborted;
|
|
|
|
unsigned int slave_burst;
|
|
|
|
|
2014-09-11 09:18:44 +08:00
|
|
|
/* DMA */
|
|
|
|
bool usedma;
|
2015-12-06 00:57:01 +08:00
|
|
|
u32 wml;
|
2014-09-11 09:18:44 +08:00
|
|
|
struct completion dma_rx_completion;
|
|
|
|
struct completion dma_tx_completion;
|
|
|
|
|
2012-05-22 03:49:35 +08:00
|
|
|
const struct spi_imx_devtype_data *devtype_data;
|
2009-09-23 07:46:02 +08:00
|
|
|
};
|
|
|
|
|
2011-07-10 01:16:39 +08:00
|
|
|
static inline int is_imx27_cspi(struct spi_imx_data *d)
|
|
|
|
{
|
|
|
|
return d->devtype_data->devtype == IMX27_CSPI;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int is_imx35_cspi(struct spi_imx_data *d)
|
|
|
|
{
|
|
|
|
return d->devtype_data->devtype == IMX35_CSPI;
|
|
|
|
}
|
|
|
|
|
2015-12-06 00:57:02 +08:00
|
|
|
static inline int is_imx51_ecspi(struct spi_imx_data *d)
|
|
|
|
{
|
|
|
|
return d->devtype_data->devtype == IMX51_ECSPI;
|
|
|
|
}
|
|
|
|
|
2017-06-08 13:16:01 +08:00
|
|
|
static inline int is_imx53_ecspi(struct spi_imx_data *d)
|
|
|
|
{
|
|
|
|
return d->devtype_data->devtype == IMX53_ECSPI;
|
|
|
|
}
|
|
|
|
|
2009-09-23 07:46:02 +08:00
|
|
|
#define MXC_SPI_BUF_RX(type) \
|
2009-10-02 06:44:28 +08:00
|
|
|
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
|
2009-09-23 07:46:02 +08:00
|
|
|
{ \
|
2009-10-02 06:44:28 +08:00
|
|
|
unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
|
2009-09-23 07:46:02 +08:00
|
|
|
\
|
2009-10-02 06:44:28 +08:00
|
|
|
if (spi_imx->rx_buf) { \
|
|
|
|
*(type *)spi_imx->rx_buf = val; \
|
|
|
|
spi_imx->rx_buf += sizeof(type); \
|
2009-09-23 07:46:02 +08:00
|
|
|
} \
|
2018-07-17 22:31:54 +08:00
|
|
|
\
|
|
|
|
spi_imx->remainder -= sizeof(type); \
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#define MXC_SPI_BUF_TX(type) \
|
2009-10-02 06:44:28 +08:00
|
|
|
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
|
2009-09-23 07:46:02 +08:00
|
|
|
{ \
|
|
|
|
type val = 0; \
|
|
|
|
\
|
2009-10-02 06:44:28 +08:00
|
|
|
if (spi_imx->tx_buf) { \
|
|
|
|
val = *(type *)spi_imx->tx_buf; \
|
|
|
|
spi_imx->tx_buf += sizeof(type); \
|
2009-09-23 07:46:02 +08:00
|
|
|
} \
|
|
|
|
\
|
2009-10-02 06:44:28 +08:00
|
|
|
spi_imx->count -= sizeof(type); \
|
2009-09-23 07:46:02 +08:00
|
|
|
\
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(val, spi_imx->base + MXC_CSPITXDATA); \
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
MXC_SPI_BUF_RX(u8)
|
|
|
|
MXC_SPI_BUF_TX(u8)
|
|
|
|
MXC_SPI_BUF_RX(u16)
|
|
|
|
MXC_SPI_BUF_TX(u16)
|
|
|
|
MXC_SPI_BUF_RX(u32)
|
|
|
|
MXC_SPI_BUF_TX(u32)
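/*
 * The macros above expand to the PIO helpers spi_imx_buf_rx_u8/u16/u32()
 * and spi_imx_buf_tx_u8/u16/u32(), which are installed as spi_imx->rx and
 * spi_imx->tx according to the word size of the current transfer.
 */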
|
|
|
|
|
|
|
|
/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
|
|
|
|
* (which is currently not the case in this driver)
|
|
|
|
*/
|
|
|
|
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
|
|
|
|
256, 384, 512, 768, 1024};
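/*
 * spi_imx_clkdiv_1() below returns the index of the first table entry whose
 * divider brings the input clock down to the requested rate or below, e.g.
 * fin = 16 MHz and fspi = 1 MHz select mxc_clkdivs[6] = 16, giving
 * *fres = 1 MHz and a register value of 6.
 */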
|
|
|
|
|
|
|
|
/* MX21, MX27 */
|
2009-10-02 06:44:28 +08:00
|
|
|
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
|
2016-11-02 05:18:39 +08:00
|
|
|
unsigned int fspi, unsigned int max, unsigned int *fres)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
2011-07-10 01:16:39 +08:00
|
|
|
int i;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
|
|
|
for (i = 2; i < max; i++)
|
|
|
|
if (fspi * mxc_clkdivs[i] >= fin)
|
2016-11-02 05:18:39 +08:00
|
|
|
break;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2016-11-02 05:18:39 +08:00
|
|
|
*fres = fin / mxc_clkdivs[i];
|
|
|
|
return i;
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2010-09-10 03:02:48 +08:00
|
|
|
/* MX1, MX31, MX35, MX51 CSPI */
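/*
 * spi_imx_clkdiv_2() uses a power-of-two divider 4 << i and returns the
 * index i, e.g. fin = 16 MHz and fspi = 1 MHz give i = 2 (divider 16)
 * and *fres = 1 MHz.
 */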
|
2009-10-02 06:44:28 +08:00
|
|
|
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
|
2016-09-02 04:38:40 +08:00
|
|
|
unsigned int fspi, unsigned int *fres)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
int i, div = 4;
|
|
|
|
|
|
|
|
for (i = 0; i < 7; i++) {
|
|
|
|
if (fspi * div >= fin)
|
2016-09-02 04:38:40 +08:00
|
|
|
goto out;
|
2009-09-23 07:46:02 +08:00
|
|
|
div <<= 1;
|
|
|
|
}
|
|
|
|
|
2016-09-02 04:38:40 +08:00
|
|
|
out:
|
|
|
|
*fres = fin / div;
|
|
|
|
return i;
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2017-06-02 13:38:04 +08:00
|
|
|
static int spi_imx_bytes_per_word(const int bits_per_word)
|
2016-02-24 16:20:29 +08:00
|
|
|
{
|
2018-07-17 22:31:52 +08:00
|
|
|
if (bits_per_word <= 8)
|
|
|
|
return 1;
|
|
|
|
else if (bits_per_word <= 16)
|
|
|
|
return 2;
|
|
|
|
else
|
|
|
|
return 4;
|
2016-02-24 16:20:29 +08:00
|
|
|
}
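/*
 * spi_imx_can_dma() decides whether a transfer should use DMA: it must be
 * allowed by the use_dma module parameter, the controller must not be on
 * the PIO fallback path, DMA channels must have been set up, slave mode
 * must not be active, and the transfer must be at least one FIFO in size.
 */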
|
|
|
|
|
2022-05-03 01:54:54 +08:00
|
|
|
static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
|
2014-09-11 09:18:44 +08:00
|
|
|
struct spi_transfer *transfer)
|
|
|
|
{
|
2022-05-03 01:54:54 +08:00
|
|
|
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
|
2016-02-24 16:20:29 +08:00
|
|
|
|
2022-05-03 01:54:54 +08:00
|
|
|
if (!use_dma || controller->fallback)
|
2019-03-05 07:02:36 +08:00
|
|
|
return false;
|
|
|
|
|
2022-05-03 01:54:54 +08:00
|
|
|
if (!controller->dma_rx)
|
2016-02-24 16:20:29 +08:00
|
|
|
return false;
|
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
if (spi_imx->slave_mode)
|
|
|
|
return false;
|
|
|
|
|
2018-10-10 18:32:48 +08:00
|
|
|
if (transfer->len < spi_imx->devtype_data->fifo_size)
|
|
|
|
return false;
|
|
|
|
|
2017-08-10 12:50:08 +08:00
|
|
|
spi_imx->dynamic_burst = 0;
|
2017-01-06 20:22:18 +08:00
|
|
|
|
2016-02-24 16:20:29 +08:00
|
|
|
return true;
|
2014-09-11 09:18:44 +08:00
|
|
|
}
|
|
|
|
|
2011-07-10 01:16:37 +08:00
|
|
|
#define MX51_ECSPI_CTRL 0x08
|
|
|
|
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
|
|
|
|
#define MX51_ECSPI_CTRL_XCH (1 << 2)
|
2014-09-11 09:18:44 +08:00
|
|
|
#define MX51_ECSPI_CTRL_SMC (1 << 3)
|
2011-07-10 01:16:37 +08:00
|
|
|
#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
|
2017-04-24 03:19:58 +08:00
|
|
|
#define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
|
2011-07-10 01:16:37 +08:00
|
|
|
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
|
|
|
|
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
|
|
|
|
#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
|
|
|
|
#define MX51_ECSPI_CTRL_BL_OFFSET 20
|
2017-08-10 12:50:08 +08:00
|
|
|
#define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
|
2011-07-10 01:16:37 +08:00
|
|
|
|
|
|
|
#define MX51_ECSPI_CONFIG 0x0c
|
|
|
|
#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
|
|
|
|
#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
|
|
|
|
#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
|
|
|
|
#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
|
2012-09-25 19:21:57 +08:00
|
|
|
#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))
|
2011-07-10 01:16:37 +08:00
|
|
|
|
|
|
|
#define MX51_ECSPI_INT 0x10
|
|
|
|
#define MX51_ECSPI_INT_TEEN (1 << 0)
|
|
|
|
#define MX51_ECSPI_INT_RREN (1 << 3)
|
2017-09-05 13:12:32 +08:00
|
|
|
#define MX51_ECSPI_INT_RDREN (1 << 4)
|
2011-07-10 01:16:37 +08:00
|
|
|
|
2018-11-30 14:47:07 +08:00
|
|
|
#define MX51_ECSPI_DMA 0x14
|
2016-02-24 16:20:31 +08:00
|
|
|
#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
|
|
|
|
#define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
|
|
|
|
#define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
|
2014-09-11 09:18:44 +08:00
|
|
|
|
2016-02-24 16:20:27 +08:00
|
|
|
#define MX51_ECSPI_DMA_TEDEN (1 << 7)
|
|
|
|
#define MX51_ECSPI_DMA_RXDEN (1 << 23)
|
|
|
|
#define MX51_ECSPI_DMA_RXTDEN (1 << 31)
|
2014-09-11 09:18:44 +08:00
|
|
|
|
2011-07-10 01:16:37 +08:00
|
|
|
#define MX51_ECSPI_STAT 0x18
|
|
|
|
#define MX51_ECSPI_STAT_RR (1 << 3)
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2015-12-04 09:23:24 +08:00
|
|
|
#define MX51_ECSPI_TESTREG 0x20
|
|
|
|
#define MX51_ECSPI_TESTREG_LBC BIT(31)
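/*
 * The spi_imx_buf_*_swap() helpers below are used in dynamic burst mode,
 * where 8 and 16 bit words are packed into 32 bit FIFO accesses. On
 * little-endian hosts the bytes (or half-words) within each 32 bit word
 * are swapped so that the data goes out on the wire in its original order.
 */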
|
|
|
|
|
2017-08-10 12:50:08 +08:00
|
|
|
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
|
|
|
unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
|
|
|
|
|
|
|
|
if (spi_imx->rx_buf) {
|
|
|
|
#ifdef __LITTLE_ENDIAN
|
2022-05-03 01:54:53 +08:00
|
|
|
unsigned int bytes_per_word;
|
|
|
|
|
2017-08-10 12:50:08 +08:00
|
|
|
bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
|
|
|
|
if (bytes_per_word == 1)
|
2022-05-03 01:54:52 +08:00
|
|
|
swab32s(&val);
|
2017-08-10 12:50:08 +08:00
|
|
|
else if (bytes_per_word == 2)
|
2022-05-03 01:54:53 +08:00
|
|
|
swahw32s(&val);
|
2017-08-10 12:50:08 +08:00
|
|
|
#endif
|
|
|
|
*(u32 *)spi_imx->rx_buf = val;
|
|
|
|
spi_imx->rx_buf += sizeof(u32);
|
|
|
|
}
|
2018-07-17 22:31:54 +08:00
|
|
|
|
|
|
|
spi_imx->remainder -= sizeof(u32);
|
2017-08-10 12:50:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
2018-07-17 22:31:54 +08:00
|
|
|
int unaligned;
|
|
|
|
u32 val;
|
2017-08-10 12:50:08 +08:00
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
unaligned = spi_imx->remainder % 4;
|
|
|
|
|
|
|
|
if (!unaligned) {
|
2017-08-10 12:50:08 +08:00
|
|
|
spi_imx_buf_rx_swap_u32(spi_imx);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
|
2017-08-10 12:50:08 +08:00
|
|
|
spi_imx_buf_rx_u16(spi_imx);
|
2018-07-17 22:31:54 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
val = readl(spi_imx->base + MXC_CSPIRXDATA);
|
|
|
|
|
|
|
|
while (unaligned--) {
|
|
|
|
if (spi_imx->rx_buf) {
|
|
|
|
*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
|
|
|
|
spi_imx->rx_buf++;
|
|
|
|
}
|
|
|
|
spi_imx->remainder--;
|
|
|
|
}
|
2017-08-10 12:50:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
|
|
|
u32 val = 0;
|
2017-08-23 21:34:43 +08:00
|
|
|
#ifdef __LITTLE_ENDIAN
|
2017-08-10 12:50:08 +08:00
|
|
|
unsigned int bytes_per_word;
|
2017-08-23 21:34:43 +08:00
|
|
|
#endif
|
2017-08-10 12:50:08 +08:00
|
|
|
|
|
|
|
if (spi_imx->tx_buf) {
|
|
|
|
val = *(u32 *)spi_imx->tx_buf;
|
|
|
|
spi_imx->tx_buf += sizeof(u32);
|
|
|
|
}
|
|
|
|
|
|
|
|
spi_imx->count -= sizeof(u32);
|
|
|
|
#ifdef __LITTLE_ENDIAN
|
|
|
|
bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
|
|
|
|
|
|
|
|
if (bytes_per_word == 1)
|
2022-05-03 01:54:52 +08:00
|
|
|
swab32s(&val);
|
2017-08-10 12:50:08 +08:00
|
|
|
else if (bytes_per_word == 2)
|
2022-05-03 01:54:53 +08:00
|
|
|
swahw32s(&val);
|
2017-08-10 12:50:08 +08:00
|
|
|
#endif
|
|
|
|
writel(val, spi_imx->base + MXC_CSPITXDATA);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
2018-07-17 22:31:54 +08:00
|
|
|
int unaligned;
|
|
|
|
u32 val = 0;
|
2017-08-10 12:50:08 +08:00
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
unaligned = spi_imx->count % 4;
|
2017-08-10 12:50:08 +08:00
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
if (!unaligned) {
|
|
|
|
spi_imx_buf_tx_swap_u32(spi_imx);
|
|
|
|
return;
|
2017-08-10 12:50:08 +08:00
|
|
|
}
|
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
|
|
|
|
spi_imx_buf_tx_u16(spi_imx);
|
2017-08-10 12:50:08 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
while (unaligned--) {
|
|
|
|
if (spi_imx->tx_buf) {
|
|
|
|
val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
|
|
|
|
spi_imx->tx_buf++;
|
|
|
|
}
|
|
|
|
spi_imx->count--;
|
|
|
|
}
|
2017-08-10 12:50:08 +08:00
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
writel(val, spi_imx->base + MXC_CSPITXDATA);
|
2017-08-10 12:50:08 +08:00
|
|
|
}
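/*
 * mx53_ecspi_rx_slave()/mx53_ecspi_tx_slave() are the slave mode PIO
 * helpers for the i.MX53 eCSPI. Data is exchanged as big-endian 32 bit
 * FIFO words; when the burst length is not a multiple of four, the
 * leftover 1-3 bytes are carried in a single partial word.
 */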
|
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
|
|
|
u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
|
|
|
|
|
|
|
|
if (spi_imx->rx_buf) {
|
|
|
|
int n_bytes = spi_imx->slave_burst % sizeof(val);
|
|
|
|
|
|
|
|
if (!n_bytes)
|
|
|
|
n_bytes = sizeof(val);
|
|
|
|
|
|
|
|
memcpy(spi_imx->rx_buf,
|
|
|
|
((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
|
|
|
|
|
|
|
|
spi_imx->rx_buf += n_bytes;
|
|
|
|
spi_imx->slave_burst -= n_bytes;
|
|
|
|
}
|
2018-07-17 22:31:54 +08:00
|
|
|
|
|
|
|
spi_imx->remainder -= sizeof(u32);
|
2017-09-05 13:12:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
|
|
|
u32 val = 0;
|
|
|
|
int n_bytes = spi_imx->count % sizeof(val);
|
|
|
|
|
|
|
|
if (!n_bytes)
|
|
|
|
n_bytes = sizeof(val);
|
|
|
|
|
|
|
|
if (spi_imx->tx_buf) {
|
|
|
|
memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
|
|
|
|
spi_imx->tx_buf, n_bytes);
|
|
|
|
val = cpu_to_be32(val);
|
|
|
|
spi_imx->tx_buf += n_bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
spi_imx->count -= n_bytes;
|
|
|
|
|
|
|
|
writel(val, spi_imx->base + MXC_CSPITXDATA);
|
|
|
|
}
|
|
|
|
|
2010-09-10 03:02:48 +08:00
|
|
|
/* MX51 eCSPI */
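/*
 * For example, mx51_ecspi_clkdiv() below maps fin = 60 MHz and
 * fspi = 10 MHz to a pre-divider of 6 (pre = 5) and a post-divider of 1
 * (post = 0), i.e. SCLK = 60 MHz / 6 = 10 MHz.
 */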
|
2016-02-17 21:28:48 +08:00
|
|
|
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
|
|
|
|
unsigned int fspi, unsigned int *fres)
|
2010-09-10 03:02:48 +08:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* there are two 4-bit dividers, the pre-divider divides by
|
|
|
|
* $pre, the post-divider by 2^$post
|
|
|
|
*/
|
|
|
|
unsigned int pre, post;
|
2016-02-17 21:28:48 +08:00
|
|
|
unsigned int fin = spi_imx->spi_clk;
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2022-11-16 02:10:00 +08:00
|
|
|
fspi = min(fspi, fin);
|
2010-09-10 03:02:48 +08:00
|
|
|
|
|
|
|
post = fls(fin) - fls(fspi);
|
|
|
|
if (fin > fspi << post)
|
|
|
|
post++;
|
|
|
|
|
|
|
|
/* now we have: (fin <= fspi << post) with post being minimal */
|
|
|
|
|
|
|
|
post = max(4U, post) - 4;
|
|
|
|
if (unlikely(post > 0xf)) {
|
2016-02-17 21:28:48 +08:00
|
|
|
dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
|
|
|
|
fspi, fin);
|
2010-09-10 03:02:48 +08:00
|
|
|
return 0xff;
|
|
|
|
}
|
|
|
|
|
|
|
|
pre = DIV_ROUND_UP(fin, fspi << post) - 1;
|
|
|
|
|
2016-02-17 21:28:48 +08:00
|
|
|
dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
|
2010-09-10 03:02:48 +08:00
|
|
|
__func__, fin, fspi, post, pre);
|
2013-12-19 01:31:47 +08:00
|
|
|
|
|
|
|
/* Resulting frequency for the SCLK line. */
|
|
|
|
*fres = (fin / (pre + 1)) >> post;
|
|
|
|
|
2011-07-10 01:16:37 +08:00
|
|
|
return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
|
|
|
|
(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
|
2010-09-10 03:02:48 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
|
2010-09-10 03:02:48 +08:00
|
|
|
{
|
2022-05-03 01:54:51 +08:00
|
|
|
unsigned int val = 0;
|
2010-09-10 03:02:48 +08:00
|
|
|
|
|
|
|
if (enable & MXC_INT_TE)
|
2011-07-10 01:16:37 +08:00
|
|
|
val |= MX51_ECSPI_INT_TEEN;
|
2010-09-10 03:02:48 +08:00
|
|
|
|
|
|
|
if (enable & MXC_INT_RR)
|
2011-07-10 01:16:37 +08:00
|
|
|
val |= MX51_ECSPI_INT_RREN;
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
if (enable & MXC_INT_RDR)
|
|
|
|
val |= MX51_ECSPI_INT_RDREN;
|
|
|
|
|
2011-07-10 01:16:37 +08:00
|
|
|
writel(val, spi_imx->base + MX51_ECSPI_INT);
|
2010-09-10 03:02:48 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
|
2010-09-10 03:02:48 +08:00
|
|
|
{
|
2016-02-24 16:20:32 +08:00
|
|
|
u32 reg;
|
2014-09-11 09:18:44 +08:00
|
|
|
|
2016-02-24 16:20:32 +08:00
|
|
|
reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
|
|
|
|
reg |= MX51_ECSPI_CTRL_XCH;
|
2011-07-10 01:16:37 +08:00
|
|
|
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
|
2010-09-10 03:02:48 +08:00
|
|
|
}
|
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
|
|
|
u32 ctrl;
|
|
|
|
|
|
|
|
ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
|
|
|
|
ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
|
|
|
|
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:05 +08:00
|
|
|
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
|
|
|
|
struct spi_message *msg)
|
|
|
|
{
|
2018-11-30 14:47:06 +08:00
|
|
|
struct spi_device *spi = msg->spi;
|
2021-07-26 18:01:02 +08:00
|
|
|
struct spi_transfer *xfer;
|
2016-03-15 21:24:36 +08:00
|
|
|
u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
|
2021-07-26 18:01:02 +08:00
|
|
|
u32 min_speed_hz = ~0U;
|
2021-07-03 10:23:00 +08:00
|
|
|
u32 testreg, delay;
|
2016-03-15 21:24:36 +08:00
|
|
|
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
|
2022-05-03 01:54:57 +08:00
|
|
|
u32 current_cfg = cfg;
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
/* set Master or Slave mode */
|
|
|
|
if (spi_imx->slave_mode)
|
|
|
|
ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
|
|
|
|
else
|
|
|
|
ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2017-04-24 03:19:58 +08:00
|
|
|
/*
|
|
|
|
* Enable SPI_RDY handling (falling edge/level triggered).
|
|
|
|
*/
|
|
|
|
if (spi->mode & SPI_READY)
|
|
|
|
ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
|
|
|
|
|
2010-09-10 03:02:48 +08:00
|
|
|
/* set chip select to use */
|
2016-06-09 01:02:06 +08:00
|
|
|
ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2018-11-30 14:47:06 +08:00
|
|
|
/*
|
|
|
|
* The ctrl register must be written first, with the EN bit set;
|
|
|
|
* until then, the other registers must not be written to.
|
|
|
|
*/
|
|
|
|
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
|
|
|
|
|
|
|
|
testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
|
|
|
|
if (spi->mode & SPI_LOOP)
|
|
|
|
testreg |= MX51_ECSPI_TESTREG_LBC;
|
2017-09-05 13:12:32 +08:00
|
|
|
else
|
2018-11-30 14:47:06 +08:00
|
|
|
testreg &= ~MX51_ECSPI_TESTREG_LBC;
|
|
|
|
writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
/*
|
|
|
|
* eCSPI burst completion by the Chip Select signal in Slave mode
|
|
|
|
* is not functional on the i.MX53 SoC, so configure the SPI burst as
|
|
|
|
* completed when BURST_LENGTH + 1 bits have been received
|
|
|
|
*/
|
|
|
|
if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
|
|
|
|
cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
|
|
|
|
else
|
|
|
|
cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CPOL) {
|
2016-06-09 01:02:06 +08:00
|
|
|
cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
|
|
|
|
cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
|
2016-03-15 21:24:36 +08:00
|
|
|
} else {
|
2016-06-09 01:02:06 +08:00
|
|
|
cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
|
|
|
|
cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
|
2012-09-25 19:21:57 +08:00
|
|
|
}
|
2018-11-30 14:47:06 +08:00
|
|
|
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CS_HIGH)
|
2016-06-09 01:02:06 +08:00
|
|
|
cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
|
2016-03-15 21:24:36 +08:00
|
|
|
else
|
2016-06-09 01:02:06 +08:00
|
|
|
cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2022-05-03 01:54:57 +08:00
|
|
|
if (cfg == current_cfg)
|
|
|
|
return 0;
|
|
|
|
|
2018-11-30 14:47:06 +08:00
|
|
|
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
|
2016-02-24 16:20:32 +08:00
|
|
|
|
2021-07-03 10:23:00 +08:00
|
|
|
/*
|
|
|
|
* Wait until the changes in the configuration register CONFIGREG
|
|
|
|
* propagate into the hardware. It takes exactly one tick of the
|
|
|
|
* SCLK clock, but we will wait two SCLK clocks just to be sure. The
|
|
|
|
* delay it takes for the hardware to apply the changes becomes
|
|
|
|
* noticeable if the SCLK clock runs very slowly. In such a case, if
|
|
|
|
* the polarity of SCLK should be inverted, the GPIO ChipSelect might
|
|
|
|
* be asserted before the SCLK polarity changes, which would disrupt
|
|
|
|
* the SPI communication as the device on the other end would consider
|
|
|
|
* the change of SCLK polarity as a clock tick already.
|
2021-07-26 18:01:02 +08:00
|
|
|
*
|
2022-05-03 01:54:55 +08:00
|
|
|
* Because spi_imx->spi_bus_clk is only set in prepare_message
|
2021-07-26 18:01:02 +08:00
|
|
|
* callback, iterate over all the transfers in spi_message, find the
|
|
|
|
* one with lowest bus frequency, and use that bus frequency for the
|
|
|
|
* delay calculation. In case all transfers have speed_hz == 0, then
|
|
|
|
* min_speed_hz is ~0 and the resulting delay is zero.
|
2021-07-03 10:23:00 +08:00
|
|
|
*/
|
2021-07-26 18:01:02 +08:00
|
|
|
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
|
|
|
|
if (!xfer->speed_hz)
|
|
|
|
continue;
|
|
|
|
min_speed_hz = min(xfer->speed_hz, min_speed_hz);
|
|
|
|
}
|
|
|
|
|
|
|
|
delay = (2 * 1000000) / min_speed_hz;
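/* e.g. min_speed_hz = 100 kHz gives delay = 20 us, 1 MHz gives delay = 2 us */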
|
2021-07-28 00:04:28 +08:00
|
|
|
if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
|
2021-07-03 10:23:00 +08:00
|
|
|
udelay(delay);
|
|
|
|
else /* SCLK is _very_ slow */
|
|
|
|
usleep_range(delay, delay + 10);
|
|
|
|
|
2018-11-30 14:47:06 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2015-12-08 14:43:43 +08:00
|
|
|
|
2022-04-12 02:45:29 +08:00
|
|
|
static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
|
|
|
|
struct spi_device *spi)
|
|
|
|
{
|
|
|
|
bool cpha = (spi->mode & SPI_CPHA);
|
|
|
|
bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
|
|
|
|
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
|
|
|
|
|
|
|
|
/* Flip cpha logical value iff flip_cpha */
|
|
|
|
cpha ^= flip_cpha;
|
|
|
|
|
|
|
|
if (cpha)
|
|
|
|
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
|
|
|
|
else
|
|
|
|
cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
|
|
|
|
|
|
|
|
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:08 +08:00
|
|
|
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
|
2021-04-08 18:33:47 +08:00
|
|
|
struct spi_device *spi)
|
2018-11-30 14:47:06 +08:00
|
|
|
{
|
|
|
|
u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
|
2021-07-03 10:23:00 +08:00
|
|
|
u32 clk;
|
2018-11-30 14:47:06 +08:00
|
|
|
|
|
|
|
/* Clear BL field and set the right value */
|
|
|
|
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
|
|
|
|
if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
|
|
|
|
ctrl |= (spi_imx->slave_burst * 8 - 1)
|
|
|
|
<< MX51_ECSPI_CTRL_BL_OFFSET;
|
2015-12-04 09:23:24 +08:00
|
|
|
else
|
2018-11-30 14:47:06 +08:00
|
|
|
ctrl |= (spi_imx->bits_per_word - 1)
|
|
|
|
<< MX51_ECSPI_CTRL_BL_OFFSET;
|
2015-12-04 09:23:24 +08:00
|
|
|
|
2018-11-30 14:47:06 +08:00
|
|
|
/* set clock speed */
|
|
|
|
ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
|
|
|
|
0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
|
2021-04-08 18:33:47 +08:00
|
|
|
ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
|
2018-11-30 14:47:06 +08:00
|
|
|
spi_imx->spi_bus_clk = clk;
|
|
|
|
|
2022-04-12 02:45:29 +08:00
|
|
|
mx51_configure_cpha(spi_imx, spi);
|
|
|
|
|
2021-07-14 18:20:48 +08:00
|
|
|
/*
|
|
|
|
* ERR009165: work in XHC mode instead of SMC as PIO on the chips
|
|
|
|
* before i.mx6ul.
|
|
|
|
*/
|
|
|
|
if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
|
2018-11-30 14:47:06 +08:00
|
|
|
ctrl |= MX51_ECSPI_CTRL_SMC;
|
2021-07-14 18:20:48 +08:00
|
|
|
else
|
|
|
|
ctrl &= ~MX51_ECSPI_CTRL_SMC;
|
2018-11-30 14:47:06 +08:00
|
|
|
|
|
|
|
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
|
2010-09-10 03:02:48 +08:00
|
|
|
|
2018-10-10 18:32:42 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
|
|
|
|
{
|
2021-07-14 18:20:48 +08:00
|
|
|
u32 tx_wml = 0;
|
|
|
|
|
|
|
|
if (spi_imx->devtype_data->tx_glitch_fixed)
|
|
|
|
tx_wml = spi_imx->wml;
|
2014-09-11 09:18:44 +08:00
|
|
|
/*
|
|
|
|
* Configure the DMA register: set up the watermark
|
|
|
|
* and enable DMA request.
|
|
|
|
*/
|
2018-10-10 18:32:45 +08:00
|
|
|
writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
|
2021-07-14 18:20:48 +08:00
|
|
|
MX51_ECSPI_DMA_TX_WML(tx_wml) |
|
2016-02-24 16:20:31 +08:00
|
|
|
MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
|
2016-02-24 16:20:27 +08:00
|
|
|
MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
|
|
|
|
MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
|
2010-09-10 03:02:48 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
|
2010-09-10 03:02:48 +08:00
|
|
|
{
|
2011-07-10 01:16:37 +08:00
|
|
|
return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
|
2010-09-10 03:02:48 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
|
2010-09-10 03:02:48 +08:00
|
|
|
{
|
|
|
|
/* drain receive buffer */
|
2011-07-10 01:16:37 +08:00
|
|
|
while (mx51_ecspi_rx_available(spi_imx))
|
2010-09-10 03:02:48 +08:00
|
|
|
readl(spi_imx->base + MXC_CSPIRXDATA);
|
|
|
|
}
|
|
|
|
|
2009-09-23 07:46:02 +08:00
|
|
|
#define MX31_INTREG_TEEN (1 << 0)
|
|
|
|
#define MX31_INTREG_RREN (1 << 3)
|
|
|
|
|
|
|
|
#define MX31_CSPICTRL_ENABLE (1 << 0)
|
|
|
|
#define MX31_CSPICTRL_MASTER (1 << 1)
|
|
|
|
#define MX31_CSPICTRL_XCH (1 << 2)
|
2016-10-20 06:42:25 +08:00
|
|
|
#define MX31_CSPICTRL_SMC (1 << 3)
|
2009-09-23 07:46:02 +08:00
|
|
|
#define MX31_CSPICTRL_POL (1 << 4)
|
|
|
|
#define MX31_CSPICTRL_PHA (1 << 5)
|
|
|
|
#define MX31_CSPICTRL_SSCTL (1 << 6)
|
|
|
|
#define MX31_CSPICTRL_SSPOL (1 << 7)
|
|
|
|
#define MX31_CSPICTRL_BC_SHIFT 8
|
|
|
|
#define MX35_CSPICTRL_BL_SHIFT 20
|
|
|
|
#define MX31_CSPICTRL_CS_SHIFT 24
|
|
|
|
#define MX35_CSPICTRL_CS_SHIFT 12
|
|
|
|
#define MX31_CSPICTRL_DR_SHIFT 16
|
|
|
|
|
2016-10-20 06:42:25 +08:00
|
|
|
#define MX31_CSPI_DMAREG 0x10
|
|
|
|
#define MX31_DMAREG_RH_DEN (1<<4)
|
|
|
|
#define MX31_DMAREG_TH_DEN (1<<1)
|
|
|
|
|
2009-09-23 07:46:02 +08:00
|
|
|
#define MX31_CSPISTATUS 0x14
|
|
|
|
#define MX31_STATUS_RR (1 << 3)
|
|
|
|
|
2016-09-02 04:39:58 +08:00
|
|
|
#define MX31_CSPI_TESTREG 0x1C
|
|
|
|
#define MX31_TEST_LBC (1 << 14)
|
|
|
|
|
2009-09-23 07:46:02 +08:00
|
|
|
/* These functions also work for the i.MX35, but be aware that
|
|
|
|
* the i.MX35 has a slightly different register layout for bits
|
|
|
|
* we do not use here.
|
|
|
|
*/
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
unsigned int val = 0;
|
|
|
|
|
|
|
|
if (enable & MXC_INT_TE)
|
|
|
|
val |= MX31_INTREG_TEEN;
|
|
|
|
if (enable & MXC_INT_RR)
|
|
|
|
val |= MX31_INTREG_RREN;
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(val, spi_imx->base + MXC_CSPIINT);
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx31_trigger(struct spi_imx_data *spi_imx)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
unsigned int reg;
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
reg = readl(spi_imx->base + MXC_CSPICTRL);
|
2009-09-23 07:46:02 +08:00
|
|
|
reg |= MX31_CSPICTRL_XCH;
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(reg, spi_imx->base + MXC_CSPICTRL);
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:05 +08:00
|
|
|
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
|
|
|
|
struct spi_message *msg)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:08 +08:00
|
|
|
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
|
2021-04-08 18:33:47 +08:00
|
|
|
struct spi_device *spi)
|
2010-09-10 15:19:18 +08:00
|
|
|
{
|
|
|
|
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
|
2016-09-02 04:38:40 +08:00
|
|
|
unsigned int clk;
|
2010-09-10 15:19:18 +08:00
|
|
|
|
2021-04-08 18:33:47 +08:00
|
|
|
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
|
2010-09-10 15:19:18 +08:00
|
|
|
MX31_CSPICTRL_DR_SHIFT;
|
2016-09-02 04:38:40 +08:00
|
|
|
spi_imx->spi_bus_clk = clk;
|
2010-09-10 15:19:18 +08:00
|
|
|
|
2011-07-10 01:16:39 +08:00
|
|
|
if (is_imx35_cspi(spi_imx)) {
|
2017-06-02 13:38:01 +08:00
|
|
|
reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
|
2011-07-10 01:16:38 +08:00
|
|
|
reg |= MX31_CSPICTRL_SSCTL;
|
|
|
|
} else {
|
2017-06-02 13:38:01 +08:00
|
|
|
reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
|
2011-07-10 01:16:38 +08:00
|
|
|
}
|
2010-09-10 15:19:18 +08:00
|
|
|
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CPHA)
|
2010-09-10 15:19:18 +08:00
|
|
|
reg |= MX31_CSPICTRL_PHA;
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CPOL)
|
2010-09-10 15:19:18 +08:00
|
|
|
reg |= MX31_CSPICTRL_POL;
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CS_HIGH)
|
2010-09-10 15:19:18 +08:00
|
|
|
reg |= MX31_CSPICTRL_SSPOL;
|
2020-06-26 04:02:52 +08:00
|
|
|
if (!spi->cs_gpiod)
|
2017-07-11 12:22:11 +08:00
|
|
|
reg |= (spi->chip_select) <<
|
2011-07-10 01:16:39 +08:00
|
|
|
(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
|
|
|
|
MX31_CSPICTRL_CS_SHIFT);
|
2010-09-10 15:19:18 +08:00
|
|
|
|
2016-10-20 06:42:25 +08:00
|
|
|
if (spi_imx->usedma)
|
|
|
|
reg |= MX31_CSPICTRL_SMC;
|
|
|
|
|
2010-09-10 15:19:18 +08:00
|
|
|
writel(reg, spi_imx->base + MXC_CSPICTRL);
|
|
|
|
|
2016-09-02 04:39:58 +08:00
|
|
|
reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
|
|
|
|
if (spi->mode & SPI_LOOP)
|
|
|
|
reg |= MX31_TEST_LBC;
|
|
|
|
else
|
|
|
|
reg &= ~MX31_TEST_LBC;
|
|
|
|
writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
|
|
|
|
|
2016-10-20 06:42:25 +08:00
|
|
|
if (spi_imx->usedma) {
|
2018-11-30 14:47:07 +08:00
|
|
|
/*
|
|
|
|
* configure DMA requests when RXFIFO is half full and
|
|
|
|
* when TXFIFO is half empty
|
|
|
|
*/
|
2016-10-20 06:42:25 +08:00
|
|
|
writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
|
|
|
|
spi_imx->base + MX31_CSPI_DMAREG);
|
|
|
|
}
|
|
|
|
|
2010-09-10 15:19:18 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static int mx31_rx_available(struct spi_imx_data *spi_imx)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
2009-10-02 06:44:28 +08:00
|
|
|
return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx31_reset(struct spi_imx_data *spi_imx)
|
2010-09-10 15:19:18 +08:00
|
|
|
{
|
|
|
|
/* drain receive buffer */
|
2011-07-10 01:16:38 +08:00
|
|
|
while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
|
2010-09-10 15:19:18 +08:00
|
|
|
readl(spi_imx->base + MXC_CSPIRXDATA);
|
|
|
|
}
|
|
|
|
|
2011-07-10 01:16:36 +08:00
|
|
|
#define MX21_INTREG_RR (1 << 4)
|
|
|
|
#define MX21_INTREG_TEEN (1 << 9)
|
|
|
|
#define MX21_INTREG_RREN (1 << 13)
|
|
|
|
|
|
|
|
#define MX21_CSPICTRL_POL (1 << 5)
|
|
|
|
#define MX21_CSPICTRL_PHA (1 << 6)
|
|
|
|
#define MX21_CSPICTRL_SSPOL (1 << 8)
|
|
|
|
#define MX21_CSPICTRL_XCH (1 << 9)
|
|
|
|
#define MX21_CSPICTRL_ENABLE (1 << 10)
|
|
|
|
#define MX21_CSPICTRL_MASTER (1 << 11)
|
|
|
|
#define MX21_CSPICTRL_DR_SHIFT 14
|
|
|
|
#define MX21_CSPICTRL_CS_SHIFT 19
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
unsigned int val = 0;
|
|
|
|
|
|
|
|
if (enable & MXC_INT_TE)
|
2011-07-10 01:16:36 +08:00
|
|
|
val |= MX21_INTREG_TEEN;
|
2009-09-23 07:46:02 +08:00
|
|
|
if (enable & MXC_INT_RR)
|
2011-07-10 01:16:36 +08:00
|
|
|
val |= MX21_INTREG_RREN;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(val, spi_imx->base + MXC_CSPIINT);
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx21_trigger(struct spi_imx_data *spi_imx)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
unsigned int reg;
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
reg = readl(spi_imx->base + MXC_CSPICTRL);
|
2011-07-10 01:16:36 +08:00
|
|
|
reg |= MX21_CSPICTRL_XCH;
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(reg, spi_imx->base + MXC_CSPICTRL);
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:05 +08:00
|
|
|
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
|
|
|
|
struct spi_message *msg)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:08 +08:00
|
|
|
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
|
2021-04-08 18:33:47 +08:00
|
|
|
struct spi_device *spi)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
2011-07-10 01:16:36 +08:00
|
|
|
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
|
2011-07-10 01:16:39 +08:00
|
|
|
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
|
2016-11-02 05:18:39 +08:00
|
|
|
unsigned int clk;
|
|
|
|
|
2021-04-08 18:33:47 +08:00
|
|
|
reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
|
2016-11-02 05:18:39 +08:00
|
|
|
<< MX21_CSPICTRL_DR_SHIFT;
|
|
|
|
spi_imx->spi_bus_clk = clk;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2017-06-02 13:38:01 +08:00
|
|
|
reg |= spi_imx->bits_per_word - 1;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CPHA)
|
2011-07-10 01:16:36 +08:00
|
|
|
reg |= MX21_CSPICTRL_PHA;
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CPOL)
|
2011-07-10 01:16:36 +08:00
|
|
|
reg |= MX21_CSPICTRL_POL;
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CS_HIGH)
|
2011-07-10 01:16:36 +08:00
|
|
|
reg |= MX21_CSPICTRL_SSPOL;
|
2020-06-26 04:02:52 +08:00
|
|
|
if (!spi->cs_gpiod)
|
2017-07-11 12:22:11 +08:00
|
|
|
reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(reg, spi_imx->base + MXC_CSPICTRL);
|
2009-09-23 07:46:02 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static int mx21_rx_available(struct spi_imx_data *spi_imx)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
2011-07-10 01:16:36 +08:00
|
|
|
return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx21_reset(struct spi_imx_data *spi_imx)
|
2010-09-10 15:19:18 +08:00
|
|
|
{
|
|
|
|
writel(1, spi_imx->base + MXC_RESET);
|
|
|
|
}
|
|
|
|
|
2009-09-23 07:46:02 +08:00
|
|
|
#define MX1_INTREG_RR (1 << 3)
|
|
|
|
#define MX1_INTREG_TEEN (1 << 8)
|
|
|
|
#define MX1_INTREG_RREN (1 << 11)
|
|
|
|
|
|
|
|
#define MX1_CSPICTRL_POL (1 << 4)
|
|
|
|
#define MX1_CSPICTRL_PHA (1 << 5)
|
|
|
|
#define MX1_CSPICTRL_XCH (1 << 8)
|
|
|
|
#define MX1_CSPICTRL_ENABLE (1 << 9)
|
|
|
|
#define MX1_CSPICTRL_MASTER (1 << 10)
|
|
|
|
#define MX1_CSPICTRL_DR_SHIFT 13
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
unsigned int val = 0;
|
|
|
|
|
|
|
|
if (enable & MXC_INT_TE)
|
|
|
|
val |= MX1_INTREG_TEEN;
|
|
|
|
if (enable & MXC_INT_RR)
|
|
|
|
val |= MX1_INTREG_RREN;
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(val, spi_imx->base + MXC_CSPIINT);
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx1_trigger(struct spi_imx_data *spi_imx)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
unsigned int reg;
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
reg = readl(spi_imx->base + MXC_CSPICTRL);
|
2009-09-23 07:46:02 +08:00
|
|
|
reg |= MX1_CSPICTRL_XCH;
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(reg, spi_imx->base + MXC_CSPICTRL);
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:05 +08:00
|
|
|
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
|
|
|
|
struct spi_message *msg)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:47:08 +08:00
|
|
|
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
|
2021-04-08 18:33:47 +08:00
|
|
|
struct spi_device *spi)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
|
|
|
unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
|
2016-09-02 04:38:40 +08:00
|
|
|
unsigned int clk;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2021-04-08 18:33:47 +08:00
|
|
|
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
|
2009-09-23 07:46:02 +08:00
|
|
|
MX1_CSPICTRL_DR_SHIFT;
|
2016-09-02 04:38:40 +08:00
|
|
|
spi_imx->spi_bus_clk = clk;
|
|
|
|
|
2017-06-02 13:38:01 +08:00
|
|
|
reg |= spi_imx->bits_per_word - 1;
|
2009-09-23 07:46:02 +08:00
|
|
|
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CPHA)
|
2009-09-23 07:46:02 +08:00
|
|
|
reg |= MX1_CSPICTRL_PHA;
|
2016-06-09 01:02:07 +08:00
|
|
|
if (spi->mode & SPI_CPOL)
|
2009-09-23 07:46:02 +08:00
|
|
|
reg |= MX1_CSPICTRL_POL;
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
writel(reg, spi_imx->base + MXC_CSPICTRL);
|
2009-09-23 07:46:02 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static int mx1_rx_available(struct spi_imx_data *spi_imx)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
2009-10-02 06:44:28 +08:00
|
|
|
return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2016-06-09 01:02:08 +08:00
|
|
|
static void mx1_reset(struct spi_imx_data *spi_imx)
|
2010-09-10 15:19:18 +08:00
|
|
|
{
|
|
|
|
writel(1, spi_imx->base + MXC_RESET);
|
|
|
|
}
|
|
|
|
|
2011-07-10 01:16:39 +08:00
|
|
|
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
|
|
|
|
.intctrl = mx1_intctrl,
|
2018-11-30 14:47:05 +08:00
|
|
|
.prepare_message = mx1_prepare_message,
|
2018-11-30 14:47:08 +08:00
|
|
|
.prepare_transfer = mx1_prepare_transfer,
|
2011-07-10 01:16:39 +08:00
|
|
|
.trigger = mx1_trigger,
|
|
|
|
.rx_available = mx1_rx_available,
|
|
|
|
.reset = mx1_reset,
|
2017-06-08 13:16:00 +08:00
|
|
|
.fifo_size = 8,
|
|
|
|
.has_dmamode = false,
|
2017-08-10 12:50:08 +08:00
|
|
|
.dynamic_burst = false,
|
2017-09-05 13:12:32 +08:00
|
|
|
.has_slavemode = false,
|
2011-07-10 01:16:39 +08:00
|
|
|
.devtype = IMX1_CSPI,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
|
|
|
|
.intctrl = mx21_intctrl,
|
2018-11-30 14:47:05 +08:00
|
|
|
.prepare_message = mx21_prepare_message,
|
2018-11-30 14:47:08 +08:00
|
|
|
.prepare_transfer = mx21_prepare_transfer,
|
2011-07-10 01:16:39 +08:00
|
|
|
.trigger = mx21_trigger,
|
|
|
|
.rx_available = mx21_rx_available,
|
|
|
|
.reset = mx21_reset,
|
2017-06-08 13:16:00 +08:00
|
|
|
.fifo_size = 8,
|
|
|
|
.has_dmamode = false,
|
2017-08-10 12:50:08 +08:00
|
|
|
.dynamic_burst = false,
|
2017-09-05 13:12:32 +08:00
|
|
|
.has_slavemode = false,
|
2011-07-10 01:16:39 +08:00
|
|
|
.devtype = IMX21_CSPI,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
|
|
|
|
/* The i.mx27 cspi shares the functions with the i.mx21 one */
|
|
|
|
.intctrl = mx21_intctrl,
|
2018-11-30 14:47:05 +08:00
|
|
|
.prepare_message = mx21_prepare_message,
|
2018-11-30 14:47:08 +08:00
|
|
|
.prepare_transfer = mx21_prepare_transfer,
|
2011-07-10 01:16:39 +08:00
|
|
|
.trigger = mx21_trigger,
|
|
|
|
.rx_available = mx21_rx_available,
|
|
|
|
.reset = mx21_reset,
|
2017-06-08 13:16:00 +08:00
|
|
|
.fifo_size = 8,
|
|
|
|
.has_dmamode = false,
|
2017-08-10 12:50:08 +08:00
|
|
|
.dynamic_burst = false,
|
2017-09-05 13:12:32 +08:00
|
|
|
.has_slavemode = false,
|
2011-07-10 01:16:39 +08:00
|
|
|
.devtype = IMX27_CSPI,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
|
|
|
|
.intctrl = mx31_intctrl,
|
2018-11-30 14:47:05 +08:00
|
|
|
.prepare_message = mx31_prepare_message,
|
2018-11-30 14:47:08 +08:00
|
|
|
.prepare_transfer = mx31_prepare_transfer,
|
2011-07-10 01:16:39 +08:00
|
|
|
.trigger = mx31_trigger,
|
|
|
|
.rx_available = mx31_rx_available,
|
|
|
|
.reset = mx31_reset,
|
2017-06-08 13:16:00 +08:00
|
|
|
.fifo_size = 8,
|
|
|
|
.has_dmamode = false,
|
2017-08-10 12:50:08 +08:00
|
|
|
.dynamic_burst = false,
|
2017-09-05 13:12:32 +08:00
|
|
|
.has_slavemode = false,
|
2011-07-10 01:16:39 +08:00
|
|
|
.devtype = IMX31_CSPI,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
|
|
|
|
/* The i.mx35 and later cspi share the functions with the i.mx31 one */
|
|
|
|
.intctrl = mx31_intctrl,
|
2018-11-30 14:47:05 +08:00
|
|
|
.prepare_message = mx31_prepare_message,
|
2018-11-30 14:47:08 +08:00
|
|
|
.prepare_transfer = mx31_prepare_transfer,
|
2011-07-10 01:16:39 +08:00
|
|
|
.trigger = mx31_trigger,
|
|
|
|
.rx_available = mx31_rx_available,
|
|
|
|
.reset = mx31_reset,
|
2017-06-08 13:16:00 +08:00
|
|
|
.fifo_size = 8,
|
|
|
|
.has_dmamode = true,
|
2017-08-10 12:50:08 +08:00
|
|
|
.dynamic_burst = false,
|
2017-09-05 13:12:32 +08:00
|
|
|
.has_slavemode = false,
|
2011-07-10 01:16:39 +08:00
|
|
|
.devtype = IMX35_CSPI,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
|
|
|
|
.intctrl = mx51_ecspi_intctrl,
|
2018-11-30 14:47:05 +08:00
|
|
|
.prepare_message = mx51_ecspi_prepare_message,
|
2018-11-30 14:47:08 +08:00
|
|
|
.prepare_transfer = mx51_ecspi_prepare_transfer,
|
2011-07-10 01:16:39 +08:00
|
|
|
.trigger = mx51_ecspi_trigger,
|
|
|
|
.rx_available = mx51_ecspi_rx_available,
|
|
|
|
.reset = mx51_ecspi_reset,
|
2018-10-10 18:32:42 +08:00
|
|
|
.setup_wml = mx51_setup_wml,
|
2017-06-08 13:16:00 +08:00
|
|
|
.fifo_size = 64,
|
|
|
|
.has_dmamode = true,
|
2017-08-10 12:50:08 +08:00
|
|
|
.dynamic_burst = true,
|
2017-09-05 13:12:32 +08:00
|
|
|
.has_slavemode = true,
|
|
|
|
.disable = mx51_ecspi_disable,
|
2011-07-10 01:16:39 +08:00
|
|
|
.devtype = IMX51_ECSPI,
|
|
|
|
};
|
|
|
|
|
2017-06-08 13:16:01 +08:00
|
|
|
static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
|
|
|
|
.intctrl = mx51_ecspi_intctrl,
|
2018-11-30 14:47:05 +08:00
|
|
|
.prepare_message = mx51_ecspi_prepare_message,
|
2018-11-30 14:47:08 +08:00
|
|
|
.prepare_transfer = mx51_ecspi_prepare_transfer,
|
2017-06-08 13:16:01 +08:00
|
|
|
.trigger = mx51_ecspi_trigger,
|
|
|
|
.rx_available = mx51_ecspi_rx_available,
|
|
|
|
.reset = mx51_ecspi_reset,
|
|
|
|
.fifo_size = 64,
|
|
|
|
.has_dmamode = true,
|
2017-09-05 13:12:32 +08:00
|
|
|
.has_slavemode = true,
|
|
|
|
.disable = mx51_ecspi_disable,
|
2017-06-08 13:16:01 +08:00
|
|
|
.devtype = IMX53_ECSPI,
|
|
|
|
};
|
|
|
|
|
2021-07-14 18:20:48 +08:00
|
|
|
static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
|
|
|
|
.intctrl = mx51_ecspi_intctrl,
|
|
|
|
.prepare_message = mx51_ecspi_prepare_message,
|
|
|
|
.prepare_transfer = mx51_ecspi_prepare_transfer,
|
|
|
|
.trigger = mx51_ecspi_trigger,
|
|
|
|
.rx_available = mx51_ecspi_rx_available,
|
|
|
|
.reset = mx51_ecspi_reset,
|
|
|
|
.setup_wml = mx51_setup_wml,
|
|
|
|
.fifo_size = 64,
|
|
|
|
.has_dmamode = true,
|
|
|
|
.dynamic_burst = true,
|
|
|
|
.has_slavemode = true,
|
|
|
|
.tx_glitch_fixed = true,
|
|
|
|
.disable = mx51_ecspi_disable,
|
|
|
|
.devtype = IMX51_ECSPI,
|
|
|
|
};
|
|
|
|
|
2011-07-10 01:16:41 +08:00
|
|
|
static const struct of_device_id spi_imx_dt_ids[] = {
|
|
|
|
{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
|
|
|
|
{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
|
|
|
|
{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
|
|
|
|
{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
|
|
|
|
{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
|
|
|
|
{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
|
2017-06-08 13:16:01 +08:00
|
|
|
{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
|
2021-07-14 18:20:48 +08:00
|
|
|
{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
|
2011-07-10 01:16:41 +08:00
|
|
|
{ /* sentinel */ }
|
|
|
|
};
|
2013-07-29 15:38:05 +08:00
|
|
|
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
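/*
 * spi_imx_set_burst_len() programs the burst length (in bits) of the next
 * exchange. It is used by the dynamic burst PIO path to send up to
 * MX51_ECSPI_CTRL_MAX_BURST bytes per hardware burst.
 */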
|
2011-07-10 01:16:41 +08:00
|
|
|
|
2018-07-17 22:31:54 +08:00
|
|
|
static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
|
|
|
|
{
|
|
|
|
u32 ctrl;
|
|
|
|
|
|
|
|
ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
|
|
|
|
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
|
|
|
|
ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
|
|
|
|
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
|
|
|
|
}
|
|
|
|
|
2009-10-02 06:44:28 +08:00
|
|
|
static void spi_imx_push(struct spi_imx_data *spi_imx)
|
2009-09-23 07:46:02 +08:00
|
|
|
{
|
2021-07-17 01:39:27 +08:00
|
|
|
unsigned int burst_len;
|
2018-07-17 22:31:54 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Reload the FIFO when the number of bytes remaining to be transferred
|
|
|
|
* in the current burst is 0. This only applies when bits_per_word is a
|
|
|
|
* multiple of 8.
|
|
|
|
*/
|
|
|
|
if (!spi_imx->remainder) {
|
|
|
|
if (spi_imx->dynamic_burst) {
|
|
|
|
|
|
|
|
/* We need to deal with unaligned data first */
|
|
|
|
burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
|
|
|
|
|
|
|
|
if (!burst_len)
|
|
|
|
burst_len = MX51_ECSPI_CTRL_MAX_BURST;
|
|
|
|
|
|
|
|
spi_imx_set_burst_len(spi_imx, burst_len * 8);
|
|
|
|
|
|
|
|
spi_imx->remainder = burst_len;
|
|
|
|
} else {
|
2021-07-17 01:39:27 +08:00
|
|
|
spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
|
2018-07-17 22:31:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-08 13:16:00 +08:00
|
|
|
while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
|
2009-10-02 06:44:28 +08:00
|
|
|
if (!spi_imx->count)
|
2009-09-23 07:46:02 +08:00
|
|
|
break;
|
2018-07-17 22:31:54 +08:00
|
|
|
if (spi_imx->dynamic_burst &&
|
2021-07-17 01:39:27 +08:00
|
|
|
spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
|
2017-08-10 12:50:08 +08:00
|
|
|
break;
|
2009-10-02 06:44:28 +08:00
|
|
|
spi_imx->tx(spi_imx);
|
|
|
|
spi_imx->txfifo++;
|
2009-09-23 07:46:02 +08:00
|
|
|
}
|
|
|
|
|
2017-09-05 13:12:32 +08:00
|
|
|
if (!spi_imx->slave_mode)
|
|
|
|
spi_imx->devtype_data->trigger(spi_imx);
|
2009-09-23 07:46:02 +08:00
|
|
|
}
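/*
 * PIO-mode interrupt handler: drain the RX FIFO, refill the TX FIFO while
 * data remains, and complete xfer_done once nothing is left in flight.
 */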
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
        struct spi_imx_data *spi_imx = dev_id;

        while (spi_imx->txfifo &&
               spi_imx->devtype_data->rx_available(spi_imx)) {
                spi_imx->rx(spi_imx);
                spi_imx->txfifo--;
        }

        if (spi_imx->count) {
                spi_imx_push(spi_imx);
                return IRQ_HANDLED;
        }

        if (spi_imx->txfifo) {
                /* No data left to push, but still waiting for rx data,
                 * enable receive data available interrupt.
                 */
                spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_RR);
                return IRQ_HANDLED;
        }

        spi_imx->devtype_data->intctrl(spi_imx, 0);
        complete(&spi_imx->xfer_done);

        return IRQ_HANDLED;
}

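/*
 * Configure the dmaengine slave channels: the bus width follows the
 * current bits_per_word and the maximum burst follows the watermark
 * level (wml).
 */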
static int spi_imx_dma_configure(struct spi_controller *controller)
{
        int ret;
        enum dma_slave_buswidth buswidth;
        struct dma_slave_config rx = {}, tx = {};
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

        switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
        case 4:
                buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        case 2:
                buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        case 1:
                buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        default:
                return -EINVAL;
        }

        tx.direction = DMA_MEM_TO_DEV;
        tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
        tx.dst_addr_width = buswidth;
        tx.dst_maxburst = spi_imx->wml;
        ret = dmaengine_slave_config(controller->dma_tx, &tx);
        if (ret) {
                dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
                return ret;
        }

        rx.direction = DMA_DEV_TO_MEM;
        rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
        rx.src_addr_width = buswidth;
        rx.src_maxburst = spi_imx->wml;
        ret = dmaengine_slave_config(controller->dma_rx, &rx);
        if (ret) {
                dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
                return ret;
        }

        return 0;
}

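/*
 * Per-transfer setup: pick the bus clock, select the PIO tx/rx helpers
 * (dynamic-burst, fixed-width or i.MX53 slave variants) and decide
 * whether this transfer will use DMA.
 */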
static int spi_imx_setupxfer(struct spi_device *spi,
                             struct spi_transfer *t)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);

        if (!t)
                return 0;

        if (!t->speed_hz) {
                if (!spi->max_speed_hz) {
                        dev_err(&spi->dev, "no speed_hz provided!\n");
                        return -EINVAL;
                }
                dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
                spi_imx->spi_bus_clk = spi->max_speed_hz;
        } else {
                spi_imx->spi_bus_clk = t->speed_hz;
        }

        spi_imx->bits_per_word = t->bits_per_word;

        /*
         * Initialize the functions for transfer. To transfer non byte-aligned
         * words, we have to use multiple word-size bursts; we can't use
         * dynamic_burst in that case.
         */
        if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
            !(spi->mode & SPI_CS_WORD) &&
            (spi_imx->bits_per_word == 8 ||
             spi_imx->bits_per_word == 16 ||
             spi_imx->bits_per_word == 32)) {

                spi_imx->rx = spi_imx_buf_rx_swap;
                spi_imx->tx = spi_imx_buf_tx_swap;
                spi_imx->dynamic_burst = 1;

        } else {
                if (spi_imx->bits_per_word <= 8) {
                        spi_imx->rx = spi_imx_buf_rx_u8;
                        spi_imx->tx = spi_imx_buf_tx_u8;
                } else if (spi_imx->bits_per_word <= 16) {
                        spi_imx->rx = spi_imx_buf_rx_u16;
                        spi_imx->tx = spi_imx_buf_tx_u16;
                } else {
                        spi_imx->rx = spi_imx_buf_rx_u32;
                        spi_imx->tx = spi_imx_buf_tx_u32;
                }
                spi_imx->dynamic_burst = 0;
        }

        if (spi_imx_can_dma(spi_imx->controller, spi, t))
                spi_imx->usedma = true;
        else
                spi_imx->usedma = false;

        spi_imx->rx_only = ((t->tx_buf == NULL)
                || (t->tx_buf == spi->controller->dummy_tx));

        if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
                spi_imx->rx = mx53_ecspi_rx_slave;
                spi_imx->tx = mx53_ecspi_tx_slave;
                spi_imx->slave_burst = t->len;
        }

        spi_imx->devtype_data->prepare_transfer(spi_imx, spi);

        return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
        struct spi_controller *controller = spi_imx->controller;

        if (controller->dma_rx) {
                dma_release_channel(controller->dma_rx);
                controller->dma_rx = NULL;
        }

        if (controller->dma_tx) {
                dma_release_channel(controller->dma_tx);
                controller->dma_tx = NULL;
        }
}

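/*
 * Request the TX and RX dmaengine channels and advertise the DMA
 * capabilities to the SPI core; any channel acquired so far is released
 * on failure.
 */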
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
                             struct spi_controller *controller)
{
        int ret;

        spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

        /* Prepare for TX DMA: */
        controller->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(controller->dma_tx)) {
                ret = PTR_ERR(controller->dma_tx);
                dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
                controller->dma_tx = NULL;
                goto err;
        }

        /* Prepare for RX DMA: */
        controller->dma_rx = dma_request_chan(dev, "rx");
        if (IS_ERR(controller->dma_rx)) {
                ret = PTR_ERR(controller->dma_rx);
                dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
                controller->dma_rx = NULL;
                goto err;
        }

        init_completion(&spi_imx->dma_rx_completion);
        init_completion(&spi_imx->dma_tx_completion);
        controller->can_dma = spi_imx_can_dma;
        controller->max_dma_len = MAX_SDMA_BD_BYTES;
        spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
                                     SPI_CONTROLLER_MUST_TX;

        return 0;
err:
        spi_imx_sdma_exit(spi_imx);
        return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
        struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

        complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
        struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

        complete(&spi_imx->dma_tx_completion);
}

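/*
 * Estimate a transfer timeout in jiffies: the raw wire time at the current
 * bus clock plus a one second scheduling margin, then doubled.
 */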
static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
{
        unsigned long timeout = 0;

        /* Time with actual data transfer and CS change delay related to HW */
        timeout = (8 + 4) * size / spi_imx->spi_bus_clk;

        /* Add extra second for scheduler related activities */
        timeout += 1;

        /* Double calculated timeout */
        return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}

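/*
 * Perform one transfer via dmaengine: derive the watermark level from the
 * last RX scatterlist entry, prepare RX before TX (TX setup starts the
 * transfer) and wait for both completions within the calculated timeout.
 */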
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
                                struct spi_transfer *transfer)
{
        struct dma_async_tx_descriptor *desc_tx, *desc_rx;
        unsigned long transfer_timeout;
        unsigned long timeout;
        struct spi_controller *controller = spi_imx->controller;
        struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
        struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
        unsigned int bytes_per_word, i;
        int ret;

        /* Get the right burst length from the last sg to ensure no tail data */
        bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
        for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
                if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
                        break;
        }
        /* Use 1 as wml if no suitable burst length was found */
        if (i == 0)
                i = 1;

        spi_imx->wml = i;

        ret = spi_imx_dma_configure(controller);
        if (ret)
                goto dma_failure_no_start;

        if (!spi_imx->devtype_data->setup_wml) {
                dev_err(spi_imx->dev, "No setup_wml()?\n");
                ret = -EINVAL;
                goto dma_failure_no_start;
        }
        spi_imx->devtype_data->setup_wml(spi_imx);

        /*
         * The TX DMA setup starts the transfer, so make sure RX is configured
         * before TX.
         */
        desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
                                rx->sgl, rx->nents, DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx) {
                ret = -EINVAL;
                goto dma_failure_no_start;
        }

        desc_rx->callback = spi_imx_dma_rx_callback;
        desc_rx->callback_param = (void *)spi_imx;
        dmaengine_submit(desc_rx);
        reinit_completion(&spi_imx->dma_rx_completion);
        dma_async_issue_pending(controller->dma_rx);

        desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
                                tx->sgl, tx->nents, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
                dmaengine_terminate_all(controller->dma_tx);
                dmaengine_terminate_all(controller->dma_rx);
                return -EINVAL;
        }

        desc_tx->callback = spi_imx_dma_tx_callback;
        desc_tx->callback_param = (void *)spi_imx;
        dmaengine_submit(desc_tx);
        reinit_completion(&spi_imx->dma_tx_completion);
        dma_async_issue_pending(controller->dma_tx);

        transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

        /* Wait for SDMA to finish the data transfer. */
        timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
                dmaengine_terminate_all(controller->dma_tx);
                dmaengine_terminate_all(controller->dma_rx);
                return -ETIMEDOUT;
        }

        timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(&controller->dev, "I/O Error in DMA RX\n");
                spi_imx->devtype_data->reset(spi_imx);
                dmaengine_terminate_all(controller->dma_rx);
                return -ETIMEDOUT;
        }

        return 0;
/* fallback to pio */
dma_failure_no_start:
        transfer->error |= SPI_TRANS_FAIL_NO_START;
        return ret;
}

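/*
 * Interrupt-driven PIO transfer: prime the FIFO, enable the TX empty
 * interrupt and wait for the ISR to signal completion.
 */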
static int spi_imx_pio_transfer(struct spi_device *spi,
                                struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        unsigned long transfer_timeout;
        unsigned long timeout;

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        reinit_completion(&spi_imx->xfer_done);

        spi_imx_push(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

        transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

        timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(&spi->dev, "I/O Error in PIO\n");
                spi_imx->devtype_data->reset(spi_imx);
                return -ETIMEDOUT;
        }

        return 0;
}

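/*
 * Polling transfer for short transfers, done entirely in the caller's
 * context; falls back to interrupt-driven PIO if the timeout expires.
 */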
static int spi_imx_poll_transfer(struct spi_device *spi,
                                 struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        unsigned long timeout;

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        /* Fill the FIFO before the timeout calculation: if we get
         * interrupted here, the data is already being transferred by
         * the HW while we are interrupted.
         */
        spi_imx_push(spi_imx);

        timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
        while (spi_imx->txfifo) {
                /* RX */
                while (spi_imx->txfifo &&
                       spi_imx->devtype_data->rx_available(spi_imx)) {
                        spi_imx->rx(spi_imx);
                        spi_imx->txfifo--;
                }

                /* TX */
                if (spi_imx->count) {
                        spi_imx_push(spi_imx);
                        continue;
                }

                if (spi_imx->txfifo &&
                    time_after(jiffies, timeout)) {

                        dev_err_ratelimited(&spi->dev,
                                            "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
                                            jiffies - timeout);

                        /* fall back to interrupt mode */
                        return spi_imx_pio_transfer(spi, transfer);
                }
        }

        return 0;
}

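/*
 * PIO transfer in slave mode: wait interruptibly for the external master
 * to clock the data, honouring slave_abort requests.
 */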
static int spi_imx_pio_transfer_slave(struct spi_device *spi,
                                      struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        int ret = 0;

        if (is_imx53_ecspi(spi_imx) &&
            transfer->len > MX53_MAX_TRANSFER_BYTES) {
                dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
                        MX53_MAX_TRANSFER_BYTES);
                return -EMSGSIZE;
        }

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        reinit_completion(&spi_imx->xfer_done);
        spi_imx->slave_aborted = false;

        spi_imx_push(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);

        if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
            spi_imx->slave_aborted) {
                dev_dbg(&spi->dev, "interrupted\n");
                ret = -EINTR;
        }

        /* ECSPI has a HW issue in Slave mode: after 64 words have been
         * written to the TXFIFO, ECSPI_TXDATA keeps shifting out the last
         * word even once the TXFIFO becomes empty, so we have to disable
         * ECSPI when the transfer completes in slave mode.
         */
        if (spi_imx->devtype_data->disable)
                spi_imx->devtype_data->disable(spi_imx);

        return ret;
}

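/*
 * transfer_one() callback: flush stale RX data, then dispatch to the
 * slave, DMA, polling or interrupt-driven PIO path.
 */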
static int spi_imx_transfer_one(struct spi_controller *controller,
                                struct spi_device *spi,
                                struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        unsigned long hz_per_byte, byte_limit;

        spi_imx_setupxfer(spi, transfer);
        transfer->effective_speed_hz = spi_imx->spi_bus_clk;

        /* flush rxfifo before transfer */
        while (spi_imx->devtype_data->rx_available(spi_imx))
                readl(spi_imx->base + MXC_CSPIRXDATA);

        if (spi_imx->slave_mode)
                return spi_imx_pio_transfer_slave(spi, transfer);

        /*
         * If we decided in spi_imx_can_dma() that we want to do a DMA
         * transfer, the SPI transfer has already been mapped, so we
         * have to do the DMA transfer here.
         */
        if (spi_imx->usedma)
                return spi_imx_dma_transfer(spi_imx, transfer);

        /*
         * Calculate the estimated time in us the transfer runs. Find
         * the number of Hz per byte per polling limit.
         */
        hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
        byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;

        /* run in polling mode for short transfers */
        if (transfer->len < byte_limit)
                return spi_imx_poll_transfer(spi, transfer);

        return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
        dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
                spi->mode, spi->bits_per_word, spi->max_speed_hz);

        return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

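/*
 * prepare_message()/unprepare_message() bracket each message with a
 * runtime PM reference so that the clocks stay enabled for its duration.
 */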
static int
spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
        int ret;

        ret = pm_runtime_resume_and_get(spi_imx->dev);
        if (ret < 0) {
                dev_err(spi_imx->dev, "failed to enable clock\n");
                return ret;
        }

        ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
        if (ret) {
                pm_runtime_mark_last_busy(spi_imx->dev);
                pm_runtime_put_autosuspend(spi_imx->dev);
        }

        return ret;
}

static int
spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

        pm_runtime_mark_last_busy(spi_imx->dev);
        pm_runtime_put_autosuspend(spi_imx->dev);
        return 0;
}

static int spi_imx_slave_abort(struct spi_controller *controller)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

        spi_imx->slave_aborted = true;
        complete(&spi_imx->xfer_done);

        return 0;
}

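/*
 * Probe: allocate the controller (master or slave role comes from the
 * "spi-slave" device tree property), map resources, set up clocks,
 * runtime PM and optional DMA, then register the controller.
 */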
static int spi_imx_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct spi_controller *controller;
        struct spi_imx_data *spi_imx;
        struct resource *res;
        int ret, irq, spi_drctl;
        const struct spi_imx_devtype_data *devtype_data =
                        of_device_get_match_data(&pdev->dev);
        bool slave_mode;
        u32 val;

        slave_mode = devtype_data->has_slavemode &&
                     of_property_read_bool(np, "spi-slave");
        if (slave_mode)
                controller = spi_alloc_slave(&pdev->dev,
                                             sizeof(struct spi_imx_data));
        else
                controller = spi_alloc_master(&pdev->dev,
                                              sizeof(struct spi_imx_data));
        if (!controller)
                return -ENOMEM;

        ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
        if ((ret < 0) || (spi_drctl >= 0x3)) {
                /* '11' is reserved */
                spi_drctl = 0;
        }

        platform_set_drvdata(pdev, controller);

        controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
        controller->bus_num = np ? -1 : pdev->id;
        controller->use_gpio_descriptors = true;

        spi_imx = spi_controller_get_devdata(controller);
        spi_imx->controller = controller;
        spi_imx->dev = &pdev->dev;
        spi_imx->slave_mode = slave_mode;

        spi_imx->devtype_data = devtype_data;

        /*
         * Get the number of chip selects from the device properties. This
         * can come from the device tree or from board files; if it is not
         * defined, a default of 3 chip selects is used, as all the legacy
         * board files have <= 3 chip selects.
         */
        if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
                controller->num_chipselect = val;
        else
                controller->num_chipselect = 3;

        spi_imx->controller->transfer_one = spi_imx_transfer_one;
        spi_imx->controller->setup = spi_imx_setup;
        spi_imx->controller->cleanup = spi_imx_cleanup;
        spi_imx->controller->prepare_message = spi_imx_prepare_message;
        spi_imx->controller->unprepare_message = spi_imx_unprepare_message;
        spi_imx->controller->slave_abort = spi_imx_slave_abort;
        spi_imx->controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS;

        if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
            is_imx53_ecspi(spi_imx))
                spi_imx->controller->mode_bits |= SPI_LOOP | SPI_READY;

        if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
                spi_imx->controller->mode_bits |= SPI_RX_CPHA_FLIP;

        if (is_imx51_ecspi(spi_imx) &&
            device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
                /*
                 * When using HW-CS implementing SPI_CS_WORD can be done by just
                 * setting the burst length to the word size. This is
                 * considerably faster than manually controlling the CS.
                 */
                spi_imx->controller->mode_bits |= SPI_CS_WORD;

        spi_imx->spi_drctl = spi_drctl;

        init_completion(&spi_imx->xfer_done);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(spi_imx->base)) {
                ret = PTR_ERR(spi_imx->base);
                goto out_controller_put;
        }
        spi_imx->base_phys = res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto out_controller_put;
        }

        ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
                               dev_name(&pdev->dev), spi_imx);
        if (ret) {
                dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
                goto out_controller_put;
        }

        spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(spi_imx->clk_ipg)) {
                ret = PTR_ERR(spi_imx->clk_ipg);
                goto out_controller_put;
        }

        spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
        if (IS_ERR(spi_imx->clk_per)) {
                ret = PTR_ERR(spi_imx->clk_per);
                goto out_controller_put;
        }

        ret = clk_prepare_enable(spi_imx->clk_per);
        if (ret)
                goto out_controller_put;

        ret = clk_prepare_enable(spi_imx->clk_ipg);
        if (ret)
                goto out_put_per;

        pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(spi_imx->dev);
        pm_runtime_get_noresume(spi_imx->dev);
        pm_runtime_set_active(spi_imx->dev);
        pm_runtime_enable(spi_imx->dev);

        spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
        /*
         * Only validated on i.mx35 and i.mx6 now, can remove the constraint
         * if validated on other chips.
         */
        if (spi_imx->devtype_data->has_dmamode) {
                ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
                if (ret == -EPROBE_DEFER)
                        goto out_runtime_pm_put;

                if (ret < 0)
                        dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
                                ret);
        }

        spi_imx->devtype_data->reset(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, 0);

        controller->dev.of_node = pdev->dev.of_node;
        ret = spi_register_controller(controller);
        if (ret) {
                dev_err_probe(&pdev->dev, ret, "register controller failed\n");
                goto out_register_controller;
        }

        pm_runtime_mark_last_busy(spi_imx->dev);
        pm_runtime_put_autosuspend(spi_imx->dev);

        return ret;

out_register_controller:
        if (spi_imx->devtype_data->has_dmamode)
                spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
        pm_runtime_dont_use_autosuspend(spi_imx->dev);
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_disable(spi_imx->dev);

        clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
        clk_disable_unprepare(spi_imx->clk_per);
out_controller_put:
        spi_controller_put(controller);

        return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
        struct spi_controller *controller = platform_get_drvdata(pdev);
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
        int ret;

        spi_unregister_controller(controller);

        ret = pm_runtime_resume_and_get(spi_imx->dev);
        if (ret < 0) {
                dev_err(spi_imx->dev, "failed to enable clock\n");
                return ret;
        }

        writel(0, spi_imx->base + MXC_CSPICTRL);

        pm_runtime_dont_use_autosuspend(spi_imx->dev);
        pm_runtime_put_sync(spi_imx->dev);
        pm_runtime_disable(spi_imx->dev);

        spi_imx_sdma_exit(spi_imx);

        return 0;
}

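/* Runtime PM: gate the per and ipg clocks while the controller is idle. */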
static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
{
        struct spi_controller *controller = dev_get_drvdata(dev);
        struct spi_imx_data *spi_imx;
        int ret;

        spi_imx = spi_controller_get_devdata(controller);

        ret = clk_prepare_enable(spi_imx->clk_per);
        if (ret)
                return ret;

        ret = clk_prepare_enable(spi_imx->clk_ipg);
        if (ret) {
                clk_disable_unprepare(spi_imx->clk_per);
                return ret;
        }

        return 0;
}

static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
{
        struct spi_controller *controller = dev_get_drvdata(dev);
        struct spi_imx_data *spi_imx;

        spi_imx = spi_controller_get_devdata(controller);

        clk_disable_unprepare(spi_imx->clk_per);
        clk_disable_unprepare(spi_imx->clk_ipg);

        return 0;
}

static int __maybe_unused spi_imx_suspend(struct device *dev)
{
        pinctrl_pm_select_sleep_state(dev);
        return 0;
}

static int __maybe_unused spi_imx_resume(struct device *dev)
{
        pinctrl_pm_select_default_state(dev);
        return 0;
}

static const struct dev_pm_ops imx_spi_pm = {
        SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
                           spi_imx_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};

static struct platform_driver spi_imx_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = spi_imx_dt_ids,
                .pm = &imx_spi_pm,
        },
        .probe = spi_imx_probe,
        .remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("i.MX SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);