2019-05-03 04:23:30 +08:00
|
|
|
// SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
/* Copyright (c) 2016-2018, NXP Semiconductors
|
|
|
|
* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
|
|
|
|
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
|
|
|
|
*/
|
|
|
|
#include <linux/spi/spi.h>
|
|
|
|
#include <linux/packing.h>
|
|
|
|
#include "sja1105.h"
|
|
|
|
|
|
|
|
/* Size of the packed reset command buffer (presumably consumed by the
 * switch reset path elsewhere in this driver -- not visible here).
 */
#define SJA1105_SIZE_RESET_CMD 4
/* Every SPI message begins with a 4-byte control-word header, packed by
 * sja1105_spi_message_pack() below.
 */
#define SJA1105_SIZE_SPI_MSG_HEADER 4
/* Maximum payload per SPI message: 64 32-bit words (256 bytes). Larger
 * caller buffers are split into chunks of at most this size.
 */
#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
|
|
|
|
/* One scatter/gather unit of a larger SPI transaction: a window into the
 * caller-supplied buffer together with the device register address that
 * this piece of the buffer maps to.
 */
struct sja1105_chunk {
	u8 *buf;	/* Pointer into the caller-supplied data buffer */
	size_t len;	/* Chunk length in bytes (<= SJA1105_SIZE_SPI_MSG_MAXLEN) */
	u64 reg_addr;	/* Absolute register address addressed by this chunk */
};
|
2019-05-03 04:23:30 +08:00
|
|
|
|
|
|
|
static void
|
|
|
|
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
|
|
|
|
{
|
|
|
|
const int size = SJA1105_SIZE_SPI_MSG_HEADER;
|
|
|
|
|
|
|
|
memset(buf, 0, size);
|
|
|
|
|
|
|
|
sja1105_pack(buf, &msg->access, 31, 31, size);
|
|
|
|
sja1105_pack(buf, &msg->read_count, 30, 25, size);
|
|
|
|
sja1105_pack(buf, &msg->address, 24, 4, size);
|
|
|
|
}
|
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
/* The spi_transfer array is laid out interleaved, two entries per chunk:
 * [hdr_xfer 0, chunk_xfer 0, hdr_xfer 1, chunk_xfer 1, ...], i.e. each
 * message-header transfer is immediately followed by its payload transfer.
 */
#define sja1105_hdr_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk))
#define sja1105_chunk_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk) + 1)
/* Packed message headers live back to back in one contiguous allocation;
 * this selects the SJA1105_SIZE_SPI_MSG_HEADER-byte slot of @chunk.
 */
#define sja1105_hdr_buf(hdr_bufs, chunk) \
	((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
|
|
|
|
|
2019-05-03 04:23:30 +08:00
|
|
|
/* If @rw is:
|
|
|
|
* - SPI_WRITE: creates and sends an SPI write message at absolute
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
* address reg_addr, taking @len bytes from *buf
|
2019-05-03 04:23:30 +08:00
|
|
|
* - SPI_READ: creates and sends an SPI read message from absolute
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
* address reg_addr, writing @len bytes into *buf
|
2019-05-03 04:23:30 +08:00
|
|
|
*/
|
2019-11-09 19:32:22 +08:00
|
|
|
static int sja1105_xfer(const struct sja1105_private *priv,
|
|
|
|
sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
|
|
|
|
size_t len, struct ptp_system_timestamp *ptp_sts)
|
2019-05-03 04:23:30 +08:00
|
|
|
{
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
struct sja1105_chunk chunk = {
|
|
|
|
.len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
|
|
|
|
.reg_addr = reg_addr,
|
|
|
|
.buf = buf,
|
2019-10-12 06:31:14 +08:00
|
|
|
};
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
struct spi_device *spi = priv->spidev;
|
|
|
|
struct spi_transfer *xfers;
|
|
|
|
int num_chunks;
|
|
|
|
int rc, i = 0;
|
|
|
|
u8 *hdr_bufs;
|
2019-05-03 04:23:30 +08:00
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
/* One transfer for each message header, one for each message
|
|
|
|
* payload (chunk).
|
|
|
|
*/
|
|
|
|
xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!xfers)
|
|
|
|
return -ENOMEM;
|
2019-05-03 04:23:30 +08:00
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
/* Packed buffers for the num_chunks SPI message headers,
|
|
|
|
* stored as a contiguous array
|
|
|
|
*/
|
|
|
|
hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!hdr_bufs) {
|
|
|
|
kfree(xfers);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2019-05-03 04:23:30 +08:00
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
for (i = 0; i < num_chunks; i++) {
|
|
|
|
struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
|
|
|
|
struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
|
|
|
|
u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
|
2019-11-09 19:32:22 +08:00
|
|
|
struct spi_transfer *ptp_sts_xfer;
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
struct sja1105_spi_message msg;
|
|
|
|
|
|
|
|
/* Populate the transfer's header buffer */
|
|
|
|
msg.address = chunk.reg_addr;
|
|
|
|
msg.access = rw;
|
|
|
|
if (rw == SPI_READ)
|
|
|
|
msg.read_count = chunk.len / 4;
|
|
|
|
else
|
|
|
|
/* Ignored */
|
|
|
|
msg.read_count = 0;
|
|
|
|
sja1105_spi_message_pack(hdr_buf, &msg);
|
|
|
|
hdr_xfer->tx_buf = hdr_buf;
|
|
|
|
hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
|
|
|
|
|
|
|
|
/* Populate the transfer's data buffer */
|
|
|
|
if (rw == SPI_READ)
|
|
|
|
chunk_xfer->rx_buf = chunk.buf;
|
|
|
|
else
|
|
|
|
chunk_xfer->tx_buf = chunk.buf;
|
|
|
|
chunk_xfer->len = chunk.len;
|
|
|
|
|
2019-11-09 19:32:22 +08:00
|
|
|
/* Request timestamping for the transfer. Instead of letting
|
|
|
|
* callers specify which byte they want to timestamp, we can
|
|
|
|
* make certain assumptions:
|
|
|
|
* - A read operation will request a software timestamp when
|
|
|
|
* what's being read is the PTP time. That is snapshotted by
|
|
|
|
* the switch hardware at the end of the command portion
|
|
|
|
* (hdr_xfer).
|
|
|
|
* - A write operation will request a software timestamp on
|
|
|
|
* actions that modify the PTP time. Taking clock stepping as
|
|
|
|
* an example, the switch writes the PTP time at the end of
|
|
|
|
* the data portion (chunk_xfer).
|
|
|
|
*/
|
|
|
|
if (rw == SPI_READ)
|
|
|
|
ptp_sts_xfer = hdr_xfer;
|
|
|
|
else
|
|
|
|
ptp_sts_xfer = chunk_xfer;
|
|
|
|
ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
|
|
|
|
ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
|
|
|
|
ptp_sts_xfer->ptp_sts = ptp_sts;
|
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
/* Calculate next chunk */
|
|
|
|
chunk.buf += chunk.len;
|
|
|
|
chunk.reg_addr += chunk.len / 4;
|
|
|
|
chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
|
|
|
|
SJA1105_SIZE_SPI_MSG_MAXLEN);
|
|
|
|
|
|
|
|
/* De-assert the chip select after each chunk. */
|
|
|
|
if (chunk.len)
|
|
|
|
chunk_xfer->cs_change = 1;
|
|
|
|
}
|
2019-10-12 06:31:14 +08:00
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
|
|
|
|
if (rc < 0)
|
2019-10-12 06:31:14 +08:00
|
|
|
dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
kfree(hdr_bufs);
|
|
|
|
kfree(xfers);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
return rc;
|
2019-05-03 04:23:30 +08:00
|
|
|
}
|
|
|
|
|
2019-11-09 19:32:22 +08:00
|
|
|
/* Transfer an arbitrary-length buffer over SPI without collecting a PTP
 * system timestamp.  Convenience wrapper around sja1105_xfer() for the
 * majority of callers that pass a NULL ptp_sts.
 */
int sja1105_xfer_buf(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr,
		     u8 *buf, size_t len)
{
	return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
}
|
|
|
|
|
2019-05-03 04:23:30 +08:00
|
|
|
/* If @rw is:
|
|
|
|
* - SPI_WRITE: creates and sends an SPI write message at absolute
|
2019-10-02 03:18:00 +08:00
|
|
|
* address reg_addr
|
2019-05-03 04:23:30 +08:00
|
|
|
* - SPI_READ: creates and sends an SPI read message from absolute
|
2019-10-02 03:18:00 +08:00
|
|
|
* address reg_addr
|
2019-05-03 04:23:30 +08:00
|
|
|
*
|
|
|
|
* The u64 *value is unpacked, meaning that it's stored in the native
|
|
|
|
* CPU endianness and directly usable by software running on the core.
|
|
|
|
*/
|
2019-10-02 03:18:00 +08:00
|
|
|
int sja1105_xfer_u64(const struct sja1105_private *priv,
|
2019-11-09 19:32:22 +08:00
|
|
|
sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
|
|
|
|
struct ptp_system_timestamp *ptp_sts)
|
2019-05-03 04:23:30 +08:00
|
|
|
{
|
2019-10-02 03:18:00 +08:00
|
|
|
u8 packed_buf[8];
|
2019-05-03 04:23:30 +08:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (rw == SPI_WRITE)
|
2019-10-02 03:18:00 +08:00
|
|
|
sja1105_pack(packed_buf, value, 63, 0, 8);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
2019-11-09 19:32:22 +08:00
|
|
|
rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
|
|
|
if (rw == SPI_READ)
|
2019-10-02 03:18:00 +08:00
|
|
|
sja1105_unpack(packed_buf, value, 63, 0, 8);
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Same as above, but transfers only a 4 byte word */
|
|
|
|
int sja1105_xfer_u32(const struct sja1105_private *priv,
|
2019-11-09 19:32:22 +08:00
|
|
|
sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
|
|
|
|
struct ptp_system_timestamp *ptp_sts)
|
2019-10-02 03:18:00 +08:00
|
|
|
{
|
|
|
|
u8 packed_buf[4];
|
|
|
|
u64 tmp;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (rw == SPI_WRITE) {
|
|
|
|
/* The packing API only supports u64 as CPU word size,
|
|
|
|
* so we need to convert.
|
|
|
|
*/
|
|
|
|
tmp = *value;
|
|
|
|
sja1105_pack(packed_buf, &tmp, 31, 0, 4);
|
|
|
|
}
|
|
|
|
|
2019-11-09 19:32:22 +08:00
|
|
|
rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
|
2019-10-02 03:18:00 +08:00
|
|
|
|
|
|
|
if (rw == SPI_READ) {
|
|
|
|
sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
|
|
|
|
*value = tmp;
|
|
|
|
}
|
2019-05-03 04:23:30 +08:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2019-11-13 06:16:41 +08:00
|
|
|
static int sja1105et_reset_cmd(struct dsa_switch *ds)
|
2019-05-03 04:23:30 +08:00
|
|
|
{
|
2019-11-13 06:16:41 +08:00
|
|
|
struct sja1105_private *priv = ds->priv;
|
2019-05-03 04:23:30 +08:00
|
|
|
const struct sja1105_regs *regs = priv->info->regs;
|
2019-11-13 06:16:41 +08:00
|
|
|
u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
|
|
|
|
const int size = SJA1105_SIZE_RESET_CMD;
|
|
|
|
u64 cold_rst = 1;
|
2019-05-03 04:23:30 +08:00
|
|
|
|
2019-11-13 06:16:41 +08:00
|
|
|
sja1105_pack(packed_buf, &cold_rst, 3, 3, size);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
2019-10-02 03:18:01 +08:00
|
|
|
return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
|
|
|
|
SJA1105_SIZE_RESET_CMD);
|
2019-05-03 04:23:30 +08:00
|
|
|
}
|
|
|
|
|
2019-11-13 06:16:41 +08:00
|
|
|
static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
|
2019-05-03 04:23:30 +08:00
|
|
|
{
|
2019-11-13 06:16:41 +08:00
|
|
|
struct sja1105_private *priv = ds->priv;
|
2019-05-03 04:23:30 +08:00
|
|
|
const struct sja1105_regs *regs = priv->info->regs;
|
2019-11-13 06:16:41 +08:00
|
|
|
u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
|
|
|
|
const int size = SJA1105_SIZE_RESET_CMD;
|
|
|
|
u64 cold_rst = 1;
|
|
|
|
|
|
|
|
sja1105_pack(packed_buf, &cold_rst, 2, 2, size);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
2019-10-02 03:18:01 +08:00
|
|
|
return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
|
|
|
|
SJA1105_SIZE_RESET_CMD);
|
2019-05-03 04:23:30 +08:00
|
|
|
}
|
|
|
|
|
2019-06-08 21:03:43 +08:00
|
|
|
/* Set or clear the Tx-inhibit bits for the ports in @port_bitmap through a
 * read-modify-write of the port control register.
 */
int sja1105_inhibit_tx(const struct sja1105_private *priv,
		       unsigned long port_bitmap, bool tx_inhibited)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u32 port_ctrl;
	int err;

	err = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
			       &port_ctrl, NULL);
	if (err < 0)
		return err;

	if (tx_inhibited)
		port_ctrl |= port_bitmap;
	else
		port_ctrl &= ~port_bitmap;

	return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
				&port_ctrl, NULL);
}
|
|
|
|
|
2019-05-03 04:23:30 +08:00
|
|
|
/* Subset of the switch's General Status registers that is relevant for
 * verifying a static config upload (see sja1105_static_config_upload).
 */
struct sja1105_status {
	u64 configs;	/* 0 means the uploaded configuration is invalid */
	u64 crcchkl;	/* 1 means a local CRC error was reported */
	u64 ids;	/* 1 means a device id mismatch with the config */
	u64 crcchkg;	/* 1 means a global CRC error was reported */
};
|
|
|
|
|
|
|
|
/* This is not reading the entire General Status area, which is also
|
|
|
|
* divergent between E/T and P/Q/R/S, but only the relevant bits for
|
|
|
|
* ensuring that the static config upload procedure was successful.
|
|
|
|
*/
|
|
|
|
static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
|
|
|
|
{
|
|
|
|
/* So that addition translates to 4 bytes */
|
|
|
|
u32 *p = buf;
|
|
|
|
|
|
|
|
/* device_id is missing from the buffer, but we don't
|
|
|
|
* want to diverge from the manual definition of the
|
|
|
|
* register addresses, so we'll back off one step with
|
|
|
|
* the register pointer, and never access p[0].
|
|
|
|
*/
|
|
|
|
p--;
|
|
|
|
sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
|
|
|
|
sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
|
|
|
|
sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
|
|
|
|
sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sja1105_status_get(struct sja1105_private *priv,
|
|
|
|
struct sja1105_status *status)
|
|
|
|
{
|
|
|
|
const struct sja1105_regs *regs = priv->info->regs;
|
|
|
|
u8 packed_buf[4];
|
|
|
|
int rc;
|
|
|
|
|
2019-10-02 03:18:01 +08:00
|
|
|
rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
|
2019-05-03 04:23:30 +08:00
|
|
|
if (rc < 0)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
sja1105_status_unpack(packed_buf, status);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Not const because unpacking priv->static_config into buffers and preparing
|
|
|
|
* for upload requires the recalculation of table CRCs and updating the
|
|
|
|
* structures with these.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
static_config_buf_prepare_for_upload(struct sja1105_private *priv,
|
|
|
|
void *config_buf, int buf_len)
|
|
|
|
{
|
|
|
|
struct sja1105_static_config *config = &priv->static_config;
|
|
|
|
struct sja1105_table_header final_header;
|
|
|
|
sja1105_config_valid_t valid;
|
|
|
|
char *final_header_ptr;
|
|
|
|
int crc_len;
|
|
|
|
|
|
|
|
valid = sja1105_static_config_check_valid(config);
|
|
|
|
if (valid != SJA1105_CONFIG_OK) {
|
|
|
|
dev_err(&priv->spidev->dev,
|
|
|
|
sja1105_static_config_error_msg[valid]);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write Device ID and config tables to config_buf */
|
|
|
|
sja1105_static_config_pack(config_buf, config);
|
|
|
|
/* Recalculate CRC of the last header (right now 0xDEADBEEF).
|
|
|
|
* Don't include the CRC field itself.
|
|
|
|
*/
|
|
|
|
crc_len = buf_len - 4;
|
|
|
|
/* Read the whole table header */
|
|
|
|
final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
|
|
|
|
sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
|
|
|
|
/* Modify */
|
|
|
|
final_header.crc = sja1105_crc32(config_buf, crc_len);
|
|
|
|
/* Rewrite */
|
|
|
|
sja1105_table_header_packing(final_header_ptr, &final_header, PACK);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define RETRIES 10
|
|
|
|
|
|
|
|
int sja1105_static_config_upload(struct sja1105_private *priv)
|
|
|
|
{
|
2019-05-03 04:23:37 +08:00
|
|
|
unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
|
2019-05-03 04:23:30 +08:00
|
|
|
struct sja1105_static_config *config = &priv->static_config;
|
|
|
|
const struct sja1105_regs *regs = priv->info->regs;
|
|
|
|
struct device *dev = &priv->spidev->dev;
|
|
|
|
struct sja1105_status status;
|
|
|
|
int rc, retries = RETRIES;
|
|
|
|
u8 *config_buf;
|
|
|
|
int buf_len;
|
|
|
|
|
|
|
|
buf_len = sja1105_static_config_get_length(config);
|
|
|
|
config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
|
|
|
|
if (!config_buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
|
|
|
|
if (rc < 0) {
|
|
|
|
dev_err(dev, "Invalid config, cannot upload\n");
|
2019-09-29 06:43:39 +08:00
|
|
|
rc = -EINVAL;
|
|
|
|
goto out;
|
2019-05-03 04:23:30 +08:00
|
|
|
}
|
2019-05-03 04:23:37 +08:00
|
|
|
/* Prevent PHY jabbering during switch reset by inhibiting
|
|
|
|
* Tx on all ports and waiting for current packet to drain.
|
|
|
|
* Otherwise, the PHY will see an unterminated Ethernet packet.
|
|
|
|
*/
|
2019-06-08 21:03:43 +08:00
|
|
|
rc = sja1105_inhibit_tx(priv, port_bitmap, true);
|
2019-05-03 04:23:37 +08:00
|
|
|
if (rc < 0) {
|
|
|
|
dev_err(dev, "Failed to inhibit Tx on ports\n");
|
2019-09-29 06:43:39 +08:00
|
|
|
rc = -ENXIO;
|
|
|
|
goto out;
|
2019-05-03 04:23:37 +08:00
|
|
|
}
|
|
|
|
/* Wait for an eventual egress packet to finish transmission
|
|
|
|
* (reach IFG). It is guaranteed that a second one will not
|
|
|
|
* follow, and that switch cold reset is thus safe
|
|
|
|
*/
|
|
|
|
usleep_range(500, 1000);
|
2019-05-03 04:23:30 +08:00
|
|
|
do {
|
|
|
|
/* Put the SJA1105 in programming mode */
|
2019-11-13 06:16:41 +08:00
|
|
|
rc = priv->info->reset_cmd(priv->ds);
|
2019-05-03 04:23:30 +08:00
|
|
|
if (rc < 0) {
|
|
|
|
dev_err(dev, "Failed to reset switch, retrying...\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* Wait for the switch to come out of reset */
|
|
|
|
usleep_range(1000, 5000);
|
|
|
|
/* Upload the static config to the device */
|
net: dsa: sja1105: Switch to scatter/gather API for SPI
This reworks the SPI transfer implementation to make use of more of the
SPI core features. The main benefit is to avoid the memcpy in
sja1105_xfer_buf().
The memcpy was only needed because the function was transferring a
single buffer at a time. So it needed to copy the caller-provided buffer
at buf + 4, to store the SPI message header in the "headroom" area.
But the SPI core supports scatter-gather messages, comprised of multiple
transfers. We can actually use those to break apart every SPI message
into 2 transfers: one for the header and one for the actual payload.
To keep the behavior the same regarding the chip select signal, it is
necessary to tell the SPI core to de-assert the chip select after each
chunk. This was not needed before, because each spi_message contained
only 1 single transfer.
The meaning of the per-transfer cs_change=1 is:
- If the transfer is the last one of the message, keep CS asserted
- Otherwise, deassert CS
We need to deassert CS in the "otherwise" case, which was implicit
before.
Avoiding the memcpy creates yet another opportunity. The device can't
process more than 256 bytes of SPI payload at a time, so the
sja1105_xfer_long_buf() function used to exist, to split the larger
caller buffer into chunks.
But these chunks couldn't be used as scatter/gather buffers for
spi_message until now, because of that memcpy (we would have needed more
memory for each chunk). So we can now remove the sja1105_xfer_long_buf()
function and have a single implementation for long and short buffers.
Another benefit is lower usage of stack memory. Previously we had to
store 2 SPI buffers for each chunk. Due to the elimination of the
memcpy, we can now send pointers to the actual chunks from the
caller-supplied buffer to the SPI core.
Since the patch merges two functions into a rewritten implementation,
the function prototype was also changed, mainly for cosmetic consistency
with the structures used within it.
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-12 06:31:15 +08:00
|
|
|
rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
|
|
|
|
config_buf, buf_len);
|
2019-05-03 04:23:30 +08:00
|
|
|
if (rc < 0) {
|
|
|
|
dev_err(dev, "Failed to upload config, retrying...\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* Check that SJA1105 responded well to the config upload */
|
|
|
|
rc = sja1105_status_get(priv, &status);
|
|
|
|
if (rc < 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (status.ids == 1) {
|
|
|
|
dev_err(dev, "Mismatch between hardware and static config "
|
|
|
|
"device id. Wrote 0x%llx, wants 0x%llx\n",
|
|
|
|
config->device_id, priv->info->device_id);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (status.crcchkl == 1) {
|
|
|
|
dev_err(dev, "Switch reported invalid local CRC on "
|
|
|
|
"the uploaded config, retrying...\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (status.crcchkg == 1) {
|
|
|
|
dev_err(dev, "Switch reported invalid global CRC on "
|
|
|
|
"the uploaded config, retrying...\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (status.configs == 0) {
|
|
|
|
dev_err(dev, "Switch reported that configuration is "
|
|
|
|
"invalid, retrying...\n");
|
|
|
|
continue;
|
|
|
|
}
|
2019-05-08 21:30:41 +08:00
|
|
|
/* Success! */
|
|
|
|
break;
|
|
|
|
} while (--retries);
|
2019-05-03 04:23:30 +08:00
|
|
|
|
|
|
|
if (!retries) {
|
|
|
|
rc = -EIO;
|
|
|
|
dev_err(dev, "Failed to upload config to device, giving up\n");
|
|
|
|
goto out;
|
2019-05-08 21:30:41 +08:00
|
|
|
} else if (retries != RETRIES) {
|
2019-05-03 04:23:30 +08:00
|
|
|
dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
kfree(config_buf);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2019-05-08 21:43:26 +08:00
|
|
|
/* Register address map shared by the first-generation (E/T) switches.
 * Per-port entries are indexed by port number.
 */
static struct sja1105_regs sja1105et_regs = {
	.device_id = 0x0,
	.prod_id = 0x100BC3,
	.status = 0x1,
	.port_control = 0x11,
	.config = 0x020000,
	.rgu = 0x100440,
	/* UM10944.pdf, Table 86, ACU Register overview */
	.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
	.rmii_pll1 = 0x10000A,
	.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
	.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
	.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
	.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
	/* UM10944.pdf, Table 78, CGU Register overview */
	.mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
	.mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
	.mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
	.mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
	.rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
	.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
	.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
	.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
	.ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
	.ptppinst = 0x14,
	.ptppindur = 0x16,
	.ptp_control = 0x17,
	.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
	.ptpclkrate = 0x1A,
	.ptpclkcorp = 0x1D,
};
|
|
|
|
|
2019-05-08 21:43:26 +08:00
|
|
|
/* Register address map shared by the second-generation (P/Q/R/S) switches.
 * Per-port entries are indexed by port number.  Note several addresses
 * (port_control, ptpschtm, the PTP block) differ from the E/T map.
 */
static struct sja1105_regs sja1105pqrs_regs = {
	.device_id = 0x0,
	.prod_id = 0x100BC3,
	.status = 0x1,
	.port_control = 0x12,
	.config = 0x020000,
	.rgu = 0x100440,
	/* UM10944.pdf, Table 86, ACU Register overview */
	.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
	.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
	.sgmii = 0x1F0000,
	.rmii_pll1 = 0x10000A,
	.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
	.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
	.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
	.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
	.ether_stats = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
	/* UM11040.pdf, Table 114 */
	.mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
	.mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
	.mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
	.mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
	.rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
	.rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
	.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
	.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
	.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
	.ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
	.ptppinst = 0x15,
	.ptppindur = 0x17,
	.ptp_control = 0x18,
	.ptpclkval = 0x19,
	.ptpclkrate = 0x1B,
	.ptpclkcorp = 0x1E,
	.ptpsyncts = 0x1F,
};
|
|
|
|
|
|
|
|
struct sja1105_info sja1105e_info = {
|
|
|
|
.device_id = SJA1105E_DEVICE_ID,
|
|
|
|
.part_no = SJA1105ET_PART_NO,
|
|
|
|
.static_ops = sja1105e_table_ops,
|
|
|
|
.dyn_ops = sja1105et_dyn_ops,
|
2019-06-08 20:04:35 +08:00
|
|
|
.ptp_ts_bits = 24,
|
|
|
|
.ptpegr_ts_bytes = 4,
|
2019-05-03 04:23:30 +08:00
|
|
|
.reset_cmd = sja1105et_reset_cmd,
|
2019-06-03 05:11:57 +08:00
|
|
|
.fdb_add_cmd = sja1105et_fdb_add,
|
|
|
|
.fdb_del_cmd = sja1105et_fdb_del,
|
2019-11-12 08:11:53 +08:00
|
|
|
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
|
2019-05-03 04:23:30 +08:00
|
|
|
.regs = &sja1105et_regs,
|
|
|
|
.name = "SJA1105E",
|
|
|
|
};
|
|
|
|
struct sja1105_info sja1105t_info = {
|
|
|
|
.device_id = SJA1105T_DEVICE_ID,
|
|
|
|
.part_no = SJA1105ET_PART_NO,
|
|
|
|
.static_ops = sja1105t_table_ops,
|
|
|
|
.dyn_ops = sja1105et_dyn_ops,
|
2019-06-08 20:04:35 +08:00
|
|
|
.ptp_ts_bits = 24,
|
|
|
|
.ptpegr_ts_bytes = 4,
|
2019-05-03 04:23:30 +08:00
|
|
|
.reset_cmd = sja1105et_reset_cmd,
|
2019-06-03 05:11:57 +08:00
|
|
|
.fdb_add_cmd = sja1105et_fdb_add,
|
|
|
|
.fdb_del_cmd = sja1105et_fdb_del,
|
2019-11-12 08:11:53 +08:00
|
|
|
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
|
2019-05-03 04:23:30 +08:00
|
|
|
.regs = &sja1105et_regs,
|
|
|
|
.name = "SJA1105T",
|
|
|
|
};
|
|
|
|
struct sja1105_info sja1105p_info = {
|
|
|
|
.device_id = SJA1105PR_DEVICE_ID,
|
|
|
|
.part_no = SJA1105P_PART_NO,
|
|
|
|
.static_ops = sja1105p_table_ops,
|
|
|
|
.dyn_ops = sja1105pqrs_dyn_ops,
|
2019-06-08 20:04:35 +08:00
|
|
|
.ptp_ts_bits = 32,
|
|
|
|
.ptpegr_ts_bytes = 8,
|
2019-06-09 00:12:28 +08:00
|
|
|
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
|
2019-05-03 04:23:30 +08:00
|
|
|
.reset_cmd = sja1105pqrs_reset_cmd,
|
2019-06-03 05:11:57 +08:00
|
|
|
.fdb_add_cmd = sja1105pqrs_fdb_add,
|
|
|
|
.fdb_del_cmd = sja1105pqrs_fdb_del,
|
2019-11-12 08:11:53 +08:00
|
|
|
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
|
2019-05-03 04:23:30 +08:00
|
|
|
.regs = &sja1105pqrs_regs,
|
|
|
|
.name = "SJA1105P",
|
|
|
|
};
|
|
|
|
struct sja1105_info sja1105q_info = {
|
|
|
|
.device_id = SJA1105QS_DEVICE_ID,
|
|
|
|
.part_no = SJA1105Q_PART_NO,
|
|
|
|
.static_ops = sja1105q_table_ops,
|
|
|
|
.dyn_ops = sja1105pqrs_dyn_ops,
|
2019-06-08 20:04:35 +08:00
|
|
|
.ptp_ts_bits = 32,
|
|
|
|
.ptpegr_ts_bytes = 8,
|
2019-06-09 00:12:28 +08:00
|
|
|
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
|
2019-05-03 04:23:30 +08:00
|
|
|
.reset_cmd = sja1105pqrs_reset_cmd,
|
2019-06-03 05:11:57 +08:00
|
|
|
.fdb_add_cmd = sja1105pqrs_fdb_add,
|
|
|
|
.fdb_del_cmd = sja1105pqrs_fdb_del,
|
2019-11-12 08:11:53 +08:00
|
|
|
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
|
2019-05-03 04:23:30 +08:00
|
|
|
.regs = &sja1105pqrs_regs,
|
|
|
|
.name = "SJA1105Q",
|
|
|
|
};
|
|
|
|
struct sja1105_info sja1105r_info = {
|
|
|
|
.device_id = SJA1105PR_DEVICE_ID,
|
|
|
|
.part_no = SJA1105R_PART_NO,
|
|
|
|
.static_ops = sja1105r_table_ops,
|
|
|
|
.dyn_ops = sja1105pqrs_dyn_ops,
|
2019-06-08 20:04:35 +08:00
|
|
|
.ptp_ts_bits = 32,
|
|
|
|
.ptpegr_ts_bytes = 8,
|
2019-06-09 00:12:28 +08:00
|
|
|
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
|
2019-05-03 04:23:30 +08:00
|
|
|
.reset_cmd = sja1105pqrs_reset_cmd,
|
2019-06-03 05:11:57 +08:00
|
|
|
.fdb_add_cmd = sja1105pqrs_fdb_add,
|
|
|
|
.fdb_del_cmd = sja1105pqrs_fdb_del,
|
2019-11-12 08:11:53 +08:00
|
|
|
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
|
2019-05-03 04:23:30 +08:00
|
|
|
.regs = &sja1105pqrs_regs,
|
|
|
|
.name = "SJA1105R",
|
|
|
|
};
|
|
|
|
struct sja1105_info sja1105s_info = {
|
|
|
|
.device_id = SJA1105QS_DEVICE_ID,
|
|
|
|
.part_no = SJA1105S_PART_NO,
|
|
|
|
.static_ops = sja1105s_table_ops,
|
|
|
|
.dyn_ops = sja1105pqrs_dyn_ops,
|
|
|
|
.regs = &sja1105pqrs_regs,
|
2019-06-08 20:04:35 +08:00
|
|
|
.ptp_ts_bits = 32,
|
|
|
|
.ptpegr_ts_bytes = 8,
|
2019-06-09 00:12:28 +08:00
|
|
|
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
|
2019-05-03 04:23:30 +08:00
|
|
|
.reset_cmd = sja1105pqrs_reset_cmd,
|
2019-06-03 05:11:57 +08:00
|
|
|
.fdb_add_cmd = sja1105pqrs_fdb_add,
|
|
|
|
.fdb_del_cmd = sja1105pqrs_fdb_del,
|
2019-11-12 08:11:53 +08:00
|
|
|
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
|
2019-05-03 04:23:30 +08:00
|
|
|
.name = "SJA1105S",
|
|
|
|
};
|