can: rx-offload: Prepare for CAN FD support
The skbs for classic CAN and CAN FD frames are allocated with separate functions: alloc_can_skb() and alloc_canfd_skb(). In order to support CAN FD frames via the rx-offload helper, the driver itself has to allocate the skb (depending whether it received a classic CAN or CAN FD frame), as the rx-offload helper cannot know which kind of CAN frame the driver has received. This patch moves the allocation of the skb into the struct can_rx_offload::mailbox_read callbacks of the flexcan and ti_hecc driver and adjusts the rx-offload helper accordingly. Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com> Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
This commit is contained in:
parent
61d2350615
commit
4e9c9484b0
|
@ -783,16 +783,23 @@ static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *off
|
|||
return container_of(offload, struct flexcan_priv, offload);
|
||||
}
|
||||
|
||||
static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
|
||||
struct can_frame *cf,
|
||||
u32 *timestamp, unsigned int n)
|
||||
static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
|
||||
unsigned int n, u32 *timestamp,
|
||||
bool drop)
|
||||
{
|
||||
struct flexcan_priv *priv = rx_offload_to_priv(offload);
|
||||
struct flexcan_regs __iomem *regs = priv->regs;
|
||||
struct flexcan_mb __iomem *mb;
|
||||
struct sk_buff *skb;
|
||||
struct can_frame *cf;
|
||||
u32 reg_ctrl, reg_id, reg_iflag1;
|
||||
int i;
|
||||
|
||||
if (unlikely(drop)) {
|
||||
skb = ERR_PTR(-ENOBUFS);
|
||||
goto mark_as_read;
|
||||
}
|
||||
|
||||
mb = flexcan_get_mb(priv, n);
|
||||
|
||||
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
|
||||
|
@ -806,7 +813,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
|
|||
code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
|
||||
if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
|
||||
(code != FLEXCAN_MB_CODE_RX_OVERRUN))
|
||||
return 0;
|
||||
return NULL;
|
||||
|
||||
if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
|
||||
/* This MB was overrun, we lost data */
|
||||
|
@ -816,11 +823,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
|
|||
} else {
|
||||
reg_iflag1 = priv->read(&regs->iflag1);
|
||||
if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
|
||||
return 0;
|
||||
return NULL;
|
||||
|
||||
reg_ctrl = priv->read(&mb->can_ctrl);
|
||||
}
|
||||
|
||||
skb = alloc_can_skb(offload->dev, &cf);
|
||||
if (!skb) {
|
||||
skb = ERR_PTR(-ENOMEM);
|
||||
goto mark_as_read;
|
||||
}
|
||||
|
||||
/* increase timstamp to full 32 bit */
|
||||
*timestamp = reg_ctrl << 16;
|
||||
|
||||
|
@ -839,7 +852,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
|
|||
*(__be32 *)(cf->data + i) = data;
|
||||
}
|
||||
|
||||
/* mark as read */
|
||||
mark_as_read:
|
||||
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
|
||||
/* Clear IRQ */
|
||||
if (n < 32)
|
||||
|
@ -856,7 +869,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
|
|||
*/
|
||||
priv->read(&regs->timer);
|
||||
|
||||
return 1;
|
||||
return skb;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -139,71 +139,35 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
|
|||
static struct sk_buff *
|
||||
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
|
||||
{
|
||||
struct sk_buff *skb = NULL, *skb_error = NULL;
|
||||
struct sk_buff *skb;
|
||||
struct can_rx_offload_cb *cb;
|
||||
struct can_frame *cf;
|
||||
int ret;
|
||||
|
||||
if (likely(skb_queue_len(&offload->skb_queue) <
|
||||
offload->skb_queue_len_max)) {
|
||||
skb = alloc_can_skb(offload->dev, &cf);
|
||||
if (unlikely(!skb))
|
||||
skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
|
||||
} else {
|
||||
skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
|
||||
}
|
||||
|
||||
/* If queue is full or skb not available, drop by reading into
|
||||
* overflow buffer.
|
||||
*/
|
||||
if (unlikely(skb_error)) {
|
||||
struct can_frame cf_overflow;
|
||||
bool drop = false;
|
||||
u32 timestamp;
|
||||
|
||||
ret = offload->mailbox_read(offload, &cf_overflow,
|
||||
&timestamp, n);
|
||||
/* If queue is full drop frame */
|
||||
if (unlikely(skb_queue_len(&offload->skb_queue) >
|
||||
offload->skb_queue_len_max))
|
||||
drop = true;
|
||||
|
||||
skb = offload->mailbox_read(offload, n, &timestamp, drop);
|
||||
/* Mailbox was empty. */
|
||||
if (unlikely(!ret))
|
||||
if (unlikely(!skb))
|
||||
return NULL;
|
||||
|
||||
/* Mailbox has been read and we're dropping it or
|
||||
* there was a problem reading the mailbox.
|
||||
*
|
||||
* Increment error counters in any case.
|
||||
*/
|
||||
offload->dev->stats.rx_dropped++;
|
||||
offload->dev->stats.rx_fifo_errors++;
|
||||
|
||||
/* There was a problem reading the mailbox, propagate
|
||||
* error value.
|
||||
*/
|
||||
if (unlikely(ret < 0))
|
||||
return ERR_PTR(ret);
|
||||
|
||||
return skb_error;
|
||||
}
|
||||
|
||||
cb = can_rx_offload_get_cb(skb);
|
||||
ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
|
||||
|
||||
/* Mailbox was empty. */
|
||||
if (unlikely(!ret)) {
|
||||
kfree_skb(skb);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* There was a problem reading the mailbox, propagate error value. */
|
||||
if (unlikely(ret < 0)) {
|
||||
kfree_skb(skb);
|
||||
|
||||
if (unlikely(IS_ERR(skb))) {
|
||||
offload->dev->stats.rx_dropped++;
|
||||
offload->dev->stats.rx_fifo_errors++;
|
||||
|
||||
return ERR_PTR(ret);
|
||||
return skb;
|
||||
}
|
||||
|
||||
/* Mailbox was read. */
|
||||
cb = can_rx_offload_get_cb(skb);
|
||||
cb->timestamp = timestamp;
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
|
|
|
@ -535,15 +535,28 @@ struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
|
|||
return container_of(offload, struct ti_hecc_priv, offload);
|
||||
}
|
||||
|
||||
static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
|
||||
struct can_frame *cf,
|
||||
u32 *timestamp, unsigned int mbxno)
|
||||
static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
|
||||
unsigned int mbxno, u32 *timestamp,
|
||||
bool drop)
|
||||
{
|
||||
struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
|
||||
struct sk_buff *skb;
|
||||
struct can_frame *cf;
|
||||
u32 data, mbx_mask;
|
||||
int ret = 1;
|
||||
|
||||
mbx_mask = BIT(mbxno);
|
||||
|
||||
if (unlikely(drop)) {
|
||||
skb = ERR_PTR(-ENOBUFS);
|
||||
goto mark_as_read;
|
||||
}
|
||||
|
||||
skb = alloc_can_skb(offload->dev, &cf);
|
||||
if (unlikely(!skb)) {
|
||||
skb = ERR_PTR(-ENOMEM);
|
||||
goto mark_as_read;
|
||||
}
|
||||
|
||||
data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
|
||||
if (data & HECC_CANMID_IDE)
|
||||
cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
|
||||
|
@ -578,11 +591,12 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
|
|||
*/
|
||||
if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
|
||||
hecc_read(priv, HECC_CANRML) & mbx_mask))
|
||||
ret = -ENOBUFS;
|
||||
skb = ERR_PTR(-ENOBUFS);
|
||||
|
||||
mark_as_read:
|
||||
hecc_write(priv, HECC_CANRMP, mbx_mask);
|
||||
|
||||
return ret;
|
||||
return skb;
|
||||
}
|
||||
|
||||
static int ti_hecc_error(struct net_device *ndev, int int_status,
|
||||
|
|
|
@ -15,9 +15,9 @@
|
|||
struct can_rx_offload {
|
||||
struct net_device *dev;
|
||||
|
||||
unsigned int (*mailbox_read)(struct can_rx_offload *offload,
|
||||
struct can_frame *cf,
|
||||
u32 *timestamp, unsigned int mb);
|
||||
struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
|
||||
unsigned int mb, u32 *timestamp,
|
||||
bool drop);
|
||||
|
||||
struct sk_buff_head skb_queue;
|
||||
u32 skb_queue_len_max;
|
||||
|
|
Loading…
Reference in New Issue