linux-sg2042/drivers/net/ethernet/ti/davinci_cpdma.h

/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __DAVINCI_CPDMA_H__
#define __DAVINCI_CPDMA_H__

#define CPDMA_MAX_CHANNELS      BITS_PER_LONG

#define tx_chan_num(chan)       (chan)
#define rx_chan_num(chan)       ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)        ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)        (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)       __chan_linear((chan)->chan_num)

#define CPDMA_RX_SOURCE_PORT(__status__)        ((__status__ >> 16) & 0x7)

#define CPDMA_EOI_RX_THRESH     0x0
#define CPDMA_EOI_RX            0x1
#define CPDMA_EOI_TX            0x2
#define CPDMA_EOI_MISC          0x3
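
/*
 * Channel numbering illustration (comment only, derived from the macros
 * above): tx channels occupy 0 .. CPDMA_MAX_CHANNELS - 1 and rx channels
 * occupy CPDMA_MAX_CHANNELS .. 2 * CPDMA_MAX_CHANNELS - 1, for example:
 *
 *      tx_chan_num(0)                 == 0
 *      rx_chan_num(0)                 == CPDMA_MAX_CHANNELS
 *      __chan_linear(rx_chan_num(0))  == 0
 *
 * chan_linear() recovers the hardware channel index from either range, and
 * is_rx_chan()/is_tx_chan() tell the two ranges apart.
 */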

struct cpdma_params {
        struct device   *dev;
        void __iomem    *dmaregs;
        void __iomem    *txhdp, *rxhdp, *txcp, *rxcp;
        void __iomem    *rxthresh, *rxfree;
        int             num_chan;
        bool            has_soft_reset;
        int             min_packet_size;
        u32             desc_mem_phys;
        u32             desc_hw_addr;
        int             desc_mem_size;
        int             desc_align;

        /*
         * Some instances of embedded cpdma controllers have extra control and
         * status registers. The following flag enables access to these
         * "extended" registers.
         */
        bool            has_ext_regs;
};
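
/*
 * Setup sketch (hypothetical, not part of this header): a MAC driver such as
 * cpsw or davinci_emac typically fills a struct cpdma_params from its own
 * register layout before creating the controller. Every name, offset and
 * value below is an illustrative assumption, not something defined here.
 *
 *      struct cpdma_params dma_params = {
 *              .dev             = &pdev->dev,
 *              .dmaregs         = ss_regs + 0x800,   // assumed offsets
 *              .txhdp           = ss_regs + 0xa00,
 *              .rxhdp           = ss_regs + 0xa20,
 *              .txcp            = ss_regs + 0xa40,
 *              .rxcp            = ss_regs + 0xa60,
 *              .num_chan        = 8,
 *              .has_soft_reset  = true,
 *              .min_packet_size = 64,
 *              .desc_mem_size   = SZ_16K,
 *              .desc_align      = 16,
 *              .has_ext_regs    = true,
 *      };
 *      struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 */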

struct cpdma_chan_stats {
        u32     head_enqueue;
        u32     tail_enqueue;
        u32     pad_enqueue;
        u32     misqueued;
        u32     desc_alloc_fail;
        u32     pad_alloc_fail;
        u32     runt_receive_buff;
        u32     runt_transmit_buff;
        u32     empty_dequeue;
        u32     busy_dequeue;
        u32     good_dequeue;
        u32     requeue;
        u32     teardown_dequeue;
};

struct cpdma_ctlr;
struct cpdma_chan;

typedef void (*cpdma_handler_fn)(void *token, int len, int status);

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
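
/*
 * Typical call-flow sketch (illustration only; locking, error handling and
 * NAPI integration are omitted, and the handler bodies, channel count and
 * 'budget' are assumptions about how a MAC driver might wire this up):
 *
 *      static void my_tx_handler(void *token, int len, int status)
 *      {
 *              // 'token' is whatever was passed to cpdma_chan_submit(),
 *              // e.g. the skb that has now been transmitted.
 *      }
 *
 *      ctlr = cpdma_ctlr_create(&dma_params);
 *      txch = cpdma_chan_create(ctlr, tx_chan_num(0), my_tx_handler);
 *      rxch = cpdma_chan_create(ctlr, rx_chan_num(0), my_rx_handler);
 *      cpdma_ctlr_start(ctlr);
 *      cpdma_ctlr_int_ctrl(ctlr, true);
 *
 *      // transmit path:
 *      cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *
 *      // interrupt/poll path: process up to 'budget' completed descriptors,
 *      // then signal end-of-interrupt for the tx group.
 *      cpdma_chan_process(txch, budget);
 *      cpdma_ctlr_eoi(ctlr, CPDMA_EOI_TX);
 */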

/*
 * The CPDMA descriptor pool is shared between rx and tx. Without a boundary,
 * heavy transmit traffic (e.g. an nmap or "ping -f" flood) can claim every
 * descriptor for tx, starving rx and stopping the driver. The pool is
 * therefore divided so that tx may use at most half of the descriptors and
 * the rx path uses the other half (see commit
 * 86d8c07ff2448eb4e860e50f34ef6ee78e45c40c, "net/davinci: do not use all
 * descriptors for tx packets"); callers can test for a free tx descriptor
 * before queueing more work.
 */
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
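
/*
 * Transmit-path sketch (assumed driver code, not defined in this header):
 * after queueing a packet, stop the netdev queue once the tx half of the
 * descriptor pool is exhausted. 'txch', 'skb' and 'ndev' are hypothetical.
 *
 *      ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *      if (unlikely(!cpdma_check_free_tx_desc(txch)))
 *              netif_stop_queue(ndev);   // no room for another packet
 */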

enum cpdma_control {
        CPDMA_CMD_IDLE,                 /* write-only */
        CPDMA_COPY_ERROR_FRAMES,        /* read-write */
        CPDMA_RX_OFF_LEN_UPDATE,        /* read-write */
        CPDMA_RX_OWNERSHIP_FLIP,        /* read-write */
        CPDMA_TX_PRIO_FIXED,            /* read-write */
        CPDMA_STAT_IDLE,                /* read-only */
        CPDMA_STAT_TX_ERR_CHAN,         /* read-only */
        CPDMA_STAT_TX_ERR_CODE,         /* read-only */
        CPDMA_STAT_RX_ERR_CHAN,         /* read-only */
        CPDMA_STAT_RX_ERR_CODE,         /* read-only */
        CPDMA_RX_BUFFER_OFFSET,         /* read-write */
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
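
/*
 * Control-knob sketch (values are hypothetical, shown only to illustrate the
 * cpdma_control_get()/cpdma_control_set() interface):
 *
 *      cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, 1);
 *      cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, NET_IP_ALIGN);
 *      idle = cpdma_control_get(ctlr, CPDMA_STAT_IDLE);
 */
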
#endif