Merge branch 'DPAA-Ethernet-changes'
Madalin Bucur says:

====================
DPAA Ethernet changes

v3: add newline at the end of error messages
v2: resending with From: field matching signed-off-by

Here's a series of changes for the DPAA Ethernet driver, addressing minor
or unapparent issues in the codebase, adding probe ordering based on a
recently added DPAA QMan API and removing some redundant code.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
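The probe ordering relies on the QBMan drivers' probe-status helpers
(bman_is_probed(), qman_is_probed(), bman_portals_probed() and
qman_portals_probed()), which return a positive value once the underlying
driver has probed, 0 while it has not probed yet, and a negative value if
its probe failed. Condensed from the dpaa_eth_probe() hunk below, each
dependency is checked with the same pattern:

	err = qman_is_probed();
	if (!err)		/* QMan not probed yet: retry this probe later */
		return -EPROBE_DEFER;
	if (err < 0) {		/* QMan probe failed: no point in retrying */
		dev_err(dev, "failing probe due to qman probe error\n");
		return -ENODEV;
	}
	/* repeated for bman_is_probed() and the two portal helpers */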
commit 503a64635d
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -901,7 +901,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
 
 	if (num_portals == 0)
 		dev_err(priv->net_dev->dev.parent,
-			"No Qman software (affine) channels found");
+			"No Qman software (affine) channels found\n");
 
 	/* Initialize each FQ in the list */
 	list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -1335,15 +1335,15 @@ static void dpaa_fd_release(const struct net_device *net_dev,
 		vaddr = phys_to_virt(qm_fd_addr(fd));
 		sgt = vaddr + qm_fd_get_offset(fd);
 
-		dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+				 dpaa_bp->size, DMA_FROM_DEVICE);
 
 		dpaa_release_sgt_members(sgt);
 
-		addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
-				      DMA_FROM_DEVICE);
-		if (dma_mapping_error(dpaa_bp->dev, addr)) {
-			dev_err(dpaa_bp->dev, "DMA mapping failed");
+		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
+				      dpaa_bp->size, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+			netdev_err(net_dev, "DMA mapping failed\n");
 			return;
 		}
 		bm_buffer_set64(&bmb, addr);
@@ -1488,7 +1488,7 @@ return_error:
 
 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 {
-	struct device *dev = dpaa_bp->dev;
+	struct net_device *net_dev = dpaa_bp->priv->net_dev;
 	struct bm_buffer bmb[8];
 	dma_addr_t addr;
 	void *new_buf;
@@ -1497,16 +1497,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 	for (i = 0; i < 8; i++) {
 		new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
 		if (unlikely(!new_buf)) {
-			dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
-				dpaa_bp->raw_size);
+			netdev_err(net_dev,
+				   "netdev_alloc_frag() failed, size %zu\n",
+				   dpaa_bp->raw_size);
 			goto release_previous_buffs;
 		}
 		new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
 
-		addr = dma_map_single(dev, new_buf,
+		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
 				      dpaa_bp->size, DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(dev, addr))) {
-			dev_err(dpaa_bp->dev, "DMA map failed");
+		if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+					       addr))) {
+			netdev_err(net_dev, "DMA map failed\n");
 			goto release_previous_buffs;
 		}
 
@@ -1634,7 +1636,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(dev, addr,
+		dma_unmap_single(priv->tx_dma_dev, addr,
 				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
 				 dma_dir);
 
@@ -1644,21 +1646,21 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
 
 		/* sgt[0] is from lowmem, was dma_map_single()-ed */
-		dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+		dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
 				 qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
 		/* remaining pages were mapped with skb_frag_dma_map() */
 		for (i = 1; i <= nr_frags; i++) {
 			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
-			dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
 				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
 		}
 
 		/* Free the page frag that we allocated on Tx */
 		skb_free_frag(phys_to_virt(addr));
 	} else {
-		dma_unmap_single(dev, addr,
+		dma_unmap_single(priv->tx_dma_dev, addr,
 				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
 	}
 
@@ -1762,8 +1764,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 			goto free_buffers;
 
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
+				 dpaa_bp->size, DMA_FROM_DEVICE);
 		if (!skb) {
 			sz = dpaa_bp->size +
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1853,7 +1855,6 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 			    int *offset)
 {
 	struct net_device *net_dev = priv->net_dev;
-	struct device *dev = net_dev->dev.parent;
 	enum dma_data_direction dma_dir;
 	unsigned char *buffer_start;
 	struct sk_buff **skbh;
@@ -1889,9 +1890,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 		fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
 
 	/* Map the entire buffer size that may be seen by FMan, but no more */
-	addr = dma_map_single(dev, skbh,
+	addr = dma_map_single(priv->tx_dma_dev, skbh,
 			      skb_tail_pointer(skb) - buffer_start, dma_dir);
-	if (unlikely(dma_mapping_error(dev, addr))) {
+	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
 		return -EINVAL;
@@ -1907,7 +1908,6 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
 	const int nr_frags = skb_shinfo(skb)->nr_frags;
 	struct net_device *net_dev = priv->net_dev;
-	struct device *dev = net_dev->dev.parent;
 	struct qm_sg_entry *sgt;
 	struct sk_buff **skbh;
 	int i, j, err, sz;
@@ -1946,10 +1946,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	qm_sg_entry_set_len(&sgt[0], frag_len);
 	sgt[0].bpid = FSL_DPAA_BPID_INV;
 	sgt[0].offset = 0;
-	addr = dma_map_single(dev, skb->data,
+	addr = dma_map_single(priv->tx_dma_dev, skb->data,
 			      skb_headlen(skb), dma_dir);
-	if (unlikely(dma_mapping_error(dev, addr))) {
-		dev_err(dev, "DMA mapping failed");
+	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+		netdev_err(priv->net_dev, "DMA mapping failed\n");
 		err = -EINVAL;
 		goto sg0_map_failed;
 	}
@@ -1960,10 +1960,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 		frag = &skb_shinfo(skb)->frags[i];
 		frag_len = skb_frag_size(frag);
 		WARN_ON(!skb_frag_page(frag));
-		addr = skb_frag_dma_map(dev, frag, 0,
+		addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
 					frag_len, dma_dir);
-		if (unlikely(dma_mapping_error(dev, addr))) {
-			dev_err(dev, "DMA mapping failed");
+		if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+			netdev_err(priv->net_dev, "DMA mapping failed\n");
 			err = -EINVAL;
 			goto sg_map_failed;
 		}
@@ -1986,10 +1986,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	skbh = (struct sk_buff **)buffer_start;
 	*skbh = skb;
 
-	addr = dma_map_single(dev, buffer_start,
+	addr = dma_map_single(priv->tx_dma_dev, buffer_start,
 			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
-	if (unlikely(dma_mapping_error(dev, addr))) {
-		dev_err(dev, "DMA mapping failed");
+	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+		netdev_err(priv->net_dev, "DMA mapping failed\n");
 		err = -EINVAL;
 		goto sgt_map_failed;
 	}
@@ -2003,7 +2003,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 sgt_map_failed:
 sg_map_failed:
 	for (j = 0; j < i; j++)
-		dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+		dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
 			       qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
@@ -2304,11 +2304,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 		return qman_cb_dqrr_consume;
 	}
 
-	dpaa_bp = dpaa_bpid2pool(fd->bpid);
-	if (!dpaa_bp)
-		return qman_cb_dqrr_consume;
-
-	dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
+			 DMA_FROM_DEVICE);
 
 	/* prefetch the first 64 bytes of the frame or the SGT start */
 	vaddr = phys_to_virt(addr);
@@ -2663,7 +2660,7 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
 	dma_addr_t addr = bm_buf_addr(bmb);
 
-	dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+	dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
 
 	skb_free_frag(phys_to_virt(addr));
 }
@@ -2773,12 +2770,37 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	int err = 0, i, channel;
 	struct device *dev;
 
-	/* device used for DMA mapping */
-	dev = pdev->dev.parent;
-	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
-	if (err) {
-		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
-		return err;
+	dev = &pdev->dev;
+
+	err = bman_is_probed();
+	if (!err)
+		return -EPROBE_DEFER;
+	if (err < 0) {
+		dev_err(dev, "failing probe due to bman probe error\n");
+		return -ENODEV;
+	}
+	err = qman_is_probed();
+	if (!err)
+		return -EPROBE_DEFER;
+	if (err < 0) {
+		dev_err(dev, "failing probe due to qman probe error\n");
+		return -ENODEV;
+	}
+	err = bman_portals_probed();
+	if (!err)
+		return -EPROBE_DEFER;
+	if (err < 0) {
+		dev_err(dev,
+			"failing probe due to bman portals probe error\n");
+		return -ENODEV;
+	}
+	err = qman_portals_probed();
+	if (!err)
+		return -EPROBE_DEFER;
+	if (err < 0) {
+		dev_err(dev,
+			"failing probe due to qman portals probe error\n");
+		return -ENODEV;
 	}
 
 	/* Allocate this early, so we can store relevant information in
@@ -2801,11 +2823,23 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 
 	mac_dev = dpaa_mac_dev_get(pdev);
 	if (IS_ERR(mac_dev)) {
-		dev_err(dev, "dpaa_mac_dev_get() failed\n");
+		netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
 		err = PTR_ERR(mac_dev);
 		goto free_netdev;
 	}
 
+	/* Devices used for DMA mapping */
+	priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+	priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+	err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+	if (!err)
+		err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+						   DMA_BIT_MASK(40));
+	if (err) {
+		netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
+		return err;
+	}
+
 	/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
 	 * we choose conservatively and let the user explicitly set a higher
 	 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
@@ -2832,7 +2866,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
 		/* avoid runtime computations by keeping the usable size here */
 		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-		dpaa_bps[i]->dev = dev;
+		dpaa_bps[i]->priv = priv;
 
 		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
 		if (err < 0)
@@ -2955,7 +2989,7 @@ static int dpaa_remove(struct platform_device *pdev)
 	struct device *dev;
 	int err;
 
-	dev = pdev->dev.parent;
+	dev = &pdev->dev;
 	net_dev = dev_get_drvdata(dev);
 
 	priv = netdev_priv(net_dev);
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -80,9 +80,11 @@ struct dpaa_fq_cbs {
 	struct qman_fq egress_ern;
 };
 
+struct dpaa_priv;
+
 struct dpaa_bp {
-	/* device used in the DMA mapping operations */
-	struct device *dev;
+	/* used in the DMA mapping operations */
+	struct dpaa_priv *priv;
 	/* current number of buffers in the buffer pool alloted to each CPU */
 	int __percpu *percpu_count;
 	/* all buffers allocated for this pool have this raw size */
@@ -153,6 +155,8 @@ struct dpaa_priv {
 	u16 tx_headroom;
 	struct net_device *net_dev;
 	struct mac_device *mac_dev;
+	struct device *rx_dma_dev;
+	struct device *tx_dma_dev;
 	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
 	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -634,6 +634,9 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
 {
 	u32 tmp;
 
+	iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+	if (!IS_ENABLED(CONFIG_FSL_PAMU))
+		return;
 	/* set LIODN base for this port */
 	tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
 	if (port_id % 2) {
@@ -644,7 +647,6 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
 		tmp |= liodn_base << DMA_LIODN_SHIFT;
 	}
 	iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
-	iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
 }
 
 static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -1942,6 +1944,8 @@ static int fman_init(struct fman *fman)
 
 		fman->liodn_offset[i] =
 			ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+		if (!IS_ENABLED(CONFIG_FSL_PAMU))
+			continue;
 		liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
 		if (i % 2) {
 			/* FMDM_PLR LSB holds LIODN base for odd ports */
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -435,7 +435,6 @@ struct fman_port_cfg {
 
 struct fman_port_rx_pools_params {
 	u8 num_of_pools;
-	u16 second_largest_buf_size;
 	u16 largest_buf_size;
 };
 
@@ -946,8 +945,6 @@ static int set_ext_buffer_pools(struct fman_port *port)
 	port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
 	port->rx_pools_params.largest_buf_size =
 		sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
-	port->rx_pools_params.second_largest_buf_size =
-		sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
 
 	/* FMBM_RMPD reg. - pool depletion */
 	if (buf_pool_depletion->pools_grp_mode_enable) {
@@ -1728,6 +1725,20 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port)
 }
 EXPORT_SYMBOL(fman_port_get_qman_channel_id);
 
+/**
+ * fman_port_get_device
+ * port: Pointer to the FMan port device
+ *
+ * Get the 'struct device' associated to the specified FMan port device
+ *
+ * Return: pointer to associated 'struct device'
+ */
+struct device *fman_port_get_device(struct fman_port *port)
+{
+	return port->dev;
+}
+EXPORT_SYMBOL(fman_port_get_device);
+
 int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
 {
 	if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -157,4 +157,6 @@ int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
 
 struct fman_port *fman_port_bind(struct device *dev);
 
+struct device *fman_port_get_device(struct fman_port *port);
+
 #endif /* __FMAN_PORT_H */