// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>

#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"
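
/* Synchronous transmit path: runs from the per-device workqueue when the
 * driver only implements the blocking ->xmit_sync() callback, so the driver
 * is free to sleep here.
 */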
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, sync_tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}
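
/* Common transmit path for data and MLME frames: append the FCS when the
 * hardware does not add it itself, stop the netif queues, then hand the skb
 * to the driver, preferring the asynchronous callback when available.
 */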
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
		struct sk_buff *nskb;
		u16 crc;

		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
					       GFP_ATOMIC);
			if (likely(nskb)) {
				consume_skb(skb);
				skb = nskb;
			} else {
				goto err_free_skb;
			}
		}

		crc = crc_ccitt(0, skb->data, skb->len);
		put_unaligned_le16(crc, skb_put(skb, 2));
	}

	/* Stop the netif queue on each sub_if_data object. */
	ieee802154_hold_queue(local);
	atomic_inc(&local->phy->ongoing_txs);

	/* Drivers should preferably implement the async callback. In some rare
	 * cases they only provide a sync callback which we will use as a
	 * fallback.
	 */
	if (local->ops->xmit_async) {
		unsigned int len = skb->len;

		ret = drv_xmit_async(local, skb);
		if (ret)
			goto err_wake_netif_queue;

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		local->tx_skb = skb;
		queue_work(local->workqueue, &local->sync_tx_work);
	}

	return NETDEV_TX_OK;

err_wake_netif_queue:
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
err_free_skb:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
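
/* Wait until all ongoing transmissions have completed, keeping the netif
 * queues stopped meanwhile, and return the result of the last transmission.
 */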
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ieee802154_disable_queue(local);
	wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
	ret = local->tx_result;
	ieee802154_release_queue(local);

	return ret;
}
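
/* Flush all ongoing transmissions and leave the netif queues stopped; the
 * PHY is flagged as stopped so that unexpected transmissions can be caught.
 */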
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ret = ieee802154_sync_queue(local);
	set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

	return ret;
}
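
/* Prepare an MLME operation by quiescing the transmit path. */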
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
	return ieee802154_sync_and_hold_queue(local);
}
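
/* Transmit an MLME frame and wait for its completion, with the rtnl lock
 * already held by the caller.
 */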
int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
			      struct ieee802154_sub_if_data *sdata,
			      struct sk_buff *skb)
{
	/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
	 * MLME transmissions.
	 */
	ASSERT_RTNL();

	/* Ensure the device was not stopped, otherwise error out */
	if (!local->open_count)
		return -ENETDOWN;

	/* Warn if the ieee802154 core thinks MLME frames can be sent while the
	 * net interface expects this cannot happen.
	 */
	if (WARN_ON_ONCE(!netif_running(sdata->dev)))
		return -ENETDOWN;

	ieee802154_tx(local, skb);
	return ieee802154_sync_queue(local);
}
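
/* Same as ieee802154_mlme_tx_locked(), but takes the rtnl lock itself. */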
int ieee802154_mlme_tx(struct ieee802154_local *local,
		       struct ieee802154_sub_if_data *sdata,
		       struct sk_buff *skb)
{
	int ret;

	rtnl_lock();
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	rtnl_unlock();

	return ret;
}
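
/* Finish an MLME operation by letting the netif queues run again. */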
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
	ieee802154_release_queue(local);
}
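
/* Helper to send a single MLME frame, running the full pre/tx/post
 * sequence.
 */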
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
			   struct ieee802154_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}
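
/* Same as ieee802154_mlme_tx_one(), but the caller already holds the rtnl
 * lock.
 */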
int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
				  struct ieee802154_sub_if_data *sdata,
				  struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}
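
/* Report whether the core has deliberately flagged the netif queues as
 * stopped.
 */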
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
	return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
}
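
/* Transmit entry point for frames handed over by the net core (the "hot"
 * path), as opposed to internally generated MLME frames.
 */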
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	/* Warn if the net interface tries to transmit frames while the
	 * ieee802154 core assumes the queue is stopped.
	 */
	WARN_ON_ONCE(ieee802154_queue_is_stopped(local));

	return ieee802154_tx(local, skb);
}
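
/* ndo_start_xmit handler for monitor interfaces: frames enter the transmit
 * path without link-layer security processing.
 */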
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}
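
/* ndo_start_xmit handler for data interfaces: frames are run through
 * link-layer security (llsec) encryption before entering the transmit path.
 */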
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO: move this into the wpan_dev_hard_header and dev_hard_header
	 * functions; otherwise Wireshark shows a MAC header carrying security
	 * fields while the payload is not actually encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}