pktgen: better scheduler friendliness
The previous update did not reschedule in the inner loop, causing watchdog warnings. Rewrite the inner loop to:

* account for delays better, with fewer clock calls
* time the delay more accurately:
  - only delay if the packet was successfully sent
  - if the delay is 100ns and it takes 10ns to build the packet, account for that
* use wait_event_interruptible_timeout rather than open coding it

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
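For context, the wait_event_interruptible_timeout() change in pktgen_thread_worker() replaces an open-coded prepare_to_wait()/schedule_timeout()/finish_wait() sequence. Below is a minimal sketch of that pattern, not the pktgen code itself; struct my_thread and its queue/control fields are placeholders standing in for the corresponding struct pktgen_thread members.

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Placeholder thread state; pktgen keeps an equivalent wait queue
     * head and control word in struct pktgen_thread. */
    struct my_thread {
            wait_queue_head_t queue;
            u32 control;
    };

    /* Old pattern: open-coded sleep for up to HZ/10 jiffies. */
    static void wait_open_coded(struct my_thread *t)
    {
            DEFINE_WAIT(wait);

            prepare_to_wait(&t->queue, &wait, TASK_INTERRUPTIBLE);
            schedule_timeout(HZ / 10);
            finish_wait(&t->queue, &wait);
    }

    /* New pattern: the helper sleeps until t->control becomes non-zero,
     * a signal arrives, or HZ/10 jiffies elapse. */
    static void wait_with_helper(struct my_thread *t)
    {
            wait_event_interruptible_timeout(t->queue, t->control != 0, HZ / 10);
    }

Besides being shorter, the helper re-evaluates the condition around the sleep, so a wakeup that sets t->control just before the thread blocks is not lost.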
commit ef87979c27 (parent 6b80d6a6b4)
@@ -2104,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 
 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 {
-        ktime_t start;
+        ktime_t start_time, end_time;
         s32 remaining;
         struct hrtimer_sleeper t;
 
@@ -2115,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
         if (remaining <= 0)
                 return;
 
-        start = ktime_now();
+        start_time = ktime_now();
         if (remaining < 100)
                 udelay(remaining);      /* really small just spin */
         else {
@@ -2134,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
                 } while (t.task && pkt_dev->running && !signal_pending(current));
                 __set_current_state(TASK_RUNNING);
         }
-        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+        end_time = ktime_now();
+
+        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+        pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -3364,18 +3367,28 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
         mutex_unlock(&pktgen_thread_lock);
 }
 
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
         ktime_t idle_start = ktime_now();
-
-        if (need_resched())
-                schedule();
-        else
-                cpu_relax();
-
+        schedule();
         pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
 }
 
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+        ktime_t idle_start = ktime_now();
+
+        while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+                if (signal_pending(current))
+                        break;
+
+                if (need_resched())
+                        pktgen_resched(pkt_dev);
+                else
+                        cpu_relax();
+        }
+        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
+
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
@@ -3386,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
         u16 queue_map;
         int ret;
 
-        if (pkt_dev->delay) {
-                spin(pkt_dev, pkt_dev->next_tx);
-
-                /* This is max DELAY, this has special meaning of
-                 * "never transmit"
-                 */
-                if (pkt_dev->delay == ULLONG_MAX) {
-                        pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
-                        return;
-                }
-        }
-
-        if (!pkt_dev->skb) {
-                set_cur_queue_map(pkt_dev);
-                queue_map = pkt_dev->cur_queue_map;
-        } else {
-                queue_map = skb_get_queue_mapping(pkt_dev->skb);
-        }
-
-        txq = netdev_get_tx_queue(odev, queue_map);
-        /* Did we saturate the queue already? */
-        if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
-                /* If device is down, then all queues are permnantly frozen */
-                if (netif_running(odev))
-                        idle(pkt_dev);
-                else
-                        pktgen_stop_device(pkt_dev);
+        /* If device is offline, then don't send */
+        if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+                pktgen_stop_device(pkt_dev);
                 return;
         }
 
+        /* This is max DELAY, this has special meaning of
+         * "never transmit"
+         */
+        if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
+                pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
+                return;
+        }
+
+        /* If no skb or clone count exhausted then get new one */
         if (!pkt_dev->skb || (pkt_dev->last_ok &&
                               ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
                 /* build a new pkt */
@@ -3434,18 +3432,20 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                 pkt_dev->clone_count = 0;       /* reset counter */
         }
 
-        /* fill_packet() might have changed the queue */
+        if (pkt_dev->delay && pkt_dev->last_ok)
+                spin(pkt_dev, pkt_dev->next_tx);
+
         queue_map = skb_get_queue_mapping(pkt_dev->skb);
         txq = netdev_get_tx_queue(odev, queue_map);
 
         __netif_tx_lock_bh(txq);
-        if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
-                pkt_dev->last_ok = 0;
-        else {
-                atomic_inc(&(pkt_dev->skb->users));
+        atomic_inc(&(pkt_dev->skb->users));
 
-        retry_now:
+        if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+                ret = NETDEV_TX_BUSY;
+        else
                 ret = (*xmit)(pkt_dev->skb, odev);
+
         switch (ret) {
         case NETDEV_TX_OK:
                 txq_trans_update(txq);
@@ -3454,34 +3454,23 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                 pkt_dev->seq_num++;
                 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
                 break;
-        case NETDEV_TX_LOCKED:
-                cpu_relax();
-                goto retry_now;
         default: /* Drivers are not supposed to return other values! */
                 if (net_ratelimit())
                         pr_info("pktgen: %s xmit error: %d\n",
                                 odev->name, ret);
                 pkt_dev->errors++;
                 /* fallthru */
+        case NETDEV_TX_LOCKED:
         case NETDEV_TX_BUSY:
                 /* Retry it next time */
                 atomic_dec(&(pkt_dev->skb->users));
                 pkt_dev->last_ok = 0;
         }
-
-        if (pkt_dev->delay)
-                pkt_dev->next_tx = ktime_add_ns(ktime_now(),
-                                                pkt_dev->delay);
-        }
         __netif_tx_unlock_bh(txq);
 
         /* If pkt_dev->count is zero, then run forever */
         if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
-                while (atomic_read(&(pkt_dev->skb->users)) != 1) {
-                        if (signal_pending(current))
-                                break;
-                        idle(pkt_dev);
-                }
+                pktgen_wait_for_skb(pkt_dev);
 
                 /* Done with this */
                 pktgen_stop_device(pkt_dev);
@@ -3514,20 +3503,24 @@ static int pktgen_thread_worker(void *arg)
         while (!kthread_should_stop()) {
                 pkt_dev = next_to_run(t);
 
-                if (!pkt_dev &&
-                    (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
-                    == 0) {
-                        prepare_to_wait(&(t->queue), &wait,
-                                        TASK_INTERRUPTIBLE);
-                        schedule_timeout(HZ / 10);
-                        finish_wait(&(t->queue), &wait);
+                if (unlikely(!pkt_dev && t->control == 0)) {
+                        wait_event_interruptible_timeout(t->queue,
+                                                         t->control != 0,
+                                                         HZ/10);
+                        continue;
                 }
 
                 __set_current_state(TASK_RUNNING);
 
-                if (pkt_dev)
+                if (likely(pkt_dev)) {
                         pktgen_xmit(pkt_dev);
+
+                        if (need_resched())
+                                pktgen_resched(pkt_dev);
+                        else
+                                cpu_relax();
+                }
 
                 if (t->control & T_STOP) {
                         pktgen_stop(t);
                         t->control &= ~(T_STOP);