#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <net/sch_generic.h>

/* State for walking the classes of a qdisc; fn is called once per class. */
struct qdisc_walker
{
	int	stop;	/* set non-zero to terminate the walk */
	int	skip;	/* number of classes to skip before calling fn */
	int	count;	/* number of classes visited so far */
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

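/*
 * Usage sketch (pseudo-code for a hypothetical ->walk() implementation):
 * a qdisc typically drives the walker like this:
 *
 *	for each class "cl" {
 *		if (w->count < w->skip) {
 *			w->count++;
 *			continue;
 *		}
 *		if (w->fn(sch, cl, w) < 0) {
 *			w->stop = 1;
 *			break;
 *		}
 *		w->count++;
 *	}
 */
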
extern rwlock_t qdisc_tree_lock;

#define QDISC_ALIGNTO		32
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}

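/*
 * Usage sketch (the private struct "my_sched_data" is hypothetical): the
 * private area sized by Qdisc_ops.priv_size begins
 * QDISC_ALIGN(sizeof(struct Qdisc)) bytes past the Qdisc itself, so
 * scheduler code usually opens with:
 *
 *	struct my_sched_data *q = qdisc_priv(sch);
 */
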
/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   Normal IP packet size ~ 512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we may use an artificial
   clock evaluated by integration of the network data flow
   in the most critical places.
 */

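/*
 * Rough worked example of the rule above: a 512 byte packet on a
 * 100 Mbit/s link takes 512 * 8 / 100e6 ~= 41 usec to serialize, so the
 * 10% rule asks for roughly a 4 usec timer resolution -- far finer than a
 * jiffies clock, which is why PSCHED_GET_TIME() below is built on
 * ktime_get().
 */
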
typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide by 1000 */
#define PSCHED_US2NS(x)		((s64)(x) << 10)
#define PSCHED_NS2US(x)		((x) >> 10)

#define PSCHED_TICKS_PER_SEC	PSCHED_NS2US(NSEC_PER_SEC)
#define PSCHED_GET_TIME(stamp) \
	((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get())))

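/*
 * Note: because the conversion uses a shift by 10 instead of a divide by
 * 1000, one "psched microsecond" is really 1024 ns, and
 * PSCHED_TICKS_PER_SEC works out to NSEC_PER_SEC >> 10 = 976562 rather
 * than 1000000.
 */
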
#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(PSCHED_US2NS((usecs)) / NSEC_PER_USEC)
#define PSCHED_JIFFIE2US(delay) PSCHED_NS2US(jiffies_to_usecs((delay)) * NSEC_PER_USEC)

#define PSCHED_TDIFF(tv1, tv2)		(long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
	min_t(long long, (tv1) - (tv2), bound)
#define PSCHED_TLESS(tv1, tv2)		((tv1) < (tv2))
#define PSCHED_TADD2(tv, delta, tv_res)	((tv_res) = (tv) + (delta))
#define PSCHED_TADD(tv, delta)		((tv) += (delta))
#define PSCHED_SET_PASTPERFECT(t)	((t) = 0)
#define PSCHED_IS_PASTPERFECT(t)	((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)

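/*
 * Usage sketch (the "last_sent" and "max_delay" fields are hypothetical
 * qdisc state): a scheduler typically timestamps packets and bounds the
 * measured gap like this:
 *
 *	psched_time_t now;
 *	psched_tdiff_t delay;
 *
 *	PSCHED_GET_TIME(now);
 *	delay = PSCHED_TDIFF_SAFE(now, last_sent, max_delay);
 */
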
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;

extern int register_qdisc(struct Qdisc_ops *qops);
extern int unregister_qdisc(struct Qdisc_ops *qops);

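/*
 * Registration sketch (the "myqdisc" module and its callbacks are
 * hypothetical): a scheduler module fills in a Qdisc_ops and registers it
 * on load, e.g.:
 *
 *	static struct Qdisc_ops my_qdisc_ops = {
 *		.id		= "myqdisc",
 *		.priv_size	= sizeof(struct my_sched_data),
 *		.enqueue	= my_enqueue,
 *		.dequeue	= my_dequeue,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * with register_qdisc(&my_qdisc_ops) in module_init() and
 * unregister_qdisc(&my_qdisc_ops) in module_exit().
 */
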
extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					       struct rtattr *tab);
extern void qdisc_put_rtab(struct qdisc_rate_table *tab);

extern void __qdisc_run(struct net_device *dev);

/* Kick the device's queue discipline.  The __LINK_STATE_QDISC_RUNNING bit
 * ensures only one CPU runs the queue at a time. */
static inline void qdisc_run(struct net_device *dev)
{
	if (!netif_queue_stopped(dev) &&
	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(dev);
}

extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res);

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned psched_mtu(struct net_device *dev)
{
	unsigned mtu = dev->mtu;
	return dev->hard_header ? mtu + dev->hard_header_len : mtu;
}

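/*
 * Worked example: for an Ethernet device with mtu 1500 and
 * hard_header_len 14, psched_mtu() returns 1500 + 14 = 1514, the frame
 * size that hard_start_xmit actually sees.
 */
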
#endif