Commit 4ec5240e authored by Linus Torvalds
@@ -2032,6 +2032,15 @@ config TIGON3
 	  To compile this driver as a module, choose M here: the module
 	  will be called tg3.  This is recommended.
 
+config BNX2
+	tristate "Broadcom NetXtremeII support"
+	depends on PCI
+	help
+	  This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called bnx2.  This is recommended.
+
 config GIANFAR
 	tristate "Gianfar Ethernet"
 	depends on 85xx || 83xx
......
@@ -51,6 +51,7 @@ obj-$(CONFIG_NS83820) += ns83820.o
 obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
+obj-$(CONFIG_BNX2) += bnx2.o
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SK98LIN) += sk98lin/
 obj-$(CONFIG_SKFP) += skfp/
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -3037,7 +3037,7 @@ static void bond_activebackup_arp_mon(struct net_device *bond_dev)
 		bond_set_slave_inactive_flags(bond->current_arp_slave);
 
 		/* search for next candidate */
-		bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave) {
+		bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
 			if (IS_UP(slave->dev)) {
 				slave->link = BOND_LINK_BACK;
 				bond_set_slave_active_flags(slave);
......
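The bonding fix above is a one-argument change: the search for a replacement active ARP slave now starts at the slave after current_arp_slave rather than at current_arp_slave itself, so the monitor cannot immediately re-select the interface it has just marked inactive. A standalone C sketch (not kernel code; the slave array and ring-scan helper are illustrative) of why the starting point matters:

/* Standalone sketch (not kernel code): with a circular list of slaves,
 * starting the scan at the current (just-deactivated) slave re-selects
 * it immediately if its link is still reported up; starting at the
 * next entry rotates fairly through the candidates. */
#include <stdio.h>

#define NSLAVES 3

struct slave { const char *name; int up; };

static int next_candidate(const struct slave *s, int start)
{
	/* scan the ring exactly once, beginning at 'start' */
	for (int i = 0; i < NSLAVES; i++) {
		int idx = (start + i) % NSLAVES;
		if (s[idx].up)
			return idx;
	}
	return -1;
}

int main(void)
{
	struct slave s[NSLAVES] = { {"eth0", 1}, {"eth1", 1}, {"eth2", 1} };
	int cur = 0;

	/* old behaviour: start at cur -> picks eth0 again */
	printf("start at cur:       %s\n", s[next_candidate(s, cur)].name);
	/* fixed behaviour: start at cur + 1 -> rotates to eth1 */
	printf("start at cur->next: %s\n", s[next_candidate(s, cur + 1)].name);
	return 0;
}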
@@ -36,8 +36,8 @@ struct trh_hdr {
 	__u8  fc;		/* frame control field */
 	__u8  daddr[TR_ALEN];	/* destination address */
 	__u8  saddr[TR_ALEN];	/* source address */
-	__u16 rcf;		/* route control field */
-	__u16 rseg[8];		/* routing registers */
+	__be16 rcf;		/* route control field */
+	__be16 rseg[8];		/* routing registers */
 };
 
 #ifdef __KERNEL__
@@ -55,7 +55,7 @@ struct trllc {
 	__u8  ssap;		/* source SAP */
 	__u8  llc;		/* LLC control field */
 	__u8  protid[3];	/* protocol id */
-	__u16 ethertype;	/* ether type field */
+	__be16 ethertype;	/* ether type field */
 };
 
 /* Token-Ring statistics collection data. */
......
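The if_tr.h change swaps __u16 for __be16. Both are 16 bits wide, so the on-wire layout is untouched; __be16 is an annotation telling sparse that the field carries big-endian (network-order) data, so direct arithmetic on it gets flagged until the value passes through a conversion helper. A minimal userspace illustration of the same idea, using ntohs() (in the kernel the helpers and the sparse __bitwise checks live in the byteorder headers):

/* Standalone illustration: a __be16-style field holds network byte
 * order and must go through ntohs()/be16_to_cpu() before use. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t rcf_wire = htons(0x02A0);	/* as it appears on the ring */

	/* using the raw value is wrong on little-endian hosts ... */
	printf("raw:   0x%04x\n", rcf_wire);
	/* ... converting yields the logical route-control field */
	printf("ntohs: 0x%04x\n", ntohs(rcf_wire));
	return 0;
}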
@@ -65,9 +65,13 @@
 #define ADVERTISE_SLCT          0x001f  /* Selector bits               */
 #define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
 #define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_1000XFULL     0x0020  /* Try for 1000BASE-X full-duplex */
 #define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_1000XHALF     0x0040  /* Try for 1000BASE-X half-duplex */
 #define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_1000XPAUSE    0x0080  /* Try for 1000BASE-X pause    */
 #define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_1000XPSE_ASYM 0x0100  /* Try for 1000BASE-X asym pause */
 #define ADVERTISE_100BASE4      0x0200  /* Try for 100mbps 4k packets  */
 #define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
 #define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymmetric pause    */
@@ -84,9 +88,13 @@
 /* Link partner ability register. */
 #define LPA_SLCT                0x001f  /* Same as advertise selector  */
 #define LPA_10HALF              0x0020  /* Can do 10mbps half-duplex   */
+#define LPA_1000XFULL           0x0020  /* Can do 1000BASE-X full-duplex */
 #define LPA_10FULL              0x0040  /* Can do 10mbps full-duplex   */
+#define LPA_1000XHALF           0x0040  /* Can do 1000BASE-X half-duplex */
 #define LPA_100HALF             0x0080  /* Can do 100mbps half-duplex  */
+#define LPA_1000XPAUSE          0x0080  /* Can do 1000BASE-X pause     */
 #define LPA_100FULL             0x0100  /* Can do 100mbps full-duplex  */
+#define LPA_1000XPAUSE_ASYM     0x0100  /* Can do 1000BASE-X pause asym */
 #define LPA_100BASE4            0x0200  /* Can do 100mbps 4k packets   */
 #define LPA_PAUSE_CAP           0x0400  /* Can pause                   */
 #define LPA_PAUSE_ASYM          0x0800  /* Can pause asymmetrically    */
......
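Note that the new ADVERTISE_1000X*/LPA_1000X* macros deliberately reuse bit values that already belong to the 10/100 definitions (ADVERTISE_1000XFULL and ADVERTISE_10HALF are both 0x0020, and so on). MII registers 4 and 5 are each a single 16-bit register whose bits are interpreted differently when the PHY runs in 1000BASE-X mode, so a driver must pick one macro family based on the link mode. A standalone sketch of that dual interpretation, with the constants copied from the hunk above:

/* Standalone sketch: the same advertisement-register bit means
 * "10 Mb/s half duplex" on a copper PHY but "1000BASE-X full duplex"
 * on a fiber PHY.  Values copied from the defines above. */
#include <stdio.h>

#define ADVERTISE_10HALF	0x0020
#define ADVERTISE_1000XFULL	0x0020

static const char *describe(unsigned int reg4, int fiber_mode)
{
	if (reg4 & 0x0020)
		return fiber_mode ? "1000BASE-X full-duplex"
				  : "10 Mb/s half-duplex";
	return "bit 5 clear";
}

int main(void)
{
	unsigned int reg4 = ADVERTISE_10HALF;	/* same bit either way */

	printf("copper: %s\n", describe(reg4, 0));
	printf("fiber:  %s\n", describe(reg4, 1));
	return 0;
}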
@@ -2071,6 +2071,7 @@
 #define PCI_DEVICE_ID_TIGON3_5703	0x1647
 #define PCI_DEVICE_ID_TIGON3_5704	0x1648
 #define PCI_DEVICE_ID_TIGON3_5704S_2	0x1649
+#define PCI_DEVICE_ID_NX2_5706		0x164a
 #define PCI_DEVICE_ID_TIGON3_5702FE	0x164d
 #define PCI_DEVICE_ID_TIGON3_5705	0x1653
 #define PCI_DEVICE_ID_TIGON3_5705_2	0x1654
@@ -2090,6 +2091,7 @@
 #define PCI_DEVICE_ID_TIGON3_5702X	0x16a6
 #define PCI_DEVICE_ID_TIGON3_5703X	0x16a7
 #define PCI_DEVICE_ID_TIGON3_5704S	0x16a8
+#define PCI_DEVICE_ID_NX2_5706S	0x16aa
 #define PCI_DEVICE_ID_TIGON3_5702A3	0x16c6
 #define PCI_DEVICE_ID_TIGON3_5703A3	0x16c7
 #define PCI_DEVICE_ID_TIGON3_5781	0x16dd
......
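The two new IDs give the bnx2 driver (its large new source files are the collapsed diffs above) entries for the NetXtremeII 5706 copper (0x164a) and SerDes (0x16aa) parts. Below is the conventional shape of the PCI match table that would consume them; this is an illustrative, compile-checkable sketch, not the actual table from the collapsed bnx2.c, although PCI_VENDOR_ID_BROADCOM (0x14e4) is the real vendor ID:

/* Illustrative only -- the real match table lives in the collapsed
 * bnx2.c diff above.  struct pci_device_id is mocked down to the two
 * fields this sketch needs. */
#define PCI_VENDOR_ID_BROADCOM	0x14e4
#define PCI_DEVICE_ID_NX2_5706	0x164a
#define PCI_DEVICE_ID_NX2_5706S	0x16aa

struct pci_device_id { unsigned int vendor, device; };

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706 },	/* copper */
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S },	/* SerDes */
	{ 0, 0 }						/* sentinel */
};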
@@ -427,6 +427,7 @@ enum
 	TCA_NETEM_UNSPEC,
 	TCA_NETEM_CORR,
 	TCA_NETEM_DELAY_DIST,
+	TCA_NETEM_REORDER,
 	__TCA_NETEM_MAX,
 };
 
@@ -437,7 +438,7 @@ struct tc_netem_qopt
 	__u32	latency;	/* added delay (us) */
 	__u32	limit;		/* fifo limit (packets) */
 	__u32	loss;		/* random packet loss (0=none ~0=100%) */
-	__u32	gap;		/* re-ordering gap (0 for delay all) */
+	__u32	gap;		/* re-ordering gap (0 for none) */
 	__u32	duplicate;	/* random packet dup (0=none ~0=100%) */
 	__u32	jitter;		/* random jitter in latency (us) */
 };
@@ -449,6 +450,12 @@ struct tc_netem_corr
 	__u32	dup_corr;	/* duplicate correlation */
 };
 
+struct tc_netem_reorder
+{
+	__u32	probability;
+	__u32	correlation;
+};
+
 #define NETEM_DIST_SCALE	8192
 
 #endif
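tc_netem_reorder follows the probability convention the tc_netem_qopt comments above already use for loss and duplicate: 0 means never, ~0 (all bits set) means 100%, linear in between. Userspace hands the struct to the kernel as a TCA_NETEM_REORDER attribute nested after the initial queue options, as the sch_netem.c parsing hunk below shows. A small standalone helper demonstrating that scaling (the exact rounding is this sketch's assumption, not something the header dictates):

/* Standalone helper for the "0=none ~0=100%" probability scale used
 * by the netem options above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t percent_to_netem(double pct)
{
	return (uint32_t)(pct / 100.0 * UINT32_MAX);
}

int main(void)
{
	printf("25%%  -> 0x%08x\n", percent_to_netem(25.0));
	printf("100%% -> 0x%08x\n", percent_to_netem(100.0));
	return 0;
}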
@@ -515,6 +515,8 @@ struct xfrm_dst
 	struct dst_entry *route;
 	u32 route_mtu_cached;
 	u32 child_mtu_cached;
+	u32 route_cookie;
+	u32 path_cookie;
 };
 
 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
......
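route_cookie and path_cookie cache a generation number for the routes an xfrm bundle was built on: the xfrm6_policy.c hunk below stores the IPv6 fib node's fn_sernum into them at bundle-creation time, and the xfrm_policy.c hunk at the end of this commit passes them to dst_check(), so a stale bundle is discarded once the routing tree changes instead of being trusted forever. A standalone model of that generation-counter pattern (the names here are illustrative, not the kernel API):

/* Standalone model of the cookie idea: record the source's generation
 * number when a derived object is built, and treat the object as stale
 * once the generation moves on. */
#include <stdio.h>
#include <stdint.h>

struct route_table { uint32_t sernum; };	/* fn_sernum analogue   */
struct bundle      { uint32_t route_cookie; };

static int bundle_ok(const struct bundle *b, const struct route_table *t)
{
	return b->route_cookie == t->sernum;	/* dst_check() analogue */
}

int main(void)
{
	struct route_table tbl = { .sernum = 7 };
	struct bundle b = { .route_cookie = tbl.sernum };  /* built now */

	printf("fresh bundle: %s\n", bundle_ok(&b, &tbl) ? "ok" : "stale");
	tbl.sernum++;				/* routing tree changed */
	printf("after change: %s\n", bundle_ok(&b, &tbl) ? "ok" : "stale");
	return 0;
}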
@@ -47,12 +47,12 @@ static void rif_check_expire(unsigned long dummy);
  *	Each RIF entry we learn is kept this way
  */
 
-struct rif_cache_s {
+struct rif_cache {
 	unsigned char addr[TR_ALEN];
 	int iface;
-	__u16 rcf;
-	__u16 rseg[8];
-	struct rif_cache_s *next;
+	__be16 rcf;
+	__be16 rseg[8];
+	struct rif_cache *next;
 	unsigned long last_used;
 	unsigned char local_ring;
 };
@@ -64,7 +64,7 @@ struct rif_cache_s {
  *	up a lot.
  */
 
-static struct rif_cache_s *rif_table[RIF_TABLE_SIZE];
+static struct rif_cache *rif_table[RIF_TABLE_SIZE];
 
 static DEFINE_SPINLOCK(rif_lock);
@@ -249,7 +249,7 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
 {
 	int slack;
 	unsigned int hash;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 	unsigned char *olddata;
 	static const unsigned char mcast_func_addr[]
 		= {0xC0,0x00,0x00,0x04,0x00,0x00};
@@ -337,7 +337,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 {
 	unsigned int hash, rii_p = 0;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 
 	spin_lock_bh(&rif_lock);
@@ -373,7 +373,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 	 * FIXME: We ought to keep some kind of cache size
 	 * limiting and adjust the timers to suit.
 	 */
-	entry=kmalloc(sizeof(struct rif_cache_s),GFP_ATOMIC);
+	entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
 
 	if(!entry)
 	{
@@ -435,7 +435,7 @@ static void rif_check_expire(unsigned long dummy)
 	spin_lock_bh(&rif_lock);
 
 	for(i =0; i < RIF_TABLE_SIZE; i++) {
-		struct rif_cache_s *entry, **pentry;
+		struct rif_cache *entry, **pentry;
 
 		pentry = rif_table+i;
 		while((entry=*pentry) != NULL) {
@@ -467,10 +467,10 @@ static void rif_check_expire(unsigned long dummy)
 
 #ifdef CONFIG_PROC_FS
 
-static struct rif_cache_s *rif_get_idx(loff_t pos)
+static struct rif_cache *rif_get_idx(loff_t pos)
 {
 	int i;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 	loff_t off = 0;
 
 	for(i = 0; i < RIF_TABLE_SIZE; i++)
@@ -493,7 +493,7 @@ static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
 static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	int i;
-	struct rif_cache_s *ent = v;
+	struct rif_cache *ent = v;
 
 	++*pos;
static int rif_seq_show(struct seq_file *seq, void *v)
{
int j, rcf_len, segment, brdgnmb;
struct rif_cache_s *entry = v;
struct rif_cache *entry = v;
if (v == SEQ_START_TOKEN)
seq_puts(seq,
......
@@ -113,6 +113,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 
 		xdst = (struct xfrm_dst *)dst1;
 		xdst->route = &rt->u.dst;
+		if (rt->rt6i_node)
+			xdst->route_cookie = rt->rt6i_node->fn_sernum;
 
 		dst1->next = dst_prev;
 		dst_prev = dst1;
@@ -137,6 +139,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 
 	dst_prev->child = &rt->u.dst;
 	dst->path = &rt->u.dst;
+	if (rt->rt6i_node)
+		((struct xfrm_dst *)dst)->path_cookie = rt->rt6i_node->fn_sernum;
 
 	*dst_p = dst;
 	dst = dst_prev;
......
@@ -53,7 +53,6 @@
 struct netem_sched_data {
 	struct Qdisc	*qdisc;
-	struct sk_buff_head delayed;
 	struct timer_list timer;
 
 	u32 latency;
@@ -63,11 +62,12 @@ struct netem_sched_data {
 	u32 gap;
 	u32 jitter;
 	u32 duplicate;
+	u32 reorder;
 
 	struct crndstate {
 		unsigned long last;
 		unsigned long rho;
-	} delay_cor, loss_cor, dup_cor;
+	} delay_cor, loss_cor, dup_cor, reorder_cor;
 
 	struct disttable {
 		u32 size;
@@ -137,122 +137,68 @@ static long tabledist(unsigned long mu, long sigma,
 	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-/* Put skb in the private delayed queue. */
-static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	psched_tdiff_t td;
-	psched_time_t now;
-
-	PSCHED_GET_TIME(now);
-	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-
-	/* Always queue at tail to keep packets in order */
-	if (likely(q->delayed.qlen < q->limit)) {
-		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
-
-		PSCHED_TADD2(now, td, cb->time_to_send);
-
-		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
-			 now, cb->time_to_send);
-
-		__skb_queue_tail(&q->delayed, skb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	pr_debug("netem_delay: queue over limit %d\n", q->limit);
-	sch->qstats.overlimits++;
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
-
 /*
- * Move a packet that is ready to send from the delay holding
- * list to the underlying qdisc.
+ * Insert one skb into qdisc.
+ * Note: parent depends on return value to account for queue length.
+ *	NET_XMIT_DROP: queue length didn't change.
+ *	NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_run(struct Qdisc *sch)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct sk_buff *skb;
-	psched_time_t now;
-
-	PSCHED_GET_TIME(now);
-	skb = skb_peek(&q->delayed);
-	if (skb) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
-
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			return 1;
-		}
-
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-
-	return 0;
-}
-
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
+	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+	struct sk_buff *skb2;
 	int ret;
+	int count = 1;
 
 	pr_debug("netem_enqueue skb=%p\n", skb);
 
+	/* Random duplication */
+	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+		++count;
+
 	/* Random packet drop 0 => none, ~0 => all */
-	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
-		pr_debug("netem_enqueue: random loss\n");
+	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+		--count;
+
+	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return 0;	/* lie about loss so TCP doesn't know */
+		return NET_XMIT_DROP;
 	}
 
-	/* Random duplication */
-	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_clone(skb, GFP_ATOMIC);
-		if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
-			struct Qdisc *qp;
-
-			/* Since one packet can generate two packets in the
-			 * queue, the parent's qlen accounting gets confused,
-			 * so fix it.
-			 */
-			qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
-			if (qp)
-				qp->q.qlen++;
-
-			sch->q.qlen++;
-			sch->bstats.bytes += skb2->len;
-			sch->bstats.packets++;
-		} else
-			sch->qstats.drops++;
+	/*
+	 * If we need to duplicate packet, then re-insert at top of the
+	 * qdisc tree, since parent queuer expects that only one
+	 * skb will be queued.
+	 */
+	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
+		struct Qdisc *rootq = sch->dev->qdisc;
+		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+		q->duplicate = 0;
+
+		rootq->enqueue(skb2, rootq);
+		q->duplicate = dupsave;
 	}
 
-	/* If doing simple delay then gap == 0 so all packets
-	 * go into the delayed holding queue
-	 * otherwise if doing out of order only "1 out of gap"
-	 * packets will be delayed.
-	 */
-	if (q->counter < q->gap) {
+	if (q->gap == 0			/* not doing reordering */
+	    || q->counter < q->gap	/* inside last reordering gap */
+	    || q->reorder < get_crandom(&q->reorder_cor)) {
+		psched_time_t now;
+		PSCHED_GET_TIME(now);
+		PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
+					    &q->delay_cor, q->delay_dist),
+			     cb->time_to_send);
 		++q->counter;
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
+		/*
+		 * Do re-ordering by putting one out of N packets at the front
+		 * of the queue.
+		 */
+		PSCHED_GET_TIME(cb->time_to_send);
 		q->counter = 0;
-		ret = netem_delay(sch, skb);
-		netem_run(sch);
+		ret = q->qdisc->ops->requeue(skb, q->qdisc);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
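After this hunk a packet normally gets a future departure time stamped into its cb and is enqueued in order; only when reordering is configured, the gap counter has run out, and the random draw fires is it requeued at the head carrying the current time, letting it overtake the delayed packets ahead of it. A standalone model of that three-clause decision (plain rand() stands in for the kernel's correlated random source):

/* Standalone model of the reorder decision above: with gap N and
 * probability p, packets are delayed in order until the gap expires,
 * then each packet rolls the dice until one is sent to the head of
 * the queue with no added delay, which is what reorders it. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int counter, gap = 4;
static double reorder_p = 0.25;

static int send_immediately(void)
{
	if (gap == 0				/* not doing reordering */
	    || counter < gap			/* inside last reordering gap */
	    || (double)rand() / RAND_MAX > reorder_p) {
		++counter;
		return 0;			/* delayed, keeps order */
	}
	counter = 0;
	return 1;				/* to head of queue: reordered */
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		printf("pkt %2d: %s\n", i,
		       send_immediately() ? "reordered" : "delayed");
	return 0;
}

In iproute2 releases that grew the matching option, this surfaces as something like `tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50%` (probability, then correlation).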
@@ -296,22 +242,33 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	int pending;
-
-	pending = netem_run(sch);
 
 	skb = q->qdisc->dequeue(q->qdisc);
 	if (skb) {
-		pr_debug("netem_dequeue: return skb=%p\n", skb);
-		sch->q.qlen--;
-		sch->flags &= ~TCQ_F_THROTTLED;
-	}
-	else if (pending) {
-		pr_debug("netem_dequeue: throttling\n");
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		psched_time_t now;
+		long delay;
+
+		/* if more time remaining? */
+		PSCHED_GET_TIME(now);
+		delay = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+
+		if (delay <= 0) {
+			pr_debug("netem_dequeue: return skb=%p\n", skb);
+			sch->q.qlen--;
+			sch->flags &= ~TCQ_F_THROTTLED;
+			return skb;
+		}
+
+		mod_timer(&q->timer, jiffies + delay);
 		sch->flags |= TCQ_F_THROTTLED;
-	}
 
-	return skb;
+		if (q->qdisc->ops->requeue(skb, q->qdisc) != 0)
+			sch->qstats.drops++;
+	}
+	return NULL;
 }
static void netem_watchdog(unsigned long arg)
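With the private delayed list and netem_run() gone, every waiting packet now lives in the inner qdisc, and dequeue itself does the waiting: it pulls the head packet, and if that packet's departure time has not yet arrived it is pushed back via requeue() and the watchdog timer re-armed. A standalone model of that dequeue-or-requeue loop (integer timestamps stand in for psched ticks):

/* Standalone model of the dequeue path above: packets carry a
 * departure time; if the head packet is not yet due it stays at the
 * head and the caller reports "throttled" (and would arm a timer). */
#include <stdio.h>

#define QLEN 3

static int queue[QLEN] = { 5, 8, 11 };	/* departure times, head first */
static int head;			/* index of current head element */

static int dequeue(int now)
{
	if (head == QLEN)
		return -1;		/* queue empty */
	if (queue[head] <= now)
		return queue[head++];	/* due: really dequeue it */
	/* not due yet: leave at head; caller arms a timer for it */
	return -1;
}

int main(void)
{
	for (int now = 4; now <= 12; now += 2) {
		int t = dequeue(now);
		if (t < 0)
			printf("now=%2d: throttled\n", now);
		else
			printf("now=%2d: sent pkt due at %d\n", now, t);
	}
	return 0;
}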
@@ -328,8 +285,6 @@ static void netem_reset(struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
-	skb_queue_purge(&q->delayed);
-
 	sch->q.qlen = 0;
 	sch->flags &= ~TCQ_F_THROTTLED;
 	del_timer_sync(&q->timer);
@@ -397,6 +352,19 @@ static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
 	return 0;
 }
 
+static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	const struct tc_netem_reorder *r = RTA_DATA(attr);
+
+	if (RTA_PAYLOAD(attr) != sizeof(*r))
+		return -EINVAL;
+
+	q->reorder = r->probability;
+	init_crandom(&q->reorder_cor, r->correlation);
+	return 0;
+}
+
 static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -417,9 +385,15 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 	q->jitter = qopt->jitter;
 	q->limit = qopt->limit;
 	q->gap = qopt->gap;
+	q->counter = 0;
 	q->loss = qopt->loss;
 	q->duplicate = qopt->duplicate;
 
+	/* for compatibility with earlier versions.
+	 * if gap is set, need to assume 100% probability
+	 */
+	q->reorder = ~0;
+
 	/* Handle nested options after initial queue options.
 	 * Should have put all options in nested format but too late now.
 	 */
@@ -441,6 +415,11 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 			if (ret)
 				return ret;
 		}
+
+		if (tb[TCA_NETEM_REORDER-1]) {
+			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
+			if (ret)
+				return ret;
+		}
 	}
@@ -455,11 +434,9 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	if (!opt)
 		return -EINVAL;
 
-	skb_queue_head_init(&q->delayed);
 	init_timer(&q->timer);
 	q->timer.function = netem_watchdog;
 	q->timer.data = (unsigned long) sch;
-	q->counter = 0;
 
 	q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
 	if (!q->qdisc) {
@@ -491,6 +468,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct rtattr *rta = (struct rtattr *) b;
 	struct tc_netem_qopt qopt;
 	struct tc_netem_corr cor;
+	struct tc_netem_reorder reorder;
 
 	qopt.latency = q->latency;
 	qopt.jitter = q->jitter;
@@ -504,6 +482,11 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	cor.loss_corr = q->loss_cor.rho;
 	cor.dup_corr = q->dup_cor.rho;
 	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+
+	reorder.probability = q->reorder;
+	reorder.correlation = q->reorder_cor.rho;
+	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+
 	rta->rta_len = skb->tail - b;
 
 	return skb->len;
......
@@ -1136,7 +1136,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 	struct xfrm_dst *last;
 	u32 mtu;
 
-	if (!dst_check(dst->path, 0) ||
+	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
 	    (dst->dev && !netif_running(dst->dev)))
 		return 0;
 
@@ -1156,7 +1156,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 		xdst->child_mtu_cached = mtu;
 	}
 
-	if (!dst_check(xdst->route, 0))
+	if (!dst_check(xdst->route, xdst->route_cookie))
 		return 0;
 	mtu = dst_mtu(xdst->route);
 	if (xdst->route_mtu_cached != mtu) {
......