Commit 69d48179 authored by David S. Miller

Merge branch 'netem-fix-compilation-on-32-bit'

Stephen Hemminger says:

====================
netem: fix compilation on 32 bit

A couple of places where a 64-bit CPU was being assumed incorrectly.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
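Background for the hunks below (an editorial sketch, not part of the commit): on a 32-bit kernel, gcc lowers a 64-bit `/` or `%` into a call to a libgcc helper such as __udivdi3 or __umoddi3, and the kernel does not link against libgcc, so the build breaks at link time. The usual cures are to narrow the operand to 32 bits or to go through the helpers in <linux/math64.h>, which is the pattern these two patches apply. The function names in this sketch are made up for illustration.

```c
#include <linux/types.h>
#include <linux/math64.h>	/* div64_u64(), do_div() */

/* BROKEN on 32-bit: the 64-bit '%' becomes a call to __umoddi3,
 * which the kernel does not provide, so linking fails.
 */
static u64 naive_mod(u64 value, u64 range)
{
	return value % range;
}

/* OK: a 64-by-32 division via do_div(), which divides 'value'
 * in place and returns the remainder.
 */
static u32 mod_by_u32(u64 value, u32 range)
{
	return do_div(value, range);
}

/* OK: a full 64-by-64 division through the math64.h helper. */
static u64 div_by_u64(u64 value, u64 range)
{
	return div64_u64(value, range);
}
```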
@@ -312,9 +312,9 @@ static bool loss_event(struct netem_sched_data *q)
  * std deviation sigma. Uses table lookup to approximate the desired
  * distribution, and a uniformly-distributed pseudo-random source.
  */
-static s64 tabledist(s64 mu, s64 sigma,
+static s64 tabledist(s64 mu, s32 sigma,
                      struct crndstate *state,
                      const struct disttable *dist)
 {
         s64 x;
         long t;
@@ -327,7 +327,7 @@ static s64 tabledist(s64 mu, s64 sigma,
 
         /* default uniform distribution */
         if (dist == NULL)
-                return (rnd % (2*sigma)) - sigma + mu;
+                return (rnd % (2 * sigma)) - sigma + mu;
 
         t = dist->table[rnd % dist->size];
         x = (sigma % NETEM_DIST_SCALE) * t;
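A side note on the uniform fallback touched above: `rnd % (2 * sigma)` is uniform over [0, 2*sigma), so shifting it by `mu - sigma` yields a value uniform over [mu - sigma, mu + sigma); with `sigma` now a 32-bit quantity, the modulus is an ordinary 32-bit operation. A small userspace sketch of the same arithmetic, with made-up names and values and assuming a non-negative sigma:

```c
#include <stdint.h>
#include <stdio.h>

/* Map a random 32-bit value onto [mu - sigma, mu + sigma), mirroring
 * the uniform fallback in tabledist(). The cast keeps the recentring
 * arithmetic in 64 bits.
 */
static int64_t uniform_around(int64_t mu, int32_t sigma, uint32_t rnd)
{
	if (sigma == 0)
		return mu;
	return (int64_t)(rnd % (2u * (uint32_t)sigma)) - sigma + mu;
}

int main(void)
{
	/* e.g. mean 100000 ns, jitter 10000 ns */
	printf("%lld\n", (long long)uniform_around(100000, 10000, 123456789u));
	return 0;
}
```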
@@ -339,10 +339,8 @@ static s64 tabledist(s64 mu, s64 sigma,
         return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-static u64 packet_len_2_sched_time(unsigned int len,
-                                   struct netem_sched_data *q)
+static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
 {
-        u64 offset;
         len += q->packet_overhead;
 
         if (q->cell_size) {
@@ -352,9 +350,8 @@ static u64 packet_len_2_sched_time(unsigned int len,
                         cells++;
                 len = cells * (q->cell_size + q->cell_overhead);
         }
-        offset = (u64)len * NSEC_PER_SEC;
-        do_div(offset, q->rate);
-        return offset;
+
+        return div64_u64(len * NSEC_PER_SEC, q->rate);
 }
 
 static void tfifo_reset(struct Qdisc *sch)
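To put numbers on the rewritten helper above, treating `q->rate` as bytes per second (which is what the division implies): the delay is the time needed to serialize `len` bytes at that rate, expressed in nanoseconds, so the intermediate product alone exceeds 32 bits for any realistic packet. A standalone sketch with made-up values, not kernel code:

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Nanoseconds needed to put 'len' bytes on the wire at 'rate' bytes
 * per second; 1500 * 1e9 already needs more than 32 bits.
 */
static uint64_t tx_time_ns(uint64_t len, uint64_t rate)
{
	return len * NSEC_PER_SEC / rate;
}

int main(void)
{
	/* 1500-byte frame at 1 Mbit/s (125000 bytes/s) -> 12000000 ns = 12 ms */
	printf("%llu ns\n", (unsigned long long)tx_time_ns(1500, 125000));
	return 0;
}
```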
@@ -556,7 +553,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         now = last->time_to_send;
                 }
 
-                delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
+                delay += packet_time_ns(qdisc_pkt_len(skb), q);
         }
 
         cb->time_to_send = now + delay;