提交 f8bd2258 编写于 作者: Roman Zippel 提交者: Linus Torvalds

remove div_long_long_rem

x86 is the only arch right now which provides an optimized version of
div_long_long_rem, and it has the downside that one has to be very careful that
the divide doesn't overflow.

The API is a little awkward, as the arguments for the unsigned divide are
signed.  The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.

There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 6f6d6a1a
...@@ -54,6 +54,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; ...@@ -54,6 +54,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <linux/module.h> #include <linux/module.h>
#include <linux/elfcore.h> #include <linux/elfcore.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/math64.h>
#define elf_prstatus elf_prstatus32 #define elf_prstatus elf_prstatus32
struct elf_prstatus32 struct elf_prstatus32
...@@ -102,8 +103,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) ...@@ -102,8 +103,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
* one divide. * one divide.
*/ */
u64 nsec = (u64)jiffies * TICK_NSEC; u64 nsec = (u64)jiffies * TICK_NSEC;
long rem; u32 rem;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC; value->tv_usec = rem / NSEC_PER_USEC;
} }
......
...@@ -56,6 +56,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; ...@@ -56,6 +56,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <linux/module.h> #include <linux/module.h>
#include <linux/elfcore.h> #include <linux/elfcore.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/math64.h>
#define elf_prstatus elf_prstatus32 #define elf_prstatus elf_prstatus32
struct elf_prstatus32 struct elf_prstatus32
...@@ -104,8 +105,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) ...@@ -104,8 +105,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
* one divide. * one divide.
*/ */
u64 nsec = (u64)jiffies * TICK_NSEC; u64 nsec = (u64)jiffies * TICK_NSEC;
long rem; u32 rem;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC; value->tv_usec = rem / NSEC_PER_USEC;
} }
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#include <linux/miscdevice.h> #include <linux/miscdevice.h>
#include <linux/posix-timers.h> #include <linux/posix-timers.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/sn/addrs.h> #include <asm/sn/addrs.h>
...@@ -472,8 +474,8 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp) ...@@ -472,8 +474,8 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
nsec = rtc_time() * sgi_clock_period nsec = rtc_time() * sgi_clock_period
+ sgi_clock_offset.tv_nsec; + sgi_clock_offset.tv_nsec;
tp->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tp->tv_nsec) *tp = ns_to_timespec(nsec);
+ sgi_clock_offset.tv_sec; tp->tv_sec += sgi_clock_offset.tv_sec;
return 0; return 0;
}; };
...@@ -481,11 +483,11 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp) ...@@ -481,11 +483,11 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
{ {
u64 nsec; u64 nsec;
u64 rem; u32 rem;
nsec = rtc_time() * sgi_clock_period; nsec = rtc_time() * sgi_clock_period;
sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem); sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
if (rem <= tp->tv_nsec) if (rem <= tp->tv_nsec)
sgi_clock_offset.tv_nsec = tp->tv_sec - rem; sgi_clock_offset.tv_nsec = tp->tv_sec - rem;
...@@ -644,9 +646,6 @@ static int sgi_timer_del(struct k_itimer *timr) ...@@ -644,9 +646,6 @@ static int sgi_timer_del(struct k_itimer *timr)
return 0; return 0;
} }
#define timespec_to_ns(x) ((x).tv_nsec + (x).tv_sec * NSEC_PER_SEC)
#define ns_to_timespec(ts, nsec) (ts).tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &(ts).tv_nsec)
/* Assumption: it_lock is already held with irq's disabled */ /* Assumption: it_lock is already held with irq's disabled */
static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{ {
...@@ -659,9 +658,8 @@ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) ...@@ -659,9 +658,8 @@ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
return; return;
} }
ns_to_timespec(cur_setting->it_interval, timr->it.mmtimer.incr * sgi_clock_period); cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
ns_to_timespec(cur_setting->it_value, (timr->it.mmtimer.expires - rtc_time())* sgi_clock_period); cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
return;
} }
...@@ -679,8 +677,8 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, ...@@ -679,8 +677,8 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
sgi_timer_get(timr, old_setting); sgi_timer_get(timr, old_setting);
sgi_timer_del(timr); sgi_timer_del(timr);
when = timespec_to_ns(new_setting->it_value); when = timespec_to_ns(&new_setting->it_value);
period = timespec_to_ns(new_setting->it_interval); period = timespec_to_ns(&new_setting->it_interval);
if (when == 0) if (when == 0)
/* Clear timer */ /* Clear timer */
...@@ -695,7 +693,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, ...@@ -695,7 +693,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
unsigned long now; unsigned long now;
getnstimeofday(&n); getnstimeofday(&n);
now = timespec_to_ns(n); now = timespec_to_ns(&n);
if (when > now) if (when > now)
when -= now; when -= now;
else else
......
...@@ -33,24 +33,6 @@ ...@@ -33,24 +33,6 @@
__mod; \ __mod; \
}) })
/*
* (long)X = ((long long)divs) / (long)div
* (long)rem = ((long long)divs) % (long)div
*
* Warning, this will do an exception if X overflows.
*/
#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)
/*
 * Divide a 64-bit dividend by a 32-bit divisor with the x86 "divl"
 * instruction: the "A" constraint places the dividend in the EDX:EAX
 * register pair, the quotient comes back in EAX ("=a") and the
 * remainder in EDX ("=d").
 * NOTE(review): divl is an *unsigned* divide despite the signed
 * prototype, and it raises a divide-error exception if the quotient
 * does not fit in 32 bits (see the overflow warning above) — callers
 * must guarantee neither case occurs.
 */
static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
{
long dum2;
/* "rm": the divisor may live in a register or a memory operand. */
asm("divl %2":"=a"(dum2), "=d"(*rem)
: "rm"(div), "A"(divs));
return dum2;
}
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{ {
union { union {
......
#ifndef _LINUX_CALC64_H
#define _LINUX_CALC64_H
#include <linux/types.h>
#include <asm/div64.h>
/*
* This is a generic macro which is used when the architecture
* specific div64.h does not provide a optimized one.
*
* The 64bit dividend is divided by the divisor (data type long), the
* result is returned and the remainder stored in the variable
* referenced by remainder (data type long *). In contrast to the
* do_div macro the dividend is kept intact.
*/
#ifndef div_long_long_rem
#define div_long_long_rem(dividend, divisor, remainder) \
do_div_llr((dividend), divisor, remainder)
/*
 * Divide a 64-bit dividend by a 32-bit divisor through the
 * architecture's do_div() primitive, without clobbering the caller's
 * dividend (do_div() divides its first argument in place, so a local
 * copy is used).  Returns the quotient; the remainder is stored
 * through *remainder.
 */
static inline unsigned long do_div_llr(const long long dividend,
const long divisor, long *remainder)
{
u64 quotient = dividend;

/* do_div() updates quotient in place and returns the remainder. */
*remainder = do_div(quotient, divisor);
return (unsigned long)quotient;
}
#endif
/*
* Sign aware variation of the above. On some architectures a
* negative dividend leads to an divide overflow exception, which
* is avoided by the sign check.
*/
/*
 * Sign-aware wrapper around div_long_long_rem(): a negative dividend
 * is divided as its absolute value and the quotient and remainder are
 * negated afterwards, sidestepping the divide-overflow exception some
 * architectures raise for a negative dividend (see comment above).
 */
static inline long div_long_long_rem_signed(const long long dividend,
const long divisor, long *remainder)
{
long quot;

if (unlikely(dividend < 0)) {
/* Divide |dividend|, then flip the signs of both results. */
quot = -div_long_long_rem(-dividend, divisor, remainder);
*remainder = -*remainder;
return quot;
}

return div_long_long_rem(dividend, divisor, remainder);
}
#endif
#ifndef _LINUX_JIFFIES_H #ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H #define _LINUX_JIFFIES_H
#include <linux/calc64.h> #include <linux/math64.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/time.h> #include <linux/time.h>
......
...@@ -4,8 +4,9 @@ ...@@ -4,8 +4,9 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/posix-timers.h> #include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
static int check_clock(const clockid_t which_clock) static int check_clock(const clockid_t which_clock)
{ {
...@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock, ...@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
union cpu_time_count cpu, union cpu_time_count cpu,
struct timespec *tp) struct timespec *tp)
{ {
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
tp->tv_sec = div_long_long_rem(cpu.sched, *tp = ns_to_timespec(cpu.sched);
NSEC_PER_SEC, &tp->tv_nsec); else
} else {
cputime_to_timespec(cpu.cpu, tp); cputime_to_timespec(cpu.cpu, tp);
}
} }
static inline int cpu_time_before(const clockid_t which_clock, static inline int cpu_time_before(const clockid_t which_clock,
......
...@@ -392,13 +392,17 @@ EXPORT_SYMBOL(set_normalized_timespec); ...@@ -392,13 +392,17 @@ EXPORT_SYMBOL(set_normalized_timespec);
struct timespec ns_to_timespec(const s64 nsec) struct timespec ns_to_timespec(const s64 nsec)
{ {
struct timespec ts; struct timespec ts;
s32 rem;
if (!nsec) if (!nsec)
return (struct timespec) {0, 0}; return (struct timespec) {0, 0};
ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec); ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
if (unlikely(nsec < 0)) if (unlikely(rem < 0)) {
set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec); ts.tv_sec--;
rem += NSEC_PER_SEC;
}
ts.tv_nsec = rem;
return ts; return ts;
} }
...@@ -528,8 +532,10 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) ...@@ -528,8 +532,10 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
* Convert jiffies to nanoseconds and separate with * Convert jiffies to nanoseconds and separate with
* one divide. * one divide.
*/ */
u64 nsec = (u64)jiffies * TICK_NSEC; u32 rem;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec); value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
NSEC_PER_SEC, &rem);
value->tv_nsec = rem;
} }
EXPORT_SYMBOL(jiffies_to_timespec); EXPORT_SYMBOL(jiffies_to_timespec);
...@@ -567,12 +573,11 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value) ...@@ -567,12 +573,11 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
* Convert jiffies to nanoseconds and separate with * Convert jiffies to nanoseconds and separate with
* one divide. * one divide.
*/ */
u64 nsec = (u64)jiffies * TICK_NSEC; u32 rem;
long tv_usec;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec); value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
tv_usec /= NSEC_PER_USEC; NSEC_PER_SEC, &rem);
value->tv_usec = tv_usec; value->tv_usec = rem / NSEC_PER_USEC;
} }
EXPORT_SYMBOL(jiffies_to_timeval); EXPORT_SYMBOL(jiffies_to_timeval);
......
...@@ -234,7 +234,7 @@ static inline void notify_cmos_timer(void) { } ...@@ -234,7 +234,7 @@ static inline void notify_cmos_timer(void) { }
*/ */
int do_adjtimex(struct timex *txc) int do_adjtimex(struct timex *txc)
{ {
long mtemp, save_adjust, rem; long mtemp, save_adjust;
s64 freq_adj; s64 freq_adj;
int result; int result;
...@@ -345,9 +345,7 @@ int do_adjtimex(struct timex *txc) ...@@ -345,9 +345,7 @@ int do_adjtimex(struct timex *txc)
freq_adj += time_freq; freq_adj += time_freq;
freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC); freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC); time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
time_offset = div_long_long_rem_signed(time_offset, time_offset = div_s64(time_offset, NTP_INTERVAL_FREQ);
NTP_INTERVAL_FREQ,
&rem);
time_offset <<= SHIFT_UPDATE; time_offset <<= SHIFT_UPDATE;
} /* STA_PLL */ } /* STA_PLL */
} /* txc->modes & ADJ_OFFSET */ } /* txc->modes & ADJ_OFFSET */
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/debugobjects.h> #include <linux/debugobjects.h>
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/memory.h> #include <linux/memory.h>
#include <linux/math64.h>
/* /*
* Lock order: * Lock order:
...@@ -3621,12 +3622,10 @@ static int list_locations(struct kmem_cache *s, char *buf, ...@@ -3621,12 +3622,10 @@ static int list_locations(struct kmem_cache *s, char *buf,
len += sprintf(buf + len, "<not-available>"); len += sprintf(buf + len, "<not-available>");
if (l->sum_time != l->min_time) { if (l->sum_time != l->min_time) {
unsigned long remainder;
len += sprintf(buf + len, " age=%ld/%ld/%ld", len += sprintf(buf + len, " age=%ld/%ld/%ld",
l->min_time, l->min_time,
div_long_long_rem(l->sum_time, l->count, &remainder), (long)div_u64(l->sum_time, l->count),
l->max_time); l->max_time);
} else } else
len += sprintf(buf + len, " age=%ld", len += sprintf(buf + len, " age=%ld",
l->min_time); l->min_time);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册