path: root/kernel/time.c
Diffstat (limited to 'kernel/time.c')
-rw-r--r--   kernel/time.c   46
1 file changed, 34 insertions, 12 deletions
diff --git a/kernel/time.c b/kernel/time.c
index 4064c0566e7..a5ec013b6c8 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -39,6 +39,8 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
+#include "timeconst.h"
+
/*
* The timezone where the local system is located. Used as a default by some
* programs who obtain this value by using gettimeofday.
@@ -93,7 +95,8 @@ asmlinkage long sys_stime(time_t __user *tptr)
#endif /* __ARCH_WANT_SYS_TIME */
-asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __user *tz)
+asmlinkage long sys_gettimeofday(struct timeval __user *tv,
+ struct timezone __user *tz)
{
if (likely(tv != NULL)) {
struct timeval ktv;
@@ -118,7 +121,7 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us
* hard to make the program warp the clock precisely n hours) or
* compile in the timezone information into the kernel. Bad, bad....
*
- * - TYT, 1992-01-01
+ * - TYT, 1992-01-01
*
* The best thing to do is to keep the CMOS clock in universal time (UTC)
* as real UNIX machines always do it. This avoids all headaches about
@@ -240,7 +243,11 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
- return (j * MSEC_PER_SEC) / HZ;
+# if BITS_PER_LONG == 32
+ return ((u64)HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+# else
+ return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
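
For reference, the new BITS_PER_LONG == 32 path above replaces the open-coded division with a reciprocal multiplication, msecs = (HZ_TO_MSEC_MUL32 * jiffies) >> HZ_TO_MSEC_SHR32, using per-HZ constants from the newly included timeconst.h. A minimal userspace sketch of the same idea, assuming an illustrative HZ of 300 and a hand-picked 30-bit shift rather than the generated constants, cross-checks the multiply-shift against the division it replaces over a sampled range:

#include <stdint.h>
#include <stdio.h>

#define DEMO_HZ        300u       /* illustrative HZ that does not divide 1000 */
#define MSEC_PER_SEC   1000u
#define DEMO_SHR       30         /* illustrative shift; timeconst.h picks its own */

int main(void)
{
	/* mul/2^DEMO_SHR is a slight over-approximation of MSEC_PER_SEC/HZ */
	uint32_t mul = (uint32_t)((((uint64_t)MSEC_PER_SEC << DEMO_SHR) / DEMO_HZ) + 1);
	uint32_t j;

	for (j = 0; j < (1u << 29); j += 12345) {
		uint32_t by_div = (uint32_t)(((uint64_t)j * MSEC_PER_SEC) / DEMO_HZ);
		uint32_t by_mul = (uint32_t)(((uint64_t)mul * j) >> DEMO_SHR);

		if (by_div != by_mul) {
			printf("mismatch at j=%u: %u vs %u\n", j, by_div, by_mul);
			return 1;
		}
	}
	printf("multiply-shift matches division over the sampled range (mul=%u)\n", mul);
	return 0;
}

jiffies_to_usecs() below is the same pattern with the USEC constants; the 64-bit branch keeps a plain multiply and divide using HZ_TO_MSEC_NUM / HZ_TO_MSEC_DEN.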
@@ -252,7 +259,11 @@ unsigned int inline jiffies_to_usecs(const unsigned long j)
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
- return (j * USEC_PER_SEC) / HZ;
+# if BITS_PER_LONG == 32
+ return ((u64)HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
+# else
+ return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
+# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);
@@ -267,7 +278,7 @@ EXPORT_SYMBOL(jiffies_to_usecs);
*
* This function should be only used for timestamps returned by
* current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
- * it doesn't handle the better resolution of the later.
+ * it doesn't handle the better resolution of the latter.
*/
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
@@ -315,7 +326,7 @@ EXPORT_SYMBOL_GPL(getnstimeofday);
* This algorithm was first published by Gauss (I think).
*
* WARNING: this function will overflow on 2106-02-07 06:28:16 on
- * machines were long is 32-bit! (However, as time_t is signed, we
+ * machines where long is 32-bit! (However, as time_t is signed, we
* will already get problems at other places on 2038-01-19 03:14:08)
*/
unsigned long
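
The two dates in the warning are just the 32-bit second counters running out: 2^32 seconds for an unsigned 32-bit long and 2^31 seconds for a signed 32-bit time_t. A quick check on a system with a 64-bit time_t (an assumption of this sketch) reproduces both:

#include <stdio.h>
#include <time.h>

int main(void)
{
	/* 2^31 seconds: where a signed 32-bit time_t wraps */
	time_t signed_limit = (time_t)1 << 31;
	/* 2^32 seconds: where an unsigned 32-bit counter wraps */
	time_t unsigned_limit = (time_t)1 << 32;
	char buf[64];

	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&signed_limit));
	printf("2^31 s after the epoch: %s\n", buf);   /* 2038-01-19 03:14:08 */

	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&unsigned_limit));
	printf("2^32 s after the epoch: %s\n", buf);   /* 2106-02-07 06:28:16 */
	return 0;
}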
@@ -352,7 +363,7 @@ EXPORT_SYMBOL(mktime);
* normalize to the timespec storage format
*
* Note: The tv_nsec part is always in the range of
- * 0 <= tv_nsec < NSEC_PER_SEC
+ * 0 <= tv_nsec < NSEC_PER_SEC
* For negative values only the tv_sec field is negative !
*/
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
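
The note is easiest to read with a concrete value: -0.3 s is stored as tv_sec = -1, tv_nsec = 700000000, never with a negative tv_nsec. A small userspace sketch of that normalization rule (illustrative only, not the kernel implementation, whose body is outside this hunk):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long sec; long nsec; };

/* Fold nsec into [0, NSEC_PER_SEC) by carrying into / borrowing from sec. */
static struct ts normalize(long sec, long nsec)
{
	struct ts t;

	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		sec++;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		sec--;
	}
	t.sec = sec;
	t.nsec = nsec;
	return t;
}

int main(void)
{
	struct ts a = normalize(0, -300000000L);   /* -0.3 s -> sec=-1, nsec=700000000 */
	struct ts b = normalize(1, 1500000000L);   /*  2.5 s -> sec=2,  nsec=500000000 */

	printf("%ld %ld\n", a.sec, a.nsec);
	printf("%ld %ld\n", b.sec, b.nsec);
	return 0;
}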
@@ -453,12 +464,13 @@ unsigned long msecs_to_jiffies(const unsigned int m)
/*
* Generic case - multiply, round and divide. But first
* check that if we are doing a net multiplication, that
- * we wouldnt overflow:
+ * we wouldn't overflow:
*/
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
- return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+ return ((u64)MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
+ >> MSEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(msecs_to_jiffies);
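
Going the other way, msecs_to_jiffies() has to round up, and the MSEC_TO_HZ_ADJ32 term added before the shift takes over the job of the old + MSEC_PER_SEC - 1 rounding. A userspace sketch of that rounding, again with an illustrative HZ of 300 (so HZ/MSEC_PER_SEC reduces to 3/10) and a hand-picked 33-bit shift rather than the generated constants (usecs_to_jiffies() below follows the same pattern):

#include <stdint.h>
#include <stdio.h>

/* Illustrative HZ=300: HZ/MSEC_PER_SEC reduced to lowest terms is 3/10. */
#define DEMO_NUM  3u
#define DEMO_DEN  10u
#define DEMO_SHR  33       /* illustrative shift; timeconst.h picks its own */

int main(void)
{
	/* mul/2^DEMO_SHR slightly over-approximates 3/10; adj makes the shift round up */
	uint64_t mul = (((uint64_t)DEMO_NUM << DEMO_SHR) / DEMO_DEN) + 1;
	uint64_t adj = (((uint64_t)(DEMO_DEN - 1)) << DEMO_SHR) / DEMO_DEN;
	uint32_t m;

	for (m = 0; m < 1000000000u; m += 9999) {
		/* the open-coded round-up division this patch replaces */
		uint32_t by_div = (uint32_t)(((uint64_t)m * DEMO_NUM + DEMO_DEN - 1) / DEMO_DEN);
		uint32_t by_mul = (uint32_t)((mul * m + adj) >> DEMO_SHR);

		if (by_div != by_mul) {
			printf("mismatch at m=%u: %u vs %u\n", m, by_div, by_mul);
			return 1;
		}
	}
	printf("adjusted multiply-shift matches the round-up division\n");
	return 0;
}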
@@ -472,7 +484,8 @@ unsigned long usecs_to_jiffies(const unsigned int u)
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return u * (HZ / USEC_PER_SEC);
#else
- return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
+ return ((u64)USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+ >> USEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(usecs_to_jiffies);
@@ -566,7 +579,11 @@ EXPORT_SYMBOL(jiffies_to_timeval);
clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ return x * (USER_HZ / HZ);
+# else
return x / (HZ / USER_HZ);
+# endif
#else
u64 tmp = (u64)x * TICK_NSEC;
do_div(tmp, (NSEC_PER_SEC / USER_HZ));
@@ -599,7 +616,14 @@ EXPORT_SYMBOL(clock_t_to_jiffies);
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ x *= USER_HZ;
+ do_div(x, HZ);
+# elif HZ > USER_HZ
do_div(x, HZ / USER_HZ);
+# else
+ /* Nothing to do */
+# endif
#else
/*
* There are better ways that don't overflow early,
@@ -611,7 +635,6 @@ u64 jiffies_64_to_clock_t(u64 x)
#endif
return x;
}
-
EXPORT_SYMBOL(jiffies_64_to_clock_t);
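
The new HZ < USER_HZ branches in jiffies_to_clock_t() and jiffies_64_to_clock_t() above exist because the old single expression divides by HZ / USER_HZ, which truncates to zero whenever USER_HZ is larger than HZ; in that case the conversion has to multiply by USER_HZ / HZ instead. A userspace sketch with purely illustrative values (HZ=32, USER_HZ=128, chosen only so the ratio is exact):

#include <stdio.h>

/* Illustrative values only: a tick rate lower than the USER_HZ seen by userspace. */
#define DEMO_HZ       32L
#define DEMO_USER_HZ  128L

static long demo_jiffies_to_clock_t(long x)
{
#if DEMO_HZ < DEMO_USER_HZ
	/* multiply, since DEMO_HZ / DEMO_USER_HZ would truncate to 0 */
	return x * (DEMO_USER_HZ / DEMO_HZ);
#else
	return x / (DEMO_HZ / DEMO_USER_HZ);
#endif
}

int main(void)
{
	/* 32 ticks = 1 second = 128 clock_t units at USER_HZ=128 */
	printf("%ld\n", demo_jiffies_to_clock_t(32));   /* prints 128 */
	return 0;
}

With these values 32 ticks make one second, i.e. 128 clock_t units, so the fixed path prints 128; the old expression would have divided by 32 / 128 == 0.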
u64 nsec_to_clock_t(u64 x)
@@ -646,7 +669,6 @@ u64 get_jiffies_64(void)
} while (read_seqretry(&xtime_lock, seq));
return ret;
}
-
EXPORT_SYMBOL(get_jiffies_64);
#endif