#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H
#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/div64.h>
#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))
/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
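/*
 * Usage sketch (illustrative only): one call yields both quotient and
 * remainder, e.g. splitting nanoseconds into seconds and leftover ns:
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(1000000007ULL, 1000000000U, &rem);
 *	// secs == 1, rem == 7
 */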
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}
#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
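/*
 * Worked example: DIV64_U64_ROUND_UP(10, 3) evaluates to 4, since the
 * macro adds (d - 1) before dividing, so any non-zero remainder rounds
 * the quotient up. The statement-expression temporary ensures d is only
 * evaluated once.
 */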
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);

extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);

extern u64 div64_u64(u64 dividend, u64 divisor);

extern s64 div64_s64(s64 dividend, s64 divisor);
#endif /* BITS_PER_LONG */
/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;

	return div_u64_rem(dividend, divisor, &remainder);
}
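/*
 * Usage sketch (illustrative only): prefer div_u64() over div64_u64()
 * whenever the divisor is known to fit in 32 bits, e.g.
 *
 *	u64 avg = div_u64(total_bytes, nr_samples);	// nr_samples is a u32
 *
 * total_bytes and nr_samples are placeholder names; the point is that
 * 32bit architectures can use a cheaper 64/32 division here.
 */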
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;

	return div_s64_rem(dividend, divisor, &remainder);
}
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
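/*
 * Usage sketch (illustrative only): the iterative variant only makes
 * sense when the quotient is known to be small, e.g. carrying a few
 * overflowed nanoseconds into a seconds count:
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(nsec, 1000000000U, &rem);
 *
 * nsec is a placeholder for a value expected to be just over one second.
 */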
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */
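/*
 * Usage sketch (illustrative only): both helpers compute (a * mul) >> shift
 * without losing the high bits of the intermediate product. A typical use
 * is scaling by a 32.32 fixed-point fraction, e.g. multiplying by 3/4:
 *
 *	u64 scaled = mul_u64_u32_shr(x, 0xC0000000U, 32);	// x * 0.75
 *
 * x is a placeholder for any u64 value.
 */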
#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
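/*
 * Note on the fallback above: with a = ah * 2^32 + al and shift <= 32,
 * (a * mul) >> shift == ((al * mul) >> shift) + ((ah * mul) << (32 - shift)),
 * because the high partial product is exactly divisible by 2^shift. The
 * result is truncated to 64 bits, as in the __int128 variant.
 */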
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;
	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
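/*
 * Worked check of the decomposition used by the fallback above: with
 * a = ah * 2^32 + al and b = bh * 2^32 + bl,
 *
 *	a * b = ah*bh * 2^64 + (ah*bl + al*bh) * 2^32 + al*bl
 *
 * which is exactly the rh/rn/rm/rl sum accumulated before the shift.
 */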
#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
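/*
 * Usage sketch (illustrative only): the 96-bit intermediate lets a 64-bit
 * value be rescaled by a 32-bit ratio without overflow, e.g.
 *
 *	u64 scaled = mul_u64_u32_div(cycles, rate_num, rate_den);
 *
 * cycles, rate_num and rate_den are placeholder names.
 */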
#endif /* _LINUX_MATH64_H */