Add an initial math64.h similar to linux/math64.h with functions
mul_u64_u64_div64() and mul_u64_u32_shr(). This isn't a direct copy of
include/linux/math64.h, as that doesn't define mul_u64_u64_div64().

The implementation was written by Peter Zijlstra based on
linux/math64.h and div64.h [1]. The original version was not optimal on
arm64, where __int128 division compiles to a call out to __udivti3, so
I dropped the __int128 variant of mul_u64_u64_div64().

[1] https://lore.kernel.org/lkml/20200322101848.GF2452@worktop.programming.kicks-ass.net/

Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Itaru Kitayama <itaru.kitayama@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Link: http://lore.kernel.org/lkml/20210414155412.3697605-2-robh@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
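For a sense of how these helpers get used, here is a minimal sketch;
scale_count() and its surroundings are illustrative, not part of this
patch. Scaling a counter by time_enabled/time_running multiplies two
u64 values whose product can exceed 64 bits, which is the case
mul_u64_u64_div64() exists to handle:

#include <linux/types.h>
#include <linux/math64.h>

/* Hypothetical example: scale a raw counter value by the ratio of
 * enabled to running time. count * time_enabled can exceed 64 bits,
 * so a plain u64 multiply would overflow before the division. */
static u64 scale_count(u64 count, u64 time_enabled, u64 time_running)
{
        if (!time_running)      /* counter was never scheduled in */
                return 0;
        return mul_u64_u64_div64(count, time_enabled, time_running);
}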
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>

#ifdef __x86_64__
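/*
 * mulq leaves the full 128-bit product of a and b in RDX:RAX, and divq
 * then divides that 128-bit value by c, leaving the quotient in RAX
 * and the remainder in RDX (hence the "rdx" clobber). The CPU raises
 * a divide error if the quotient doesn't fit in 64 bits, so callers
 * must keep a * b / c below 2^64.
 */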
static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
{
        u64 q;

        asm ("mulq %2; divq %3" : "=a" (q)
                                : "a" (a), "rm" (b), "rm" (c)
                                : "rdx");

        return q;
}
#define mul_u64_u64_div64 mul_u64_u64_div64
#endif

#ifdef __SIZEOF_INT128__
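/*
 * With a compiler-provided 128-bit type, (a * b) >> shift is a single
 * widening multiply followed by a shift of the 128-bit product.
 */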
static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * b) >> shift);
}

#else

#ifdef __i386__
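/*
 * Some GCC versions turn (u64)a * b into a full 64x64 multiply on
 * 32-bit x86; the asm guarantees a single 32x32->64 mull, which
 * leaves the product in EDX:EAX.
 */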
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        u32 high, low;

        asm ("mull %[b]" : "=a" (low), "=d" (high)
                         : [a] "a" (a), [b] "rm" (b) );

        return low | ((u64)high) << 32;
}
#else
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}
#endif
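/*
 * Without __int128, split a into 32-bit halves, so that
 * a * b = al * b + ((ah * b) << 32) and therefore
 * (a * b) >> shift = (al * b >> shift) + (ah * b << (32 - shift)).
 * This takes two 32x32->64 multiplies and is valid for shift <= 32;
 * the caller must ensure the shifted result fits in 64 bits.
 */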
static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
{
        u32 ah, al;
        u64 ret;

        al = a;
        ah = a >> 32;

        ret = mul_u32_u32(al, b) >> shift;
        if (ah)
                ret += mul_u32_u32(ah, b) << (32 - shift);

        return ret;
}

#endif /* __SIZEOF_INT128__ */

#ifndef mul_u64_u64_div64
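/*
 * Generic fallback: write a = quot * c + rem, so that
 * a * b / c = quot * b + (rem * b) / c without needing the full
 * 128-bit product. This is exact as long as rem * b does not
 * overflow 64 bits.
 */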
static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
{
        u64 quot, rem;

        quot = a / c;
        rem = a % c;

        return quot * b + (rem * b) / c;
}
#endif

#endif /* _LINUX_MATH64_H */
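Since the helpers are plain static inlines, a quick userspace sanity
check is possible. The harness below is a sketch, assuming it is built
with tools/include on the include path (e.g. -I tools/include); it is
not part of the patch:

#include <assert.h>
#include <linux/types.h>
#include <linux/math64.h>

int main(void)
{
        /* 10^13 * 10^13 = 10^26 overflows u64, but the quotient
         * 10^26 / 10^12 = 10^14 fits, so the result is exact. */
        assert(mul_u64_u64_div64(10000000000000ULL, 10000000000000ULL,
                                 1000000000000ULL) == 100000000000000ULL);

        /* ((1 << 40) * 5) >> 8 == 5 << 32, small enough to check by
         * hand; on builds without __int128 this exercises the
         * ah != 0 path of the fallback. */
        assert(mul_u64_u32_shr(1ULL << 40, 5, 8) == 5ULL << 32);

        return 0;
}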