* [U-Boot] [PATCH V2] lib: div64: sync with Linux
@ 2017-04-10  5:39 Peng Fan
  2017-04-12 20:55 ` [U-Boot] [PATCH] sandbox: Change CONFIG_SANDBOX_BITS_PER_LONG to hard-coded Tom Rini
  2017-04-14 21:09 ` [U-Boot] [U-Boot,V2] lib: div64: sync with Linux Tom Rini
  0 siblings, 2 replies; 4+ messages in thread
From: Peng Fan @ 2017-04-10  5:39 UTC (permalink / raw)
  To: u-boot

Sync with Linux commit ad0376eb1483b ("Merge tag 'edac_for_4.11_2'").

Signed-off-by: Peng Fan <peng.fan@nxp.com>
Cc: Tom Rini <trini@konsulko.com>
---

V2:
Built with `./tools/buildman/buildman -v`. The following 3 boards fail
to build, but the failures are not related to this patch:
x86:  +   qemu-x86_64
x86:  +   qemu-x86_efi_payload64
x86:  +   chromebook_link64

Update div64.h to sync with the kernel's include/asm-generic/div64.h.
There are some checkpatch warnings; I did not fix them, because the
files are copied directly from Linux.
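
For reviewers unfamiliar with the API, here is a minimal usage sketch
(illustrative only, not part of this patch; the helper name is made up)
of the do_div() calling convention documented in div64.h -- the macro
divides a 64-bit lvalue in place and returns the 32-bit remainder:

	#include <div64.h>

	/* Hypothetical helper: convert nanoseconds to milliseconds. */
	static u64 ns_to_ms(u64 ns, u32 *rem_ns)
	{
		/* 'ns' is rewritten in place with the quotient;
		 * the macro's return value is the remainder. */
		*rem_ns = do_div(ns, 1000000);
		return ns;
	}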

 include/div64.h        | 205 +++++++++++++++++++++++++++++++++++++++++++++++--
 include/linux/math64.h | 172 +++++++++++++++++++++++++++++++++++++++++
 lib/div64.c            | 141 ++++++++++++++++++++++++++++++++--
 3 files changed, 508 insertions(+), 10 deletions(-)

diff --git a/include/div64.h b/include/div64.h
index d833144..cecb232 100644
--- a/include/div64.h
+++ b/include/div64.h
@@ -4,13 +4,16 @@
  * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
  * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
  *
+ * Optimization for constant divisors on 32-bit machines:
+ * Copyright (C) 2006-2015 Nicolas Pitre
+ *
  * The semantics of do_div() are:
  *
  * uint32_t do_div(uint64_t *n, uint32_t base)
  * {
- *	uint32_t remainder = *n % base;
- *	*n = *n / base;
- *	return remainder;
+ * 	uint32_t remainder = *n % base;
+ * 	*n = *n / base;
+ * 	return remainder;
  * }
  *
  * NOTE: macro parameter n is evaluated multiple times,
@@ -18,8 +21,182 @@
  */
 
 #include <linux/types.h>
+#include <linux/compiler.h>
+
+#if BITS_PER_LONG == 64
+
+# define do_div(n,base) ({					\
+	uint32_t __base = (base);				\
+	uint32_t __rem;						\
+	__rem = ((uint64_t)(n)) % __base;			\
+	(n) = ((uint64_t)(n)) / __base;				\
+	__rem;							\
+ })
+
+#elif BITS_PER_LONG == 32
+
+#include <linux/log2.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications which ought to be much faster. And yet only if compiling
+ * with a sufficiently recent gcc version to perform proper 64-bit constant
+ * propagation.
+ *
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+
+#ifndef __div64_const32_is_OK
+#define __div64_const32_is_OK (__GNUC__ >= 4)
+#endif
+
+#define __div64_const32(n, ___b)					\
+({									\
+	/*								\
+	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
+	 *								\
+	 * We rely on the fact that most of this code gets optimized	\
+	 * away at compile time due to constant propagation and only	\
+	 * a few multiplication instructions should remain.		\
+	 * Hence this monstrous macro (static inline doesn't always	\
+	 * do the trick here).						\
+	 */								\
+	uint64_t ___res, ___x, ___t, ___m, ___n = (n);			\
+	uint32_t ___p, ___bias;						\
+									\
+	/* determine MSB of b */					\
+	___p = 1 << ilog2(___b);					\
+									\
+	/* compute m = ((p << 64) + b - 1) / b */			\
+	___m = (~0ULL / ___b) * ___p;					\
+	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
+									\
+	/* one less than the dividend with highest result */		\
+	___x = ~0ULL / ___b * ___b - 1;					\
+									\
+	/* test our ___m with res = m * x / (p << 64) */		\
+	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
+	___t = ___res += (___m & 0xffffffff) * (___x >> 32);		\
+	___res += (___x & 0xffffffff) * (___m >> 32);			\
+	___t = (___res < ___t) ? (1ULL << 32) : 0;			\
+	___res = (___res >> 32) + ___t;					\
+	___res += (___m >> 32) * (___x >> 32);				\
+	___res /= ___p;							\
+									\
+	/* Now sanitize and optimize what we've got. */			\
+	if (~0ULL % (___b / (___b & -___b)) == 0) {			\
+		/* special case, can be simplified to ... */		\
+		___n /= (___b & -___b);					\
+		___m = ~0ULL / (___b / (___b & -___b));			\
+		___p = 1;						\
+		___bias = 1;						\
+	} else if (___res != ___x / ___b) {				\
+		/*							\
+		 * We can't get away without a bias to compensate	\
+		 * for bit truncation errors.  To avoid it we'd need an	\
+		 * additional bit to represent m which would overflow	\
+		 * a 64-bit variable.					\
+		 *							\
+		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
+		 */							\
+		___bias = 1;						\
+		/* Compute m = (p << 64) / b */				\
+		___m = (~0ULL / ___b) * ___p;				\
+		___m += ((~0ULL % ___b + 1) * ___p) / ___b;		\
+	} else {							\
+		/*							\
+		 * Reduce m / p, and try to clear bit 31 of m when	\
+		 * possible, otherwise that'll need extra overflow	\
+		 * handling later.					\
+		 */							\
+		uint32_t ___bits = -(___m & -___m);			\
+		___bits |= ___m >> 32;					\
+		___bits = (~___bits) << 1;				\
+		/*							\
+		 * If ___bits == 0 then setting bit 31 is  unavoidable.	\
+		 * Simply apply the maximum possible reduction in that	\
+		 * case. Otherwise the MSB of ___bits indicates the	\
+		 * best reduction we should apply.			\
+		 */							\
+		if (!___bits) {						\
+			___p /= (___m & -___m);				\
+			___m /= (___m & -___m);				\
+		} else {						\
+			___p >>= ilog2(___bits);			\
+			___m >>= ilog2(___bits);			\
+		}							\
+		/* No bias needed. */					\
+		___bias = 0;						\
+	}								\
+									\
+	/*								\
+	 * Now we have a combination of 2 conditions:			\
+	 *								\
+	 * 1) whether or not we need to apply a bias, and		\
+	 *								\
+	 * 2) whether or not there might be an overflow in the cross	\
+	 *    product determined by (___m & ((1 << 63) | (1 << 31))).	\
+	 *								\
+	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
+	 * From now on there will be actual runtime code generated.	\
+	 */								\
+	___res = __arch_xprod_64(___m, ___n, ___bias);			\
+									\
+	___res /= ___p;							\
+})
+
+#ifndef __arch_xprod_64
+/*
+ * Default C implementation for __arch_xprod_64()
+ *
+ * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+ * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
+ *
+ * The product is a 128-bit value, scaled down to 64 bits.
+ * Assuming constant propagation to optimize away unused conditional code.
+ * Architectures may provide their own optimized assembly implementation.
+ */
+static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+{
+	uint32_t m_lo = m;
+	uint32_t m_hi = m >> 32;
+	uint32_t n_lo = n;
+	uint32_t n_hi = n >> 32;
+	uint64_t res, tmp;
+
+	if (!bias) {
+		res = ((uint64_t)m_lo * n_lo) >> 32;
+	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+		/* there can't be any overflow here */
+		res = (m + (uint64_t)m_lo * n_lo) >> 32;
+	} else {
+		res = m + (uint64_t)m_lo * n_lo;
+		tmp = (res < m) ? (1ULL << 32) : 0;
+		res = (res >> 32) + tmp;
+	}
+
+	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+		/* there can't be any overflow here */
+		res += (uint64_t)m_lo * n_hi;
+		res += (uint64_t)m_hi * n_lo;
+		res >>= 32;
+	} else {
+		tmp = res += (uint64_t)m_lo * n_hi;
+		res += (uint64_t)m_hi * n_lo;
+		tmp = (res < tmp) ? (1ULL << 32) : 0;
+		res = (res >> 32) + tmp;
+	}
 
+	res += (uint64_t)m_hi * n_hi;
+
+	return res;
+}
+#endif
+
+#ifndef __div64_32
 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+#endif
 
 /* The unnecessary pointer compare is there
  * to check for type safety (n must be 64bit)
@@ -28,14 +205,32 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
 	uint32_t __base = (base);			\
 	uint32_t __rem;					\
 	(void)(((typeof((n)) *)0) == ((uint64_t *)0));	\
-	if (((n) >> 32) == 0) {			\
+	if (__builtin_constant_p(__base) &&		\
+	    is_power_of_2(__base)) {			\
+		__rem = (n) & (__base - 1);		\
+		(n) >>= ilog2(__base);			\
+	} else if (__div64_const32_is_OK &&		\
+		   __builtin_constant_p(__base) &&	\
+		   __base != 0) {			\
+		uint32_t __res_lo, __n_lo = (n);	\
+		(n) = __div64_const32(n, __base);	\
+		/* the remainder can be computed with 32-bit regs */ \
+		__res_lo = (n);				\
+		__rem = __n_lo - __res_lo * __base;	\
+	} else if (likely(((n) >> 32) == 0)) {		\
 		__rem = (uint32_t)(n) % __base;		\
 		(n) = (uint32_t)(n) / __base;		\
-	} else						\
+	} else 						\
 		__rem = __div64_32(&(n), __base);	\
 	__rem;						\
  })
 
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
 /* Wrapper for do_div(). Doesn't modify dividend and returns
 * the result, not the remainder.
  */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6d760d7..08584c8 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -1,10 +1,15 @@
 #ifndef _LINUX_MATH64_H
 #define _LINUX_MATH64_H
 
+#include <div64.h>
+#include <linux/bitops.h>
 #include <linux/types.h>
 
 #if BITS_PER_LONG == 64
 
+#define div64_long(x, y) div64_s64((x), (y))
+#define div64_ul(x, y)   div64_u64((x), (y))
+
 /**
  * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
  *
@@ -27,6 +32,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 }
 
 /**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	*remainder = dividend % divisor;
+	return dividend / divisor;
+}
+
+/**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */
 static inline u64 div64_u64(u64 dividend, u64 divisor)
@@ -34,8 +48,19 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
 	return dividend / divisor;
 }
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+	return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
+#define div64_long(x, y) div_s64((x), (y))
+#define div64_ul(x, y)   div_u64((x), (y))
+
 #ifndef div_u64_rem
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
@@ -48,10 +73,18 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
 #ifndef div64_u64
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
 
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
 #endif /* BITS_PER_LONG */
 
 /**
@@ -82,4 +115,143 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
 
 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
 
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation.  */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
+#ifndef mul_u32_u32
+/*
+ * Many a GCC version messes this up and generates a 64x64 mult :-(
+ */
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#endif
+
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+	return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	u32 ah, al;
+	u64 ret;
+
+	al = a;
+	ah = a >> 32;
+
+	ret = mul_u32_u32(al, mul) >> shift;
+	if (ah)
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+	union {
+		u64 ll;
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 high, low;
+#else
+			u32 low, high;
+#endif
+		} l;
+	} rl, rm, rn, rh, a0, b0;
+	u64 c;
+
+	a0.ll = a;
+	b0.ll = b;
+
+	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
+	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
+	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
+	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
+
+	/*
+	 * Each of these lines computes a 64-bit intermediate result into "c",
+	 * starting@bits 32-95.  The low 32-bits go into the result of the
+	 * multiplication, the high 32-bits are carried into the next step.
+	 */
+	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+	rh.l.high = (c >> 32) + rh.l.high;
+
+	/*
+	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+	 * shift it right and throw away the high part of the result.
+	 */
+	if (shift == 0)
+		return rl.ll;
+	if (shift < 64)
+		return (rl.ll >> shift) | (rh.ll << (64 - shift));
+	return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
+
+#endif
+
+#ifndef mul_u64_u32_div
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
+{
+	union {
+		u64 ll;
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 high, low;
+#else
+			u32 low, high;
+#endif
+		} l;
+	} u, rl, rh;
+
+	u.ll = a;
+	rl.ll = mul_u32_u32(u.l.low, mul);
+	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
+
+	/* Bits 32-63 of the result will be in rh.l.low. */
+	rl.l.high = do_div(rh.ll, divisor);
+
+	/* Bits 0-31 of the result will be in rl.l.low.	*/
+	do_div(rl.ll, divisor);
+
+	rl.l.high = rh.l.low;
+	return rl.ll;
+}
+#endif /* mul_u64_u32_div */
+
 #endif /* _LINUX_MATH64_H */
diff --git a/lib/div64.c b/lib/div64.c
index 319fca5..206f582 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -13,14 +13,19 @@
  *
  * Code generated for this function might be very inefficient
  * for some CPUs. __div64_32() can be overridden by linking arch-specific
- * assembly versions such as arch/powerpc/lib/div64.S and arch/sh/lib/div64.S.
+ * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
+ * or by defining a preprocessor macro in arch/include/asm/div64.h.
  */
 
-#include <div64.h>
-#include <linux/types.h>
-#include <linux/compiler.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
 
-uint32_t notrace __div64_32(uint64_t *n, uint32_t base)
+/* Not needed on 64bit architectures */
+#if BITS_PER_LONG == 32
+
+#ifndef __div64_32
+uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 {
 	uint64_t rem = *n;
 	uint64_t b = base;
@@ -52,3 +57,129 @@ uint32_t notrace __div64_32(uint64_t *n, uint32_t base)
 	*n = res;
 	return rem;
 }
+EXPORT_SYMBOL(__div64_32);
+#endif
+
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+	u64 quotient;
+
+	if (dividend < 0) {
+		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+		*remainder = -*remainder;
+		if (divisor > 0)
+			quotient = -quotient;
+	} else {
+		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+		if (divisor < 0)
+			quotient = -quotient;
+	}
+	return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
+/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ * @remainder:  64bit remainder
+ *
+ * This implementation is comparable to the algorithm used by div64_u64.
+ * But this operation, which includes math for calculating the remainder,
+ * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
+ * systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	u32 high = divisor >> 32;
+	u64 quot;
+
+	if (high == 0) {
+		u32 rem32;
+		quot = div_u64_rem(dividend, divisor, &rem32);
+		*remainder = rem32;
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
+
+		if (quot != 0)
+			quot--;
+
+		*remainder = dividend - quot * divisor;
+		if (*remainder >= divisor) {
+			quot++;
+			*remainder -= divisor;
+		}
+	}
+
+	return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'.  The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
+ */
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
+{
+	u32 high = divisor >> 32;
+	u64 quot;
+
+	if (high == 0) {
+		quot = div_u64(dividend, divisor);
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
+
+		if (quot != 0)
+			quot--;
+		if ((dividend - quot * divisor) >= divisor)
+			quot++;
+	}
+
+	return quot;
+}
+EXPORT_SYMBOL(div64_u64);
+#endif
+
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+	s64 quot, t;
+
+	quot = div64_u64(abs(dividend), abs(divisor));
+	t = (dividend ^ divisor) >> 63;
+
+	return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
+#endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
-- 
2.6.6

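A standalone sketch of the reciprocal trick used by __div64_const32()
above (illustrative only; assumes a host compiler with unsigned
__int128). For a constant divisor b, pick p = 1 << ilog2(b) and
m = ((p << 64) + b - 1) / b; then n / b can be computed as
((m * n) >> 64) / p. The macro's bias and reduction logic handles the
rounding corner cases that this simple check ignores:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Example divisor: b = 1000, so p = 1 << ilog2(1000) = 512. */
		const uint64_t b = 1000, p = 512;

		/* m = ((p << 64) + b - 1) / b, computed in two steps to
		 * avoid overflowing 64 bits, as the macro does. */
		uint64_t m = (~0ULL / b) * p;
		m += (((~0ULL % b + 1) * p) + b - 1) / b;

		uint64_t n = 123456789123456789ULL;
		uint64_t q = (uint64_t)(((unsigned __int128)m * n) >> 64) / p;

		/* Both lines print 123456789123456. */
		printf("%llu\n", (unsigned long long)q);
		printf("%llu\n", (unsigned long long)(n / b));
		return 0;
	}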

* [U-Boot] [PATCH] sandbox: Change CONFIG_SANDBOX_BITS_PER_LONG to hard-coded
  2017-04-10  5:39 [U-Boot] [PATCH V2] lib: div64: sync with Linux Peng Fan
@ 2017-04-12 20:55 ` Tom Rini
  2017-04-14 21:10   ` Tom Rini
  2017-04-14 21:09 ` [U-Boot] [U-Boot,V2] lib: div64: sync with Linux Tom Rini
  1 sibling, 1 reply; 4+ messages in thread
From: Tom Rini @ 2017-04-12 20:55 UTC (permalink / raw)
  To: u-boot

Instead of having CONFIG_SANDBOX_BITS_PER_LONG set to 64 in sandbox.h,
with a comment saying to change it to 32 on a 32-bit host, simply define
BITS_PER_LONG as 64 directly in asm/types.h and move the comment there.

Cc: Simon Glass <sjg@chromium.org>
Signed-off-by: Tom Rini <trini@konsulko.com>
---
 arch/sandbox/include/asm/types.h | 6 +++++-
 include/configs/sandbox.h        | 6 ------
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/arch/sandbox/include/asm/types.h b/arch/sandbox/include/asm/types.h
index 42c09e2fff25..623cdafefaa5 100644
--- a/arch/sandbox/include/asm/types.h
+++ b/arch/sandbox/include/asm/types.h
@@ -50,7 +50,11 @@ typedef __INT64_TYPE__ s64;
 typedef __UINT64_TYPE__ u64;
 #endif
 
-#define BITS_PER_LONG	CONFIG_SANDBOX_BITS_PER_LONG
+/*
+ * Number of bits in a C 'long' on this architecture. Set this to 32 when
+ * building on a 32-bit machine.
+ */
+#define BITS_PER_LONG	64
 
 typedef unsigned long dma_addr_t;
 typedef u32 phys_addr_t;
diff --git a/include/configs/sandbox.h b/include/configs/sandbox.h
index 7de8765dc803..4c112cc1a99d 100644
--- a/include/configs/sandbox.h
+++ b/include/configs/sandbox.h
@@ -25,12 +25,6 @@
 #define CONFIG_SYS_TIMER_RATE		1000000
 #endif
 
-/*
- * Number of bits in a C 'long' on this architecture. Set this to 32 when
- * building on a 32-bit machine.
- */
-#define CONFIG_SANDBOX_BITS_PER_LONG	64
-
 #define CONFIG_LMB
 #define CONFIG_ANDROID_BOOT_IMAGE
 
-- 
1.9.1

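Side note: BITS_PER_LONG is what selects between the 64-bit and 32-bit
do_div() paths in the div64.h synced earlier in this thread, so it must
match the host ABI. A hypothetical compile-time check (not part of this
patch) could catch a mismatch:

	#include <asm/types.h>

	/* Fail the build if BITS_PER_LONG disagrees with the actual
	 * width of 'long', e.g. BITS_PER_LONG == 64 on a 32-bit host. */
	_Static_assert(BITS_PER_LONG == 8 * sizeof(long),
		       "BITS_PER_LONG does not match sizeof(long)");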

* [U-Boot] [U-Boot,V2] lib: div64: sync with Linux
  2017-04-10  5:39 [U-Boot] [PATCH V2] lib: div64: sync with Linux Peng Fan
  2017-04-12 20:55 ` [U-Boot] [PATCH] sandbox: Change CONFIG_SANDBOX_BITS_PER_LONG to hard-coded Tom Rini
@ 2017-04-14 21:09 ` Tom Rini
  1 sibling, 0 replies; 4+ messages in thread
From: Tom Rini @ 2017-04-14 21:09 UTC (permalink / raw)
  To: u-boot

On Mon, Apr 10, 2017 at 01:39:48PM +0800, Peng Fan wrote:

> Sync with Linux commit ad0376eb1483b ("Merge tag 'edac_for_4.11_2'").
> 
> Signed-off-by: Peng Fan <peng.fan@nxp.com>
> Cc: Tom Rini <trini@konsulko.com>

Applied to u-boot/master, thanks!

-- 
Tom


* [U-Boot] [PATCH] sandbox: Change CONFIG_SANDBOX_BITS_PER_LONG to hard-coded
  2017-04-12 20:55 ` [U-Boot] [PATCH] sandbox: Change CONFIG_SANDBOX_BITS_PER_LONG to hard-coded Tom Rini
@ 2017-04-14 21:10   ` Tom Rini
  0 siblings, 0 replies; 4+ messages in thread
From: Tom Rini @ 2017-04-14 21:10 UTC (permalink / raw)
  To: u-boot

On Wed, Apr 12, 2017 at 04:55:39PM -0400, Tom Rini wrote:

> Instead of having CONFIG_SANDBOX_BITS_PER_LONG set to 64 in sandbox.h,
> with a comment saying to change it to 32 on a 32-bit host, simply define
> BITS_PER_LONG as 64 directly in asm/types.h and move the comment there.
> 
> Cc: Simon Glass <sjg@chromium.org>
> Signed-off-by: Tom Rini <trini@konsulko.com>

Applied to u-boot/master, thanks!

-- 
Tom

