linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/1] MIPS: Lexra CPU support, prerequisite patch
@ 2018-09-20 17:03 Yasha Cherikovsky
  2018-09-20 17:03 ` [PATCH 1/1] MIPS: Add new Kconfig variable to avoid unaligned access instructions Yasha Cherikovsky
  0 siblings, 1 reply; 6+ messages in thread
From: Yasha Cherikovsky @ 2018-09-20 17:03 UTC (permalink / raw)
  To: Ralf Baechle, Paul Burton, James Hogan, linux-mips
  Cc: linux-kernel, Yasha Cherikovsky

Hi,

This is a prerequisite patch for adding Lexra CPU [2] support in arch/mips.
It does not add any Lexra CPU support yet, but it is required for that future
support.

The patch is written on top of v4.18.

Background:
I'm currently working on porting Linux/MIPS to run on the Realtek RTL8186 SoC [1],
which has a Lexra LX5280 CPU [2].

The complete Lexra + RTL8186 support is still a work in progress [3], so I'm not
sending it for review right now.

Thanks,
Yasha

[1] https://wikidevi.com/wiki/Realtek_RTL8186
[2] https://www.linux-mips.org/wiki/Lexra
[3] https://github.com/yashac3/linux-rtl8186/commits/rtl8186-porting-for-upstream-4.18


Yasha Cherikovsky (1):
  MIPS: Add new Kconfig variable to avoid unaligned access instructions

 arch/mips/Kconfig            |  7 +++++--
 arch/mips/kernel/unaligned.c | 24 ++++++++++++------------
 arch/mips/lib/memcpy.S       | 10 +++++-----
 arch/mips/lib/memset.S       | 12 ++++++------
 4 files changed, 28 insertions(+), 25 deletions(-)

-- 
2.19.0



* [PATCH 1/1] MIPS: Add new Kconfig variable to avoid unaligned access instructions
  2018-09-20 17:03 [PATCH 0/1] MIPS: Lexra CPU support, prerequisite patch Yasha Cherikovsky
@ 2018-09-20 17:03 ` Yasha Cherikovsky
  2018-09-25 17:45   ` Paul Burton
  0 siblings, 1 reply; 6+ messages in thread
From: Yasha Cherikovsky @ 2018-09-20 17:03 UTC (permalink / raw)
  To: Ralf Baechle, Paul Burton, James Hogan, linux-mips
  Cc: linux-kernel, Yasha Cherikovsky

MIPSR6 doesn't support unaligned access instructions (lwl, lwr, swl, swr).
The MIPS tree has some special cases to avoid these instructions,
and currently the code is testing for CONFIG_CPU_MIPSR6.

Declare a new Kconfig variable: CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE,
and make CONFIG_CPU_MIPSR6 select it.
And switch all the special cases to test for the new variable.

Also, the new variable selects CONFIG_GENERIC_CSUM, to use
generic C checksum code (to avoid special assembly code that uses
the unsupported instructions).

This commit should not affect MIPSR6 behavior, and is required
for future Lexra CPU support, since Lexra CPUs also lack these instructions.

Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
---
 arch/mips/Kconfig            |  7 +++++--
 arch/mips/kernel/unaligned.c | 24 ++++++++++++------------
 arch/mips/lib/memcpy.S       | 10 +++++-----
 arch/mips/lib/memset.S       | 12 ++++++------
 4 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 08c10c518f83..5dd2f05ecd39 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1477,7 +1477,6 @@ config CPU_MIPS32_R6
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_MSA
-	select GENERIC_CSUM
 	select HAVE_KVM
 	select MIPS_O32_FP64_SUPPORT
 	help
@@ -1530,7 +1529,6 @@ config CPU_MIPS64_R6
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_MSA
-	select GENERIC_CSUM
 	select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
 	select HAVE_KVM
 	help
@@ -2032,6 +2030,7 @@ config CPU_MIPSR6
 	select MIPS_ASID_BITS_VARIABLE
 	select MIPS_CRC_SUPPORT
 	select MIPS_SPRAM
+	select CPU_HAS_NO_UNALIGNED_LOAD_STORE
 
 config EVA
 	bool
@@ -2456,6 +2455,10 @@ config XKS01
 config CPU_HAS_RIXI
 	bool
 
+config CPU_HAS_NO_UNALIGNED_LOAD_STORE
+	bool
+	select GENERIC_CSUM
+
 #
 # Vectored interrupt mode is an R2 feature
 #
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 2d0b912f9e3e..b19056aa8ea5 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -130,7 +130,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 #define     _LoadW(addr, value, res, type)   \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -186,7 +186,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 
 #define     _LoadHWU(addr, value, res, type) \
 do {                                                        \
@@ -212,7 +212,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 #define     _LoadWU(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -339,7 +339,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 
 
 #define     _StoreHW(addr, value, res, type) \
@@ -365,7 +365,7 @@ do {                                                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));\
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 #define     _StoreW(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -483,7 +483,7 @@ do {                                                        \
 		: "memory");                                \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 
 #else /* __BIG_ENDIAN */
 
@@ -509,7 +509,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 #define     _LoadW(addr, value, res, type)   \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -565,7 +565,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 
 
 #define     _LoadHWU(addr, value, res, type) \
@@ -592,7 +592,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 #define     _LoadWU(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -718,7 +718,7 @@ do {                                                        \
 			: "=&r" (value), "=r" (res)	    \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 
 #define     _StoreHW(addr, value, res, type) \
 do {                                                        \
@@ -743,7 +743,7 @@ do {                                                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));\
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 #define     _StoreW(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -861,7 +861,7 @@ do {                                                        \
 		: "memory");                                \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 #endif
 
 #define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 03e3304d6ae5..1bc8ee6398bb 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -297,7 +297,7 @@
 	 and	t0, src, ADDRMASK
 	PREFS(	0, 2*32(src) )
 	PREFD(	1, 2*32(dst) )
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 	bnez	t1, .Ldst_unaligned\@
 	 nop
 	bnez	t0, .Lsrc_unaligned_dst_aligned\@
@@ -385,7 +385,7 @@
 	bne	rem, len, 1b
 	.set	noreorder
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 	/*
 	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 	 * A loop would do only a byte at a time with possible branch
@@ -487,7 +487,7 @@
 	bne	len, rem, 1b
 	.set	noreorder
 
-#endif /* !CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 .Lcopy_bytes_checklen\@:
 	beqz	len, .Ldone\@
 	 nop
@@ -516,7 +516,7 @@
 	jr	ra
 	 nop
 
-#ifdef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 .Lcopy_unaligned_bytes\@:
 1:
 	COPY_BYTE(0)
@@ -530,7 +530,7 @@
 	ADD	src, src, 8
 	b	1b
 	 ADD	dst, dst, 8
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 	.if __memcpy == 1
 	END(memcpy)
 	.set __memcpy, 0
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 1cc306520a55..87f199df6902 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -112,7 +112,7 @@
 	.set		at
 #endif
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 	R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
 	EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
@@ -122,7 +122,7 @@
 	PTR_SUBU	a0, t0			/* long align ptr */
 	PTR_ADDU	a2, t0			/* correct size */
 
-#else /* CONFIG_CPU_MIPSR6 */
+#else /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 #define STORE_BYTE(N)				\
 	EX(sb, a1, N(a0), .Lbyte_fixup\@);	\
 	beqz		t0, 0f;			\
@@ -145,7 +145,7 @@
 	ori		a0, STORMASK
 	xori		a0, STORMASK
 	PTR_ADDIU	a0, STORSIZE
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 1:	ori		t1, a2, 0x3f		/* # of full blocks */
 	xori		t1, 0x3f
 	beqz		t1, .Lmemset_partial\@	/* no block to fill */
@@ -185,7 +185,7 @@
 	andi		a2, STORMASK		/* At most one long to go */
 
 	beqz		a2, 1f
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 	 PTR_ADDU	a0, a2			/* What's left */
 	R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
@@ -229,12 +229,12 @@
 	.hidden __memset
 	.endif
 
-#ifdef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE
 .Lbyte_fixup\@:
 	PTR_SUBU	a2, $0, t0
 	jr		ra
 	 PTR_ADDIU	a2, 1
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE */
 
 .Lfirst_fixup\@:
 	jr	ra
-- 
2.19.0



* Re: [PATCH 1/1] MIPS: Add new Kconfig variable to avoid unaligned access instructions
  2018-09-20 17:03 ` [PATCH 1/1] MIPS: Add new Kconfig variable to avoid unaligned access instructions Yasha Cherikovsky
@ 2018-09-25 17:45   ` Paul Burton
  2018-09-25 19:30     ` Yasha Cherikovsky
  0 siblings, 1 reply; 6+ messages in thread
From: Paul Burton @ 2018-09-25 17:45 UTC (permalink / raw)
  To: Yasha Cherikovsky; +Cc: Ralf Baechle, James Hogan, linux-mips, linux-kernel

Hi Yasha,

On Thu, Sep 20, 2018 at 08:03:06PM +0300, Yasha Cherikovsky wrote:
> MIPSR6 doesn't support unaligned access instructions (lwl, lwr, swl, swr).
> The MIPS tree has some special cases to avoid these instructions,
> and currently the code is testing for CONFIG_CPU_MIPSR6.
> 
> Declare a new Kconfig variable: CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE,
> and make CONFIG_CPU_MIPSR6 select it.
> And switch all the special cases to test for the new variable.
> 
> Also, the new variable selects CONFIG_GENERIC_CSUM, to use
> generic C checksum code (to avoid special assembly code that uses
> the unsupported instructions).

Thanks for your patch :)

I think it would be cleaner to invert this logic & instead have the
Kconfig entry indicate when the kernel's build target *does* support the
[ls]w[lr] instructions.

It would be good for the name to be clear that these instructions are
what it's about too - "unaligned load store" is a little too vague for
my liking. For example one could easily misconstrue it to mean something
akin to the inverse of CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, whereas
in the MIPSr6 case many CPUs actually handle unaligned accesses in
hardware when using the regular load/store instructions. They don't have
the [ls]w[lr] instructions, but they don't need them because they handle
unaligned accesses more naturally without needing us to be explicit
about them.

How about we:

  - Add a Kconfig option CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and select
    it for all existing pre-r6 targets (probably from CONFIG_CPU_*).

  - Change CONFIG_GENERIC_CSUM to default y if
    !CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and drop the selects of it.

That would avoid the double-negative ("if we don't not support this")
that the #ifndef's currently represent. It would also mean any future
architecture/ISA targets beyond MIPSr6 automatically avoid the
instructions.
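
Roughly, something like this - just an untested sketch to show the shape
of it, with CPU_MIPS32_R2 standing in for any one of the pre-r6 targets,
the select repeated for each of them, and the option name still up for
discussion:

  config CPU_MIPS32_R2
          bool "MIPS32 Release 2"
          # ... existing selects unchanged ...
          select CPU_SUPPORTS_LOAD_STORE_LR

  config CPU_SUPPORTS_LOAD_STORE_LR
          bool

  config GENERIC_CSUM
          bool
          default y if !CPU_SUPPORTS_LOAD_STORE_LR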

Thanks,
    Paul


* Re: [PATCH 1/1] MIPS: Add new Kconfig variable to avoid unaligned access instructions
  2018-09-25 17:45   ` Paul Burton
@ 2018-09-25 19:30     ` Yasha Cherikovsky
  2018-09-25 19:57       ` Paul Burton
  0 siblings, 1 reply; 6+ messages in thread
From: Yasha Cherikovsky @ 2018-09-25 19:30 UTC (permalink / raw)
  To: Paul Burton; +Cc: Ralf Baechle, James Hogan, linux-mips, linux-kernel

Hi Paul,

On Tue, 2018-09-25 at 17:45 +0000, Paul Burton wrote:
> Hi Yasha,
> 
> On Thu, Sep 20, 2018 at 08:03:06PM +0300, Yasha Cherikovsky wrote:
> > MIPSR6 doesn't support unaligned access instructions (lwl, lwr,
> > swl, swr).
> > The MIPS tree has some special cases to avoid these instructions,
> > and currently the code is testing for CONFIG_CPU_MIPSR6.
> > 
> > Declare a new Kconfig variable:
> > CONFIG_CPU_HAS_NO_UNALIGNED_LOAD_STORE,
> > and make CONFIG_CPU_MIPSR6 select it.
> > And switch all the special cases to test for the new variable.
> > 
> > Also, the new variable selects CONFIG_GENERIC_CSUM, to use
> > generic C checksum code (to avoid special assembly code that uses
> > the unsupported instructions).
> 
> Thanks for your patch :)
> 
> I think it would be cleaner to invert this logic & instead have the
> Kconfig entry indicate when the kernel's build target *does* support the
> [ls]w[lr] instructions.
> 
> It would be good for the name to be clear that these instructions are
> what it's about too - "unaligned load store" is a little too vague
> for
> my liking. For example one could easily misconstrue it to mean
> something
> akin to the inverse of CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS,
> whereas
> in the MIPSr6 case many CPUs actually handle unaligned accesses in
> hardware when using the regular load/store instructions. They don't
> have
> the [ls]w[lr] instructions, but they don't need them because they
> handle
> unaligned accesses more naturally without needing us to be explicit
> about them.
> 
> How about we:
> 
>   - Add a Kconfig option CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and
> select
>     it for all existing pre-r6 targets (probably from CONFIG_CPU_*).
> 
>   - Change CONFIG_GENERIC_CSUM to default y if
>     !CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and drop the selects of it.
> 
> That would avoid the double-negative ("if we don't not support this")
> that the #ifndef's currently represent. It would also mean any future
> architecture/ISA targets beyond MIPSr6 automatically avoid the
> instructions.
> 
> Thanks,
>     Paul

Thanks for your feedback, I'll start preparing v2.

Looking in arch/mips/Kconfig, some CPU options start
with CPU_SUPPORTS_ and some with CPU_HAS_.
Which prefix should we use here?

Thanks,
Yasha





* Re: [PATCH 1/1] MIPS: Add new Kconfig variable to avoid unaligned access instructions
  2018-09-25 19:30     ` Yasha Cherikovsky
@ 2018-09-25 19:57       ` Paul Burton
  2018-09-25 20:12         ` Yasha Cherikovsky
  0 siblings, 1 reply; 6+ messages in thread
From: Paul Burton @ 2018-09-25 19:57 UTC (permalink / raw)
  To: Yasha Cherikovsky; +Cc: Ralf Baechle, James Hogan, linux-mips, linux-kernel

Hi Yasha,

On Tue, Sep 25, 2018 at 10:30:52PM +0300, Yasha Cherikovsky wrote:
> On Tue, 2018-09-25 at 17:45 +0000, Paul Burton wrote:
> > How about we:
> > 
> >   - Add a Kconfig option CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and
> > select
> >     it for all existing pre-r6 targets (probably from CONFIG_CPU_*).
> > 
> >   - Change CONFIG_GENERIC_CSUM to default y if
> >     !CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and drop the selects of it.
> > 
> > That would avoid the double-negative ("if we don't not support this")
> > that the #ifndef's currently represent. It would also mean any future
> > architecture/ISA targets beyond MIPSr6 automatically avoid the
> > instructions.
> 
> Thanks for your feedback, I'll start preparing v2.
> 
> Looking in arch/mips/Kconfig, some CPU options start
> with CPU_SUPPORTS_ and some with CPU_HAS_.
> Which prefix should we use here?

That's a good question :)

To be honest I don't think either of them is perfect, since what we're
really describing is what's supported by the ISA that the kernel build
is targeting - and in theory the CPU we actually run on could support
extra things.

But considering it I think CPU_HAS_ is probably the best choice for
this, since it's already used similarly to indicate support for pref &
sync instructions.
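
So with that naming applied to the earlier suggestion, something along
these lines (just a sketch - the exact help text wording is up to you):

  config CPU_HAS_LOAD_STORE_LR
          bool
          help
            CPU has support for the lwl, lwr, swl and swr instructions
            (and their 64-bit counterparts), i.e. the load/store
            left/right instructions used for unaligned accesses.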

Thanks,
    Paul


* Re: [PATCH 1/1] MIPS: Add new Kconfig variable to avoid unaligned access instructions
  2018-09-25 19:57       ` Paul Burton
@ 2018-09-25 20:12         ` Yasha Cherikovsky
  0 siblings, 0 replies; 6+ messages in thread
From: Yasha Cherikovsky @ 2018-09-25 20:12 UTC (permalink / raw)
  To: Paul Burton; +Cc: Ralf Baechle, James Hogan, linux-mips, linux-kernel

On Tue, 2018-09-25 at 19:57 +0000, Paul Burton wrote:
> Hi Yasha,
> 
> On Tue, Sep 25, 2018 at 10:30:52PM +0300, Yasha Cherikovsky wrote:
> > On Tue, 2018-09-25 at 17:45 +0000, Paul Burton wrote:
> > > How about we:
> > > 
> > >   - Add a Kconfig option CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and
> > > select
> > >     it for all existing pre-r6 targets (probably from CONFIG_CPU_*).
> > > 
> > >   - Change CONFIG_GENERIC_CSUM to default y if
> > >     !CONFIG_CPU_SUPPORTS_LOAD_STORE_LR, and drop the selects of it.
> > > 
> > > That would avoid the double-negative ("if we don't not support this")
> > > that the #ifndef's currently represent. It would also mean any future
> > > architecture/ISA targets beyond MIPSr6 automatically avoid the
> > > instructions.
> > 
> > Thanks for your feedback, I'll start preparing v2.
> > 
> > Looking in arch/mips/Kconfig, some CPU options start
> > with CPU_SUPPORTS_ and some with CPU_HAS_.
> > Which prefix should we use here?
> 
> That's a good question :)
> 
> To be honest I don't think either of them is perfect, since what we're
> really describing is what's supported by the ISA that the kernel build
> is targeting - and in theory the CPU we actually run on could support
> extra things.
> 
> But considering it I think CPU_HAS_ is probably the best choice for
> this, since it's already used similarly to indicate support for pref &
> sync instructions.
> 
> Thanks,
>     Paul

OK, thanks.




