From: Allen Pais <allen.pais@oracle.com>
To: linux-kernel@vger.kernel.org
Cc: sparclinux@vger.kernel.org, davem@davemloft.net, bigeasy@linutronix.de,
	Allen Pais <allen.pais@oracle.com>
Subject: [PATCH 3/4] sparc64: convert spinlock_t to raw_spinlock_t in mmu_context_t
Date: Mon, 6 Jan 2014 09:25:09 +0530
Message-ID: <1388980510-10190-4-git-send-email-allen.pais@oracle.com>
In-Reply-To: <1388980510-10190-1-git-send-email-allen.pais@oracle.com>

While attempting to get PREEMPT_RT working on sparc64 using
linux-stable-rt version 3.10.22-rt19+, the kernel crashes with the
following trace:

[ 1487.027884] I7: <rt_mutex_setprio+0x3c/0x2c0>
[ 1487.027885] Call Trace:
[ 1487.027887]  [00000000004967dc] rt_mutex_setprio+0x3c/0x2c0
[ 1487.027892]  [00000000004afe20] task_blocks_on_rt_mutex+0x180/0x200
[ 1487.027895]  [0000000000819114] rt_spin_lock_slowlock+0x94/0x300
[ 1487.027897]  [0000000000817ebc] __schedule+0x39c/0x53c
[ 1487.027899]  [00000000008185fc] schedule+0x1c/0xc0
[ 1487.027908]  [000000000048fff4] smpboot_thread_fn+0x154/0x2e0
[ 1487.027913]  [000000000048753c] kthread+0x7c/0xa0
[ 1487.027920]  [00000000004060c4] ret_from_syscall+0x1c/0x2c
[ 1487.027922]  [0000000000000000]          (null)

Thomas debugged this issue and pointed to switch_mm():

	spin_lock_irqsave(&mm->context.lock, flags);

context.lock needs to be a raw_spinlock.

Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Allen Pais <allen.pais@oracle.com>
---
 arch/sparc/include/asm/mmu_64.h         |  2 +-
 arch/sparc/include/asm/mmu_context_64.h |  8 ++++----
 arch/sparc/kernel/smp_64.c              |  4 ++--
 arch/sparc/mm/init_64.c                 |  4 ++--
 arch/sparc/mm/tsb.c                     | 16 ++++++++--------
 5 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 76092c4..e945ddb 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -90,7 +90,7 @@ struct tsb_config {
 #endif
 
 typedef struct {
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	unsigned long		sparc64_ctx_val;
 	unsigned long		huge_pte_count;
 	struct page		*pgtable_page;
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 3d528f0..3a85624 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -77,7 +77,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	if (unlikely(mm == &init_mm))
 		return;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 		__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	}
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -136,7 +136,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	unsigned long flags;
 	int cpu;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
@@ -146,7 +146,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 77539ed..f42e1a7 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -975,12 +975,12 @@ void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *reg
 	if (unlikely(!mm || (mm == &init_mm)))
 		return;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	if (unlikely(!CTX_VALID(mm->context)))
 		get_new_mmu_context(mm);
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context),
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 04fd55a..bd5253d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -350,7 +350,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
@@ -361,7 +361,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 					address, pte_val(pte));
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 void flush_dcache_page(struct page *page)
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 2cc3bce..d84d4ea 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -73,7 +73,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -90,14 +90,14 @@ void flush_tsb_user(struct tlb_batch *tb)
 		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
 {
 	unsigned long nentries, base, flags;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -114,7 +114,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
 		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
 	}
 #endif
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
@@ -392,7 +392,7 @@ retry_tsb_alloc:
 	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	old_tsb = mm->context.tsb_block[tsb_index].tsb;
 	old_cache_index =
@@ -407,7 +407,7 @@ retry_tsb_alloc:
	 */
 	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
-		spin_unlock_irqrestore(&mm->context.lock, flags);
+		raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 
 		return;
@@ -433,7 +433,7 @@ retry_tsb_alloc:
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
 	setup_tsb_params(mm, tsb_index, new_size);
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	/* If old_tsb is NULL, we're being invoked for the first time
 	 * from init_new_context().
@@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 #endif
 	unsigned int i;
 
-	spin_lock_init(&mm->context.lock);
+	raw_spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
-- 
1.7.10.4
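
The reasoning behind the conversion, in short: with PREEMPT_RT applied,
spinlock_t is substituted by a sleeping rt_mutex-based lock, so taking
mm->context.lock from switch_mm() -- which runs inside __schedule(),
where sleeping is forbidden -- ends up in rt_spin_lock_slowlock() and
task_blocks_on_rt_mutex(), exactly the frames in the trace above.
raw_spinlock_t keeps the traditional IRQ-disabling, non-sleeping
behaviour on -rt. The fragment below is only a minimal sketch of that
pattern; demo_context, demo_init and demo_switch_mm_path are made-up
names, not code from this patch:

/*
 * Minimal illustration of the spinlock_t -> raw_spinlock_t conversion.
 * On PREEMPT_RT, spinlock_t may sleep; raw_spinlock_t always disables
 * interrupts and busy-waits, so it stays usable in scheduler context.
 */
#include <linux/spinlock.h>

struct demo_context {
	raw_spinlock_t	lock;			/* was: spinlock_t lock; */
	unsigned long	sparc64_ctx_val;
};

static void demo_init(struct demo_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);		/* was: spin_lock_init() */
	ctx->sparc64_ctx_val = 0UL;
}

static void demo_switch_mm_path(struct demo_context *ctx)
{
	unsigned long flags;

	/* Never sleeps, even on PREEMPT_RT -- safe under __schedule(). */
	raw_spin_lock_irqsave(&ctx->lock, flags);
	/* ... load the secondary context, flush the TLB, switch the TSB ... */
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

The trade-off is that raw spinlock sections are never preemptible, even
on -rt, so they should stay as short as the ones touched by this patch.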