From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
To: paulmck@linux.vnet.ibm.com, peterz@infradead.org,
	akpm@linux-foundation.org, kirill@shutemov.name, ak@linux.intel.com,
	mhocko@kernel.org, dave@stgolabs.net, jack@suse.cz,
	Matthew Wilcox <willy@infradead.org>, benh@kernel.crashing.org,
	mpe@ellerman.id.au, paulus@samba.org,
	Thomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@redhat.com>,
	hpa@zytor.com, Will Deacon <will.deacon@arm.com>,
	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Alexei Starovoitov <alexei.starovoitov@gmail.com>,
	kemi.wang@intel.com, sergey.senozhatsky.work@gmail.com,
	Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	haren@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
	npiggin@gmail.com, bsingharora@gmail.com,
	Tim Chen <tim.c.chen@linux.intel.com>,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org
Subject: [PATCH v7 05/24] mm: Prepare for FAULT_FLAG_SPECULATIVE
Date: Tue, 6 Feb 2018 17:49:51 +0100
Message-ID: <1517935810-31177-6-git-send-email-ldufour@linux.vnet.ibm.com>
In-Reply-To: <1517935810-31177-1-git-send-email-ldufour@linux.vnet.ibm.com>

From: Peter Zijlstra <peterz@infradead.org>

When speculating faults (without holding mmap_sem) we need to validate
that the vma against which we loaded pages is still valid when we're
ready to install the new PTE.

Therefore, replace the pte_offset_map_lock() calls that (re)take the
PTL with pte_map_lock() which can fail in case we find the VMA changed
since we started the fault.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

[Port to 4.12 kernel]
[Remove the comment about the fault_env structure which has been
 implemented as the vm_fault structure in the kernel]
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 include/linux/mm.h |  1 +
 mm/memory.c        | 56 ++++++++++++++++++++++++++++++++++++++----------------
 2 files changed, 41 insertions(+), 16 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47c06fd20f6a..51d950cac772 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -302,6 +302,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
 #define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
 #define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */
+#define FAULT_FLAG_SPECULATIVE	0x200	/* Speculative fault, not holding mmap_sem */
 
 #define FAULT_FLAG_TRACE \
 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
diff --git a/mm/memory.c b/mm/memory.c
index 32b9eb77d95c..bb058527525a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2452,6 +2452,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 }
 
+static bool pte_map_lock(struct vm_fault *vmf)
+{
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+				       vmf->address, &vmf->ptl);
+	return true;
+}
+
 /*
  * Handle the case of a page which we actually need to copy to a new page.
  *
@@ -2479,6 +2486,7 @@ static int wp_page_copy(struct vm_fault *vmf)
 	const unsigned long mmun_start = vmf->address & PAGE_MASK;
 	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
 	struct mem_cgroup *memcg;
+	int ret = VM_FAULT_OOM;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
@@ -2506,7 +2514,11 @@ static int wp_page_copy(struct vm_fault *vmf)
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		mem_cgroup_cancel_charge(new_page, memcg, false);
+		ret = VM_FAULT_RETRY;
+		goto oom_free_new;
+	}
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
@@ -2598,7 +2610,7 @@ static int wp_page_copy(struct vm_fault *vmf)
 oom:
 	if (old_page)
 		put_page(old_page);
-	return VM_FAULT_OOM;
+	return ret;
 }
 
 /**
@@ -2619,8 +2631,8 @@ static int wp_page_copy(struct vm_fault *vmf)
 int finish_mkwrite_fault(struct vm_fault *vmf)
 {
 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
-	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
-				       &vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
 	/*
 	 * We might have raced with another page fault while we released the
 	 * pte_offset_map_lock.
@@ -2738,8 +2750,11 @@ static int do_wp_page(struct vm_fault *vmf)
 			get_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			lock_page(vmf->page);
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				unlock_page(vmf->page);
+				put_page(vmf->page);
+				return VM_FAULT_RETRY;
+			}
 			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
 				unlock_page(vmf->page);
 				pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2967,8 +2982,10 @@ int do_swap_page(struct vm_fault *vmf)
 		 * Back out if somebody else faulted in this pte
 		 * while we released the pte lock.
 		 */
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
+		if (!pte_map_lock(vmf)) {
+			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+			return VM_FAULT_RETRY;
+		}
 		if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
 			ret = VM_FAULT_OOM;
 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -3024,8 +3041,11 @@ int do_swap_page(struct vm_fault *vmf)
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		mem_cgroup_cancel_charge(page, memcg, false);
+		goto out_page;
+	}
 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
 		goto out_nomap;
 
@@ -3154,8 +3174,8 @@ static int do_anonymous_page(struct vm_fault *vmf)
 			!mm_forbids_zeropage(vma->vm_mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
+		if (!pte_map_lock(vmf))
+			return VM_FAULT_RETRY;
 		if (!pte_none(*vmf->pte))
 			goto unlock;
 		ret = check_stable_address_space(vma->vm_mm);
@@ -3190,8 +3210,11 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		mem_cgroup_cancel_charge(page, memcg, false);
+		put_page(page);
+		return VM_FAULT_RETRY;
+	}
 	if (!pte_none(*vmf->pte))
 		goto release;
 
@@ -3315,8 +3338,9 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 	 * pte_none() under vmf->ptl protection when we return to
 	 * alloc_set_pte().
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
+
 	return 0;
 }
 
-- 
2.7.4
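
As introduced by this patch, pte_map_lock() is deliberately a trivial
wrapper that always succeeds; the failure paths added at its call sites
only become reachable once FAULT_FLAG_SPECULATIVE faults are wired up
later in the series. For orientation, a minimal sketch of the shape
that check could take once speculation is enabled follows. This is an
illustration, not code from the series: vma_has_changed() is an assumed
helper standing in for whatever per-VMA validation (e.g. a sequence
count comparison) the later patches actually introduce.

static bool pte_map_lock(struct vm_fault *vmf)
{
	/* Classic path: mmap_sem is held, so the VMA cannot change. */
	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
					       vmf->address, &vmf->ptl);
		return true;
	}

	/*
	 * Speculative path: the VMA may be modified under us, so
	 * validate it both before and after taking the PTL.
	 * vma_has_changed() is an assumed helper, not an existing API.
	 */
	if (vma_has_changed(vmf))
		return false;

	vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
	if (!spin_trylock(vmf->ptl)) {
		pte_unmap(vmf->pte);
		return false;
	}

	/* Recheck: the VMA may have changed between the two checks. */
	if (vma_has_changed(vmf)) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return false;
	}

	return true;
}

Every call-site change in the patch exists to cope with that false
return: each caller unwinds whatever state it holds (memcg charges,
page references, page locks) and bails out with VM_FAULT_RETRY so the
fault can be retried under mmap_sem.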