From: Anthony Yznaga <anthony.yznaga@oracle.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: willy@infradead.org, corbet@lwn.net, tglx@linutronix.de, mingo@redhat.com,
	bp@alien8.de, x86@kernel.org, hpa@zytor.com, dave.hansen@linux.intel.com,
	luto@kernel.org, peterz@infradead.org, rppt@kernel.org,
	akpm@linux-foundation.org, hughd@google.com, ebiederm@xmission.com,
	keescook@chromium.org, ardb@kernel.org, nivedita@alum.mit.edu,
	jroedel@suse.de, masahiroy@kernel.org, nathan@kernel.org, terrelln@fb.com,
	vincenzo.frascino@arm.com, martin.b.radev@gmail.com, andreyknvl@google.com,
	daniel.kiper@oracle.com, rafael.j.wysocki@intel.com, dan.j.williams@intel.com,
	Jonathan.Cameron@huawei.com, bhe@redhat.com, rminnich@gmail.com,
	ashish.kalra@amd.com, guro@fb.com, hannes@cmpxchg.org, mhocko@kernel.org,
	iamjoonsoo.kim@lge.com, vbabka@suse.cz, alex.shi@linux.alibaba.com,
	david@redhat.com, richard.weiyang@gmail.com, vdavydov.dev@gmail.com,
	graf@amazon.com, jason.zeng@intel.com, lei.l.li@intel.com,
	daniel.m.jordan@oracle.com, steven.sistare@oracle.com,
	linux-fsdevel@vger.kernel.org, linux-doc@vger.kernel.org,
	kexec@lists.infradead.org
Subject: [RFC v2 38/43] mm: implement splicing a list of pages to the LRU
Date: Tue, 30 Mar 2021 14:36:13 -0700	[thread overview]
Message-ID: <1617140178-8773-39-git-send-email-anthony.yznaga@oracle.com> (raw)
In-Reply-To: <1617140178-8773-1-git-send-email-anthony.yznaga@oracle.com>

Considerable contention on the LRU lock occurs when multiple threads are
used to insert pages into a shmem file in parallel. To alleviate this,
provide a way to stage pages destined for the same LRU so that they can be
added in one step by splicing the staged list onto the LRU and updating
the statistics once with the lock held. For now only unevictable pages are
supported.

Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
 include/linux/swap.h | 13 ++++++++
 mm/swap.c            | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4cc6ec3bf0ab..254c9c8d71d0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,6 +351,19 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file,
 extern void lru_cache_add_inactive_or_unevictable(struct page *page,
 						struct vm_area_struct *vma);
 
+struct lru_splice {
+	struct list_head splice;
+	struct list_head *lru_head;
+	struct lruvec *lruvec;
+	enum lru_list lru;
+	unsigned long nr_pages[MAX_NR_ZONES];
+	unsigned long pgculled;
+};
+#define LRU_SPLICE_INIT(name)	{ .splice = LIST_HEAD_INIT(name.splice) }
+#define LRU_SPLICE(name)	\
+	struct lru_splice name = LRU_SPLICE_INIT(name)
+extern void lru_splice_add(struct page *page, struct lru_splice *splice);
+extern void add_splice_to_lru_list(struct lru_splice *splice);
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/mm/swap.c b/mm/swap.c
index 31b844d4ed94..a1db6a748608 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -200,6 +200,92 @@ int get_kernel_page(unsigned long start, int write, struct page **pages)
 }
 EXPORT_SYMBOL_GPL(get_kernel_page);
 
+/*
+ * Update stats and move accumulated pages from an lru_splice to the lru.
+ */
+void add_splice_to_lru_list(struct lru_splice *splice)
+{
+	struct lruvec *lruvec = splice->lruvec;
+	enum lru_list lru = splice->lru;
+	unsigned long flags = 0;
+	int zid;
+
+	if (list_empty(&splice->splice))
+		return;
+
+	spin_lock_irqsave(&lruvec->lru_lock, flags);
+	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+		if (splice->nr_pages[zid])
+			update_lru_size(lruvec, lru, zid, splice->nr_pages[zid]);
+	}
+	count_vm_events(UNEVICTABLE_PGCULLED, splice->pgculled);
+	list_splice_init(&splice->splice, splice->lru_head);
+	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
+}
+
+static void add_page_to_lru_splice(struct page *page, struct lru_splice *splice,
+				   struct lruvec *lruvec, enum lru_list lru)
+{
+	if (list_empty(&splice->splice)) {
+		int zid;
+
+		splice->lruvec = lruvec;
+		splice->lru_head = &lruvec->lists[lru];
+		splice->lru = lru;
+		for (zid = 0; zid < MAX_NR_ZONES; zid++)
+			splice->nr_pages[zid] = 0;
+		splice->pgculled = 0;
+	}
+
+	BUG_ON(splice->lruvec != lruvec);
+	BUG_ON(splice->lru_head != &lruvec->lists[lru]);
+
+	list_add(&page->lru, &splice->splice);
+	splice->nr_pages[page_zonenum(page)] += thp_nr_pages(page);
+}
+
+/*
+ * Similar in functionality to __pagevec_lru_add_fn() but here the page is
+ * being added to an lru_splice and the LRU lock is not held.
+ */
+static void page_lru_splice_add(struct page *page, struct lru_splice *splice,
+				struct lruvec *lruvec)
+{
+	enum lru_list lru;
+	int was_unevictable = TestClearPageUnevictable(page);
+	int nr_pages = thp_nr_pages(page);
+
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	/* XXX only supports unevictable pages at the moment */
+	VM_BUG_ON_PAGE(was_unevictable, page);
+
+	SetPageLRU(page);
+	smp_mb__after_atomic();
+
+	lru = LRU_UNEVICTABLE;
+	ClearPageActive(page);
+	SetPageUnevictable(page);
+	if (!was_unevictable)
+		splice->pgculled += nr_pages;
+
+	add_page_to_lru_splice(page, splice, lruvec, lru);
+	trace_mm_lru_insertion(page);
+}
+
+void lru_splice_add(struct page *page, struct lru_splice *splice)
+{
+	struct lruvec *lruvec;
+
+	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+
+	get_page(page);
+	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+	if (lruvec != splice->lruvec)
+		add_splice_to_lru_list(splice);
+	page_lru_splice_add(page, splice, lruvec);
+	put_page(page);
+}
+
 static void pagevec_lru_move_fn(struct pagevec *pvec,
 	void (*move_fn)(struct page *page, struct lruvec *lruvec))
 {
-- 
1.8.3.1
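[Editor's note: a minimal caller sketch follows to make the intended use of
the staging API concrete. It is not part of the patch; the function name
example_splice_pages_to_lru() and the page array it receives are hypothetical,
and it assumes the pages are newly inserted shmem pages that are not yet on
any LRU and are all bound for the unevictable list, the only case this patch
supports. The point it illustrates is that lru_splice_add() stages pages
without taking the LRU lock, and add_splice_to_lru_list() takes the lock a
single time to update the statistics and splice the whole batch.]

/* Hypothetical caller, for illustration only; not part of this patch. */
static void example_splice_pages_to_lru(struct page **pages, int nr)
{
	LRU_SPLICE(splice);	/* on-stack staging list, initially empty */
	int i;

	for (i = 0; i < nr; i++) {
		/*
		 * No lru_lock is taken here: the page is linked onto the
		 * local splice list and per-zone page counts are
		 * accumulated.  If a page belongs to a different lruvec
		 * than the pages already staged, lru_splice_add() first
		 * flushes the previous batch under the lock.
		 */
		lru_splice_add(pages[i], &splice);
	}

	/* One lock acquisition: update LRU stats, splice the whole list. */
	add_splice_to_lru_list(&splice);
}

[The staging API is presumably consumed by the following patch in the series,
[RFC v2 39/43] "shmem: optimize adding pages to the LRU in
shmem_insert_pages()", for the bulk shmem insertion path.]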
Thread overview: 94+ messages in thread (duplicate entries from cross-posted lists omitted)

2021-03-30 21:35 [RFC v2 00/43] PKRAM: Preserved-over-Kexec RAM Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 01/43] mm: add PKRAM API stubs and Kconfig Anthony Yznaga
2021-03-31 18:43   ` Randy Dunlap
2021-03-31 20:28     ` Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 02/43] mm: PKRAM: implement node load and save functions Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 03/43] mm: PKRAM: implement object load and save functions Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 04/43] mm: PKRAM: implement page stream operations Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 05/43] mm: PKRAM: support preserving transparent hugepages Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 06/43] mm: PKRAM: implement byte stream operations Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 07/43] mm: PKRAM: link nodes by pfn before reboot Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 08/43] mm: PKRAM: introduce super block Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 09/43] PKRAM: track preserved pages in a physical mapping pagetable Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 10/43] PKRAM: pass a list of preserved ranges to the next kernel Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 11/43] PKRAM: prepare for adding preserved ranges to memblock reserved Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 12/43] mm: PKRAM: reserve preserved memory at boot Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 13/43] PKRAM: free the preserved ranges list Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 14/43] PKRAM: prevent inadvertent use of a stale superblock Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 15/43] PKRAM: provide a way to ban pages from use by PKRAM Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 16/43] kexec: PKRAM: prevent kexec clobbering preserved pages in some cases Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 17/43] PKRAM: provide a way to check if a memory range has preserved pages Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 18/43] kexec: PKRAM: avoid clobbering already preserved pages Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 19/43] mm: PKRAM: allow preserved memory to be freed from userspace Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 20/43] PKRAM: disable feature when running the kdump kernel Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 21/43] x86/KASLR: PKRAM: support physical kaslr Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 22/43] x86/boot/compressed/64: use 1GB pages for mappings Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 23/43] mm: shmem: introduce shmem_insert_page Anthony Yznaga
2021-03-30 21:35 ` [RFC v2 24/43] mm: shmem: enable saving to PKRAM Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 25/43] mm: shmem: prevent swapping of PKRAM-enabled tmpfs pages Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 26/43] mm: shmem: specify the mm to use when inserting pages Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 27/43] mm: shmem: when inserting, handle pages already charged to a memcg Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 28/43] x86/mm/numa: add numa_isolate_memblocks() Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 29/43] PKRAM: ensure memblocks with preserved pages init'd for numa Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 30/43] memblock: PKRAM: mark memblocks that contain preserved pages Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 31/43] memblock, mm: defer initialization of preserved pages Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 32/43] shmem: preserve shmem files a chunk at a time Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 33/43] PKRAM: atomically add and remove link pages Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 34/43] shmem: PKRAM: multithread preserving and restoring shmem pages Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 35/43] shmem: introduce shmem_insert_pages() Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 36/43] PKRAM: add support for loading pages in bulk Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 37/43] shmem: PKRAM: enable bulk loading of preserved pages into shmem Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 38/43] mm: implement splicing a list of pages to the LRU Anthony Yznaga [this message]
2021-03-30 21:36 ` [RFC v2 39/43] shmem: optimize adding pages to the LRU in shmem_insert_pages() Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 40/43] shmem: initial support for adding multiple pages to pagecache Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 41/43] XArray: add xas_export_node() and xas_import_node() Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 42/43] shmem: reduce time holding xa_lock when inserting pages Anthony Yznaga
2021-03-30 21:36 ` [RFC v2 43/43] PKRAM: improve index alignment of pkram_link entries Anthony Yznaga
2021-06-05 13:39 ` [RFC v2 00/43] PKRAM: Preserved-over-Kexec RAM Pavel Tatashin