From: Jane Chu <jane.chu@oracle.com> To: david@fromorbit.com, djwong@kernel.org, dan.j.williams@intel.com, hch@infradead.org, vishal.l.verma@intel.com, dave.jiang@intel.com, agk@redhat.com, snitzer@redhat.com, dm-devel@redhat.com, ira.weiny@intel.com, willy@infradead.org, vgoyal@redhat.com, linux-fsdevel@vger.kernel.org, nvdimm@lists.linux.dev, linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org, x86@kernel.org Subject: [PATCH v7 5/6] pmem: refactor pmem_clear_poison() Date: Tue, 5 Apr 2022 13:47:46 -0600 [thread overview] Message-ID: <20220405194747.2386619-6-jane.chu@oracle.com> (raw) In-Reply-To: <20220405194747.2386619-1-jane.chu@oracle.com> Refactor the pmem_clear_poison() in order to share common code later. Signed-off-by: Jane Chu <jane.chu@oracle.com> --- drivers/nvdimm/pmem.c | 78 ++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 26 deletions(-) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 0400c5a7ba39..56596be70400 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -45,10 +45,27 @@ static struct nd_region *to_region(struct pmem_device *pmem) return to_nd_region(to_dev(pmem)->parent); } -static void hwpoison_clear(struct pmem_device *pmem, - phys_addr_t phys, unsigned int len) +static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset) { + return (pmem->phys_addr + offset); +} + +static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset) +{ + return (offset - pmem->data_offset) >> SECTOR_SHIFT; +} + +static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector) +{ + return ((sector << SECTOR_SHIFT) + pmem->data_offset); +} + +static void pmem_clear_hwpoison(struct pmem_device *pmem, phys_addr_t offset, + unsigned int len) +{ + phys_addr_t phys = to_phys(pmem, offset); unsigned long pfn_start, pfn_end, pfn; + unsigned int blks = len >> SECTOR_SHIFT; /* only pmem in the linear map supports HWPoison */ if (is_vmalloc_addr(pmem->virt_addr)) @@ -67,35 +84,44 @@ 
static void hwpoison_clear(struct pmem_device *pmem, if (test_and_clear_pmem_poison(page)) clear_mce_nospec(pfn); } + + dev_dbg(to_dev(pmem), "%#llx clear %u sector%s\n", + (unsigned long long) to_sect(pmem, offset), blks, + blks > 1 ? "s" : ""); } -static blk_status_t pmem_clear_poison(struct pmem_device *pmem, +static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks) +{ + if (blks == 0) + return; + badblocks_clear(&pmem->bb, sector, blks); + if (pmem->bb_state) + sysfs_notify_dirent(pmem->bb_state); +} + +static long __pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, unsigned int len) { - struct device *dev = to_dev(pmem); - sector_t sector; - long cleared; - blk_status_t rc = BLK_STS_OK; - - sector = (offset - pmem->data_offset) / 512; - - cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); - if (cleared < len) - rc = BLK_STS_IOERR; - if (cleared > 0 && cleared / 512) { - hwpoison_clear(pmem, pmem->phys_addr + offset, cleared); - cleared /= 512; - dev_dbg(dev, "%#llx clear %ld sector%s\n", - (unsigned long long) sector, cleared, - cleared > 1 ? "s" : ""); - badblocks_clear(&pmem->bb, sector, cleared); - if (pmem->bb_state) - sysfs_notify_dirent(pmem->bb_state); + phys_addr_t phys = to_phys(pmem, offset); + long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len); + + if (cleared > 0) { + pmem_clear_hwpoison(pmem, offset, cleared); + arch_invalidate_pmem(pmem->virt_addr + offset, len); } + return cleared; +} - arch_invalidate_pmem(pmem->virt_addr + offset, len); +static blk_status_t pmem_clear_poison(struct pmem_device *pmem, + phys_addr_t offset, unsigned int len) +{ + long cleared = __pmem_clear_poison(pmem, offset, len); - return rc; + if (cleared < 0) + return BLK_STS_IOERR; + + pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT); + return (cleared < len) ? 
BLK_STS_IOERR : BLK_STS_OK; } static void write_pmem(void *pmem_addr, struct page *page, @@ -143,7 +169,7 @@ static blk_status_t pmem_do_read(struct pmem_device *pmem, sector_t sector, unsigned int len) { blk_status_t rc; - phys_addr_t pmem_off = sector * 512 + pmem->data_offset; + phys_addr_t pmem_off = to_offset(pmem, sector); void *pmem_addr = pmem->virt_addr + pmem_off; if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) @@ -158,7 +184,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem, struct page *page, unsigned int page_off, sector_t sector, unsigned int len) { - phys_addr_t pmem_off = sector * 512 + pmem->data_offset; + phys_addr_t pmem_off = to_offset(pmem, sector); void *pmem_addr = pmem->virt_addr + pmem_off; if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) { -- 2.18.4
WARNING: multiple messages have this Message-ID (diff)
From: Jane Chu <jane.chu@oracle.com> To: david@fromorbit.com, djwong@kernel.org, dan.j.williams@intel.com, hch@infradead.org, vishal.l.verma@intel.com, dave.jiang@intel.com, agk@redhat.com, snitzer@redhat.com, dm-devel@redhat.com, ira.weiny@intel.com, willy@infradead.org, vgoyal@redhat.com, linux-fsdevel@vger.kernel.org, nvdimm@lists.linux.dev, linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org, x86@kernel.org Subject: [dm-devel] [PATCH v7 5/6] pmem: refactor pmem_clear_poison() Date: Tue, 5 Apr 2022 13:47:46 -0600 [thread overview] Message-ID: <20220405194747.2386619-6-jane.chu@oracle.com> (raw) In-Reply-To: <20220405194747.2386619-1-jane.chu@oracle.com> Refactor the pmem_clear_poison() in order to share common code later. Signed-off-by: Jane Chu <jane.chu@oracle.com> --- drivers/nvdimm/pmem.c | 78 ++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 26 deletions(-) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 0400c5a7ba39..56596be70400 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -45,10 +45,27 @@ static struct nd_region *to_region(struct pmem_device *pmem) return to_nd_region(to_dev(pmem)->parent); } -static void hwpoison_clear(struct pmem_device *pmem, - phys_addr_t phys, unsigned int len) +static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset) { + return (pmem->phys_addr + offset); +} + +static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset) +{ + return (offset - pmem->data_offset) >> SECTOR_SHIFT; +} + +static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector) +{ + return ((sector << SECTOR_SHIFT) + pmem->data_offset); +} + +static void pmem_clear_hwpoison(struct pmem_device *pmem, phys_addr_t offset, + unsigned int len) +{ + phys_addr_t phys = to_phys(pmem, offset); unsigned long pfn_start, pfn_end, pfn; + unsigned int blks = len >> SECTOR_SHIFT; /* only pmem in the linear map supports HWPoison */ if (is_vmalloc_addr(pmem->virt_addr)) @@ 
-67,35 +84,44 @@ static void hwpoison_clear(struct pmem_device *pmem, if (test_and_clear_pmem_poison(page)) clear_mce_nospec(pfn); } + + dev_dbg(to_dev(pmem), "%#llx clear %u sector%s\n", + (unsigned long long) to_sect(pmem, offset), blks, + blks > 1 ? "s" : ""); } -static blk_status_t pmem_clear_poison(struct pmem_device *pmem, +static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks) +{ + if (blks == 0) + return; + badblocks_clear(&pmem->bb, sector, blks); + if (pmem->bb_state) + sysfs_notify_dirent(pmem->bb_state); +} + +static long __pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, unsigned int len) { - struct device *dev = to_dev(pmem); - sector_t sector; - long cleared; - blk_status_t rc = BLK_STS_OK; - - sector = (offset - pmem->data_offset) / 512; - - cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); - if (cleared < len) - rc = BLK_STS_IOERR; - if (cleared > 0 && cleared / 512) { - hwpoison_clear(pmem, pmem->phys_addr + offset, cleared); - cleared /= 512; - dev_dbg(dev, "%#llx clear %ld sector%s\n", - (unsigned long long) sector, cleared, - cleared > 1 ? "s" : ""); - badblocks_clear(&pmem->bb, sector, cleared); - if (pmem->bb_state) - sysfs_notify_dirent(pmem->bb_state); + phys_addr_t phys = to_phys(pmem, offset); + long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len); + + if (cleared > 0) { + pmem_clear_hwpoison(pmem, offset, cleared); + arch_invalidate_pmem(pmem->virt_addr + offset, len); } + return cleared; +} - arch_invalidate_pmem(pmem->virt_addr + offset, len); +static blk_status_t pmem_clear_poison(struct pmem_device *pmem, + phys_addr_t offset, unsigned int len) +{ + long cleared = __pmem_clear_poison(pmem, offset, len); - return rc; + if (cleared < 0) + return BLK_STS_IOERR; + + pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT); + return (cleared < len) ? 
BLK_STS_IOERR : BLK_STS_OK; } static void write_pmem(void *pmem_addr, struct page *page, @@ -143,7 +169,7 @@ static blk_status_t pmem_do_read(struct pmem_device *pmem, sector_t sector, unsigned int len) { blk_status_t rc; - phys_addr_t pmem_off = sector * 512 + pmem->data_offset; + phys_addr_t pmem_off = to_offset(pmem, sector); void *pmem_addr = pmem->virt_addr + pmem_off; if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) @@ -158,7 +184,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem, struct page *page, unsigned int page_off, sector_t sector, unsigned int len) { - phys_addr_t pmem_off = sector * 512 + pmem->data_offset; + phys_addr_t pmem_off = to_offset(pmem, sector); void *pmem_addr = pmem->virt_addr + pmem_off; if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) { -- 2.18.4 -- dm-devel mailing list dm-devel@redhat.com https://listman.redhat.com/mailman/listinfo/dm-devel
next prev parent reply other threads:[~2022-04-05 19:48 UTC|newest] Thread overview: 82+ messages / expand[flat|nested] mbox.gz Atom feed top 2022-04-05 19:47 [PATCH v7 0/6] DAX poison recovery Jane Chu 2022-04-05 19:47 ` [dm-devel] " Jane Chu 2022-04-05 19:47 ` [PATCH v7 1/6] x86/mm: fix comment Jane Chu 2022-04-05 19:47 ` [dm-devel] " Jane Chu 2022-04-11 22:07 ` Dan Williams 2022-04-11 22:07 ` [dm-devel] " Dan Williams 2022-04-12 9:53 ` Borislav Petkov 2022-04-12 9:53 ` [dm-devel] " Borislav Petkov 2022-04-14 1:00 ` Jane Chu 2022-04-14 1:00 ` [dm-devel] " Jane Chu 2022-04-14 8:44 ` Borislav Petkov 2022-04-14 8:44 ` [dm-devel] " Borislav Petkov 2022-04-14 21:54 ` Jane Chu 2022-04-14 21:54 ` [dm-devel] " Jane Chu 2022-04-05 19:47 ` [PATCH v7 2/6] x86/mce: relocate set{clear}_mce_nospec() functions Jane Chu 2022-04-05 19:47 ` [dm-devel] " Jane Chu 2022-04-06 5:01 ` Christoph Hellwig 2022-04-06 5:01 ` [dm-devel] " Christoph Hellwig 2022-04-11 22:20 ` Dan Williams 2022-04-11 22:20 ` [dm-devel] " Dan Williams 2022-04-14 0:56 ` Jane Chu 2022-04-14 0:56 ` [dm-devel] " Jane Chu 2022-04-05 19:47 ` [PATCH v7 3/6] mce: fix set_mce_nospec to always unmap the whole page Jane Chu 2022-04-05 19:47 ` [dm-devel] " Jane Chu 2022-04-06 5:02 ` Christoph Hellwig 2022-04-06 5:02 ` [dm-devel] " Christoph Hellwig 2022-04-11 23:27 ` Dan Williams 2022-04-11 23:27 ` [dm-devel] " Dan Williams 2022-04-13 23:36 ` Jane Chu 2022-04-13 23:36 ` [dm-devel] " Jane Chu 2022-04-14 2:32 ` Dan Williams 2022-04-14 2:32 ` [dm-devel] " Dan Williams 2022-04-15 16:18 ` Jane Chu 2022-04-15 16:18 ` [dm-devel] " Jane Chu 2022-04-12 10:07 ` Borislav Petkov 2022-04-12 10:07 ` [dm-devel] " Borislav Petkov 2022-04-13 23:41 ` Jane Chu 2022-04-13 23:41 ` [dm-devel] " Jane Chu 2022-04-05 19:47 ` [PATCH v7 4/6] dax: add DAX_RECOVERY flag and .recovery_write dev_pgmap_ops Jane Chu 2022-04-05 19:47 ` [dm-devel] " Jane Chu 2022-04-06 5:19 ` Christoph Hellwig 2022-04-06 5:19 ` Christoph Hellwig 2022-04-06 17:32 ` 
[dm-devel] " Jane Chu 2022-04-06 17:32 ` Jane Chu 2022-04-06 17:45 ` Jane Chu 2022-04-06 17:45 ` [dm-devel] " Jane Chu 2022-04-07 5:30 ` Christoph Hellwig 2022-04-07 5:30 ` [dm-devel] " Christoph Hellwig 2022-04-11 23:55 ` Dan Williams 2022-04-11 23:55 ` [dm-devel] " Dan Williams 2022-04-14 0:48 ` Jane Chu 2022-04-14 0:48 ` [dm-devel] " Jane Chu 2022-04-14 0:47 ` Jane Chu 2022-04-14 0:47 ` [dm-devel] " Jane Chu 2022-04-12 0:08 ` Dan Williams 2022-04-12 0:08 ` [dm-devel] " Dan Williams 2022-04-14 0:50 ` Jane Chu 2022-04-14 0:50 ` [dm-devel] " Jane Chu 2022-04-12 4:57 ` Dan Williams 2022-04-12 4:57 ` [dm-devel] " Dan Williams 2022-04-12 5:02 ` Christoph Hellwig 2022-04-12 5:02 ` [dm-devel] " Christoph Hellwig 2022-04-14 0:51 ` Jane Chu 2022-04-14 0:51 ` [dm-devel] " Jane Chu 2022-04-05 19:47 ` Jane Chu [this message] 2022-04-05 19:47 ` [dm-devel] [PATCH v7 5/6] pmem: refactor pmem_clear_poison() Jane Chu 2022-04-06 5:04 ` Christoph Hellwig 2022-04-06 5:04 ` [dm-devel] " Christoph Hellwig 2022-04-06 17:34 ` Jane Chu 2022-04-06 17:34 ` Jane Chu 2022-04-12 4:26 ` Dan Williams 2022-04-12 4:26 ` [dm-devel] " Dan Williams 2022-04-14 0:55 ` Jane Chu 2022-04-14 0:55 ` [dm-devel] " Jane Chu 2022-04-14 2:02 ` Dan Williams 2022-04-14 2:02 ` [dm-devel] " Dan Williams 2022-04-05 19:47 ` [PATCH v7 6/6] pmem: implement pmem_recovery_write() Jane Chu 2022-04-05 19:47 ` [dm-devel] " Jane Chu 2022-04-06 5:21 ` Christoph Hellwig 2022-04-06 5:21 ` [dm-devel] " Christoph Hellwig 2022-04-06 17:33 ` Jane Chu 2022-04-06 17:33 ` Jane Chu
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20220405194747.2386619-6-jane.chu@oracle.com \ --to=jane.chu@oracle.com \ --cc=agk@redhat.com \ --cc=dan.j.williams@intel.com \ --cc=dave.jiang@intel.com \ --cc=david@fromorbit.com \ --cc=djwong@kernel.org \ --cc=dm-devel@redhat.com \ --cc=hch@infradead.org \ --cc=ira.weiny@intel.com \ --cc=linux-fsdevel@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-xfs@vger.kernel.org \ --cc=nvdimm@lists.linux.dev \ --cc=snitzer@redhat.com \ --cc=vgoyal@redhat.com \ --cc=vishal.l.verma@intel.com \ --cc=willy@infradead.org \ --cc=x86@kernel.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.