From: Christoph Hellwig <hch@infradead.org>
To: Alistair Popple <apopple@nvidia.com>
Cc: linux-mm@kvack.org, nouveau@lists.freedesktop.org,
bskeggs@redhat.com, akpm@linux-foundation.org,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
dri-devel@lists.freedesktop.org, jhubbard@nvidia.com,
rcampbell@nvidia.com, jglisse@redhat.com, jgg@nvidia.com,
hch@infradead.org, daniel@ffwll.ch
Subject: Re: [PATCH v3 1/8] mm: Remove special swap entry functions
Date: Fri, 26 Feb 2021 15:59:09 +0000 [thread overview]
Message-ID: <20210226155909.GA2907711@infradead.org> (raw)
In-Reply-To: <20210226071832.31547-2-apopple@nvidia.com>
> - struct page *page = migration_entry_to_page(entry);
> + struct page *page = pfn_to_page(swp_offset(entry));
I wonder if keeping a single special_entry_to_page() helper would still
be useful. But I'm not entirely sure. There are also two more open
coded copies of this in the THP migration code.
> -#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
> -#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
> +#define free_swap_and_cache(e) is_special_entry(e)
> +#define swapcache_prepare(e) is_special_entry(e)
Staring at this I'm really, really confused at what this is doing.
Looking a little closer these are the !CONFIG_SWAP stubs, but it could
probably use a comment or two.
> } else if (is_migration_entry(entry)) {
> - page = migration_entry_to_page(entry);
> + page = pfn_to_page(swp_offset(entry));
>
> rss[mm_counter(page)]++;
>
> @@ -737,7 +737,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
> set_pte_at(src_mm, addr, src_pte, pte);
> }
> } else if (is_device_private_entry(entry)) {
> - page = device_private_entry_to_page(entry);
> + page = pfn_to_page(swp_offset(entry));
>
> /*
> * Update rss count even for unaddressable pages, as
> @@ -1274,7 +1274,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
>
> entry = pte_to_swp_entry(ptent);
> if (is_device_private_entry(entry)) {
> - struct page *page = device_private_entry_to_page(entry);
> + struct page *page = pfn_to_page(swp_offset(entry));
>
> if (unlikely(details && details->check_mapping)) {
> /*
> @@ -1303,7 +1303,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
> else if (is_migration_entry(entry)) {
> struct page *page;
>
> - page = migration_entry_to_page(entry);
> + page = pfn_to_page(swp_offset(entry));
> rss[mm_counter(page)]--;
> }
> if (unlikely(!free_swap_and_cache(entry)))
> @@ -3271,7 +3271,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> migration_entry_wait(vma->vm_mm, vmf->pmd,
> vmf->address);
> } else if (is_device_private_entry(entry)) {
> - vmf->page = device_private_entry_to_page(entry);
> + vmf->page = pfn_to_page(swp_offset(entry));
> ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
> } else if (is_hwpoison_entry(entry)) {
> ret = VM_FAULT_HWPOISON;
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 20ca887ea769..72adcc3d8f5b 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -321,7 +321,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
> if (!is_migration_entry(entry))
> goto out;
>
> - page = migration_entry_to_page(entry);
> + page = pfn_to_page(swp_offset(entry));
>
> /*
> * Once page cache replacement of page migration started, page_count
> @@ -361,7 +361,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
> ptl = pmd_lock(mm, pmd);
> if (!is_pmd_migration_entry(*pmd))
> goto unlock;
> - page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
> + page = pfn_to_page(swp_offset(pmd_to_swp_entry(*pmd)));
> if (!get_page_unless_zero(page))
> goto unlock;
> spin_unlock(ptl);
> @@ -2437,7 +2437,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
> if (!is_device_private_entry(entry))
> goto next;
>
> - page = device_private_entry_to_page(entry);
> + page = pfn_to_page(swp_offset(entry));
> if (!(migrate->flags &
> MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
> page->pgmap->owner != migrate->pgmap_owner)
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index 86e3a3688d59..34230d08556a 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -96,7 +96,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
> if (!is_migration_entry(entry))
> return false;
>
> - pfn = migration_entry_to_pfn(entry);
> + pfn = swp_offset(entry);
> } else if (is_swap_pte(*pvmw->pte)) {
> swp_entry_t entry;
>
> @@ -105,7 +105,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
> if (!is_device_private_entry(entry))
> return false;
>
> - pfn = device_private_entry_to_pfn(entry);
> + pfn = swp_offset(entry);
> } else {
> if (!pte_present(*pvmw->pte))
> return false;
> @@ -200,7 +200,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
> swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
>
> - if (migration_entry_to_page(entry) != page)
> + if (pfn_to_page(swp_offset(entry)) != page)
> return not_found(pvmw);
> return true;
> }
> --
> 2.20.1
>
---end quoted text---
next prev parent reply other threads:[~2021-02-26 15:59 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-02-26 7:18 [PATCH v3 0/8] Add support for SVM atomics in Nouveau Alistair Popple
2021-02-26 7:18 ` [PATCH v3 1/8] mm: Remove special swap entry functions Alistair Popple
2021-02-26 15:59 ` Christoph Hellwig [this message]
2021-03-02 8:52 ` Alistair Popple
2021-03-01 17:46 ` Jason Gunthorpe
2021-03-02 0:21 ` Alistair Popple
2021-02-26 7:18 ` [PATCH v3 2/8] mm/swapops: Rework swap entry manipulation code Alistair Popple
2021-02-26 16:00 ` Christoph Hellwig
2021-03-01 17:47 ` Jason Gunthorpe
2021-02-26 7:18 ` [PATCH v3 3/8] mm/rmap: Split try_to_munlock from try_to_unmap Alistair Popple
2021-02-26 16:01 ` Christoph Hellwig
2021-03-01 16:10 ` Jason Gunthorpe
2021-03-04 4:27 ` Alistair Popple
2021-02-26 7:18 ` [PATCH v3 4/8] mm/rmap: Split migration into its own function Alistair Popple
2021-02-26 16:03 ` Christoph Hellwig
[not found] ` <E93F89E1-3CE2-4CA3-97D9-6BCED78E1001@nvidia.com>
2021-03-04 23:54 ` Alistair Popple
2021-02-26 7:18 ` [PATCH v3 5/8] mm: Device exclusive memory access Alistair Popple
2021-03-01 17:54 ` Jason Gunthorpe
2021-03-01 22:55 ` Ralph Campbell
2021-03-02 0:05 ` Jason Gunthorpe
2021-03-02 8:57 ` Alistair Popple
[not found] ` <20210302124152.GF4247@nvidia.com>
2021-03-04 5:20 ` Alistair Popple
2021-02-26 7:18 ` [PATCH v3 6/8] mm: Selftests for exclusive device memory Alistair Popple
2021-03-01 17:55 ` Jason Gunthorpe
2021-03-01 18:07 ` Ralph Campbell
2021-03-01 23:14 ` Ralph Campbell
2021-03-02 9:12 ` Alistair Popple
2021-02-26 7:18 ` [PATCH v3 7/8] nouveau/svm: Refactor nouveau_range_fault Alistair Popple
2021-02-26 7:18 ` [PATCH v3 8/8] nouveau/svm: Implement atomic SVM access Alistair Popple
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210226155909.GA2907711@infradead.org \
--to=hch@infradead.org \
--cc=akpm@linux-foundation.org \
--cc=apopple@nvidia.com \
--cc=bskeggs@redhat.com \
--cc=daniel@ffwll.ch \
--cc=dri-devel@lists.freedesktop.org \
--cc=jgg@nvidia.com \
--cc=jglisse@redhat.com \
--cc=jhubbard@nvidia.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=nouveau@lists.freedesktop.org \
--cc=rcampbell@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).