From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	Christoph Lameter <cl@linux.com>,
	Lai Jiangshan <laijs@cn.fujitsu.com>,
	Pekka Enberg <penberg@kernel.org>,
	Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH v3 01/14] s390: Use _refcount for pgtables
Date: Wed, 18 Apr 2018 11:48:59 -0700
Message-ID: <20180418184912.2851-2-willy@infradead.org>
In-Reply-To: <20180418184912.2851-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

s390 borrows the storage used for _mapcount in struct page in order to
track whether the bottom or top half of the page is being used for 2kB
page tables.  I want to use that storage for something else, so switch
to the top byte of _refcount instead of the bottom byte of _mapcount.
_refcount may temporarily be incremented by other CPUs that see a stale
pointer to this page in the page cache, but each such CPU can only
increment it by one, and no system today has 2^24 CPUs, so these
speculative references will never reach the upper byte of _refcount.
We do have to be a little careful not to lose any of their writes (as
they will subsequently decrement the counter), which is why the
conversion uses an atomic XOR rather than a plain atomic_set().
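
For illustration only (my sketch, not part of the patch): a minimal
user-space model of the encoding, using C11 atomics.  xor_bits() below
is a stand-in for the kernel's atomic_xor_bits(), which likewise
returns the new value.  Only the "in use" bits (24-25) for the two 2K
halves are modelled; the patch also tracks pending RCU frees in the
upper nibble of the same byte.  The low 24 bits remain an ordinary
reference count that other CPUs may bump concurrently.

  #include <stdatomic.h>
  #include <stdio.h>

  /* stand-in for atomic_xor_bits(): XOR in bits, return the new value */
  static unsigned int xor_bits(atomic_uint *v, unsigned int bits)
  {
          return atomic_fetch_xor(v, bits) ^ bits;
  }

  int main(void)
  {
          atomic_uint refcount = 1;   /* page starts with one reference */
          unsigned int mask;

          /* allocate the first 2K fragment: set bit 24 */
          xor_bits(&refcount, 1U << 24);

          /* another CPU takes a speculative reference meanwhile */
          atomic_fetch_add(&refcount, 1);

          /* allocate the second 2K fragment: set bit 25 */
          mask = xor_bits(&refcount, 1U << 25) >> 24;
          printf("allocation mask %u (3 = both halves in use)\n", mask);

          /* free the first fragment again: clear bit 24 */
          mask = xor_bits(&refcount, 1U << 24) >> 24;
          printf("allocation mask %u\n", mask);

          /* the low 24 bits still hold both references */
          printf("low 24 bits: %u\n", atomic_load(&refcount) & 0xffffff);
          return 0;
  }

Because the mask bits are only ever toggled with XOR, the concurrent
atomic_fetch_add() above is never overwritten; that is the property the
last sentence of the paragraph above relies on.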

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---
 arch/s390/mm/pgalloc.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 562f72955956..84bd6329a88d 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -190,14 +190,15 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_mapcount);
+			mask = atomic_read(&page->_refcount) >> 24;
 			mask = (mask | (mask >> 4)) & 3;
 			if (mask != 3) {
 				table = (unsigned long *) page_to_phys(page);
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_mapcount, 1U << bit);
+				atomic_xor_bits(&page->_refcount,
+							1U << (bit + 24));
 				list_del(&page->lru);
 			}
 		}
@@ -218,12 +219,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_phys(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_set(&page->_mapcount, 3);
+		atomic_xor_bits(&page->_refcount, 3 << 24);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_set(&page->_mapcount, 1);
+		atomic_xor_bits(&page->_refcount, 1 << 24);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -242,7 +243,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		/* Free 2K page table fragment of a 4K page */
 		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
 		spin_lock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
+		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
+		mask >>= 24;
 		if (mask & 3)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
@@ -253,7 +255,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	}
 
 	pgtable_page_dtor(page);
-	atomic_set(&page->_mapcount, -1);
 	__free_page(page);
 }
 
@@ -274,7 +275,8 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	}
 	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
 	spin_lock_bh(&mm->context.lock);
-	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
+	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
+	mask >>= 24;
 	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -296,12 +298,13 @@ static void __tlb_remove_table(void *_table)
 		break;
 	case 1:		/* lower 2K of a 4K page table */
 	case 2:		/* higher 2K of a 4K page table */
-		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
+		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
+		mask >>= 24;
+		if (mask != 0)
 			break;
 		/* fallthrough */
 	case 3:		/* 4K page table with pgstes */
 		pgtable_page_dtor(page);
-		atomic_set(&page->_mapcount, -1);
 		__free_page(page);
 		break;
 	}
-- 
2.17.0

Thread overview: 43+ messages
2018-04-18 18:48 [PATCH v3 00/14] Rearrange struct page Matthew Wilcox
2018-04-18 18:48 ` Matthew Wilcox [this message]
2018-04-18 18:49 ` [PATCH v3 02/14] mm: Split page_type out from _mapcount Matthew Wilcox
2018-04-19  9:04   ` Vlastimil Babka
2018-04-19 11:16     ` Matthew Wilcox
2018-04-20 15:17   ` Christopher Lameter
2018-04-20 20:43     ` Matthew Wilcox
2018-04-18 18:49 ` [PATCH v3 03/14] mm: Mark pages in use for page tables Matthew Wilcox
2018-04-19  9:30   ` Vlastimil Babka
2018-04-18 18:49 ` [PATCH v3 04/14] mm: Switch s_mem and slab_cache in struct page Matthew Wilcox
2018-04-19 11:06   ` Vlastimil Babka
2018-04-19 11:19     ` Matthew Wilcox
2018-04-18 18:49 ` [PATCH v3 05/14] mm: Move 'private' union within " Matthew Wilcox
2018-04-19 11:31   ` Vlastimil Babka
2018-04-20 15:25   ` Christopher Lameter
2018-04-20 20:27     ` Matthew Wilcox
2018-04-30  9:38   ` Kirill A. Shutemov
2018-04-18 18:49 ` [PATCH v3 06/14] mm: Move _refcount out of struct page union Matthew Wilcox
2018-04-19 11:37   ` Vlastimil Babka
2018-04-30  9:40   ` Kirill A. Shutemov
2018-04-18 18:49 ` [PATCH v3 07/14] slub: Remove page->counters Matthew Wilcox
2018-04-19 13:42   ` Vlastimil Babka
2018-04-19 14:23     ` Matthew Wilcox
2018-04-18 18:49 ` [PATCH v3 08/14] mm: Combine first three unions in struct page Matthew Wilcox
2018-04-19 13:46   ` Vlastimil Babka
2018-04-19 14:08     ` Matthew Wilcox
2018-04-30  9:42   ` Kirill A. Shutemov
2018-04-18 18:49 ` [PATCH v3 09/14] mm: Use page->deferred_list Matthew Wilcox
2018-04-19 13:23   ` Vlastimil Babka
2018-04-30  9:43   ` Kirill A. Shutemov
2018-04-18 18:49 ` [PATCH v3 10/14] mm: Move lru union within struct page Matthew Wilcox
2018-04-19 13:56   ` Vlastimil Babka
2018-04-30  9:44   ` Kirill A. Shutemov
2018-04-18 18:49 ` [PATCH v3 11/14] mm: Combine first two unions in " Matthew Wilcox
2018-04-19 14:03   ` Vlastimil Babka
2018-04-30  9:47   ` Kirill A. Shutemov
2018-04-30 12:42     ` Matthew Wilcox
2018-04-30 13:12       ` Kirill A. Shutemov
2018-04-18 18:49 ` [PATCH v3 12/14] mm: Improve struct page documentation Matthew Wilcox
2018-04-18 23:32   ` Randy Dunlap
2018-04-18 23:43     ` Matthew Wilcox
2018-04-18 18:49 ` [PATCH v3 13/14] slab,slub: Remove rcu_head size checks Matthew Wilcox
2018-04-18 18:49 ` [PATCH v3 14/14] slub: Remove kmem_cache->reserved Matthew Wilcox
