From: Yunfeng Ye <yeyunfeng@huawei.com>
To: <catalin.marinas@arm.com>, <will@kernel.org>,
	<wangkefeng.wang@huawei.com>,
	<linux-arm-kernel@lists.infradead.org>,
	<linux-kernel@vger.kernel.org>, <yeyunfeng@huawei.com>
Cc: <linfeilong@huawei.com>
Subject: [PATCH 1/5] arm64: mm: Define asid_bitmap structure for pinned_asid
Date: Mon, 17 Oct 2022 16:31:59 +0800
Message-ID: <20221017083203.3690346-2-yeyunfeng@huawei.com>
In-Reply-To: <20221017083203.3690346-1-yeyunfeng@huawei.com>

It is clearer to use the asid_bitmap structure for pinned_asid, and we
will reuse it for the isolated ASID later.

No functional change.
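
For illustration only (not part of the patch), here is a minimal
userspace sketch of the pattern being introduced: the three loose
globals become one struct asid_bitmap, and the pin path checks nr
against max before setting a bit. Kernel helpers such as
bitmap_zalloc() and __set_bit() are stood in for by calloc() and a
hand-rolled bit helper:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Mirrors the struct added by this patch: one object instead of three
 * loose globals (pinned_asid_map / nr_pinned_asids / max_pinned_asids).
 */
struct asid_bitmap {
	unsigned long *map;	/* allocation bitmap, one bit per ASID */
	unsigned long nr;	/* bits currently set */
	unsigned long max;	/* upper bound enforced by the caller */
};

static struct asid_bitmap pinned_asid;

/* Stand-in for the kernel's __set_bit(). */
static void set_bit_ul(unsigned long bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int pin(unsigned long asid)
{
	/* Same bail-out shape as arm64_mm_context_get() below. */
	if (!pinned_asid.map || pinned_asid.nr >= pinned_asid.max)
		return -1;
	set_bit_ul(asid, pinned_asid.map);
	pinned_asid.nr++;
	return 0;
}

int main(void)
{
	unsigned long num_asids = 256;	/* hypothetical NUM_USER_ASIDS */

	pinned_asid.map = calloc(num_asids / BITS_PER_LONG + 1,
				 sizeof(unsigned long));
	pinned_asid.max = num_asids - 4;	/* arbitrary headroom for the demo */

	printf("pin asid 42: %s\n", pin(42) ? "failed" : "ok");
	printf("pinned: %lu of %lu\n", pinned_asid.nr, pinned_asid.max);
	free(pinned_asid.map);
	return 0;
}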

Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
---
 arch/arm64/mm/context.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index e1e0dca01839..8549b5f30352 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -17,6 +17,12 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
 
+struct asid_bitmap {
+	unsigned long *map;
+	unsigned long nr;
+	unsigned long max;
+};
+
 static u32 asid_bits;
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 
@@ -27,9 +33,7 @@ static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
-static unsigned long max_pinned_asids;
-static unsigned long nr_pinned_asids;
-static unsigned long *pinned_asid_map;
+static struct asid_bitmap pinned_asid;
 
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
@@ -90,8 +94,8 @@ static void set_kpti_asid_bits(unsigned long *map)
 
 static void set_reserved_asid_bits(void)
 {
-	if (pinned_asid_map)
-		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
+	if (pinned_asid.map)
+		bitmap_copy(asid_map, pinned_asid.map, NUM_USER_ASIDS);
 	else if (arm64_kernel_unmapped_at_el0())
 		set_kpti_asid_bits(asid_map);
 	else
@@ -275,7 +279,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 	unsigned long flags;
 	u64 asid;
 
-	if (!pinned_asid_map)
+	if (!pinned_asid.map)
 		return 0;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
@@ -285,7 +289,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 	if (refcount_inc_not_zero(&mm->context.pinned))
 		goto out_unlock;
 
-	if (nr_pinned_asids >= max_pinned_asids) {
+	if (pinned_asid.nr >= pinned_asid.max) {
 		asid = 0;
 		goto out_unlock;
 	}
@@ -299,8 +303,8 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 		atomic64_set(&mm->context.id, asid);
 	}
 
-	nr_pinned_asids++;
-	__set_bit(ctxid2asid(asid), pinned_asid_map);
+	pinned_asid.nr++;
+	__set_bit(ctxid2asid(asid), pinned_asid.map);
 	refcount_set(&mm->context.pinned, 1);
 
 out_unlock:
@@ -321,14 +325,14 @@ void arm64_mm_context_put(struct mm_struct *mm)
 	unsigned long flags;
 	u64 asid = atomic64_read(&mm->context.id);
 
-	if (!pinned_asid_map)
+	if (!pinned_asid.map)
 		return;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 
 	if (refcount_dec_and_test(&mm->context.pinned)) {
-		__clear_bit(ctxid2asid(asid), pinned_asid_map);
-		nr_pinned_asids--;
+		__clear_bit(ctxid2asid(asid), pinned_asid.map);
+		pinned_asid.nr--;
 	}
 
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
@@ -377,8 +381,8 @@ static int asids_update_limit(void)
 
 	if (arm64_kernel_unmapped_at_el0()) {
 		num_available_asids /= 2;
-		if (pinned_asid_map)
-			set_kpti_asid_bits(pinned_asid_map);
+		if (pinned_asid.map)
+			set_kpti_asid_bits(pinned_asid.map);
 	}
 	/*
 	 * Expect allocation after rollover to fail if we don't have at least
@@ -393,7 +397,7 @@ static int asids_update_limit(void)
 	 * even if all CPUs have a reserved ASID and the maximum number of ASIDs
 	 * are pinned, there still is at least one empty slot in the ASID map.
 	 */
-	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+	pinned_asid.max = num_available_asids - num_possible_cpus() - 2;
 	return 0;
 }
 arch_initcall(asids_update_limit);
@@ -407,8 +411,8 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
-	pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
-	nr_pinned_asids = 0;
+	pinned_asid.map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
+	pinned_asid.nr = 0;
 
 	/*
 	 * We cannot call set_reserved_asid_bits() here because CPU
-- 
2.27.0
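
To make the pinned-ASID limit set in asids_update_limit() concrete,
here is the same arithmetic as a standalone sketch. The ASID width and
CPU count are assumptions chosen for the example, not values taken from
the patch:

#include <stdio.h>

int main(void)
{
	unsigned long num_available_asids = 1UL << 16;	/* assume 16-bit ASIDs */
	unsigned long num_cpus = 64;	/* hypothetical num_possible_cpus() */

	num_available_asids /= 2;	/* KPTI halves the usable space */

	/* One ASID reserved per CPU after rollover, plus headroom so the
	 * map always keeps at least one empty slot (see the comment in
	 * asids_update_limit()).
	 */
	printf("pinned_asid.max = %lu\n",
	       num_available_asids - num_cpus - 2);	/* prints 32702 */
	return 0;
}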


Thread overview: 22+ messages

2022-10-17  8:31 [PATCH 0/5] Support ASID Isolation mechanism Yunfeng Ye
2022-10-17  8:31 ` [PATCH 1/5] arm64: mm: Define asid_bitmap structure for pinned_asid Yunfeng Ye [this message]
2022-10-17  8:32 ` [PATCH 2/5] arm64: mm: Extract the processing of asid_generation Yunfeng Ye
2022-10-17  8:32 ` [PATCH 3/5] arm64: mm: Use cpumask in flush_context() Yunfeng Ye
2022-10-17  8:32 ` [PATCH 4/5] arm64: mm: Support ASID isolation feature Yunfeng Ye
2022-11-09 12:43   ` Catalin Marinas
2022-11-10  7:07     ` Yunfeng Ye
2022-11-28 17:00       ` Catalin Marinas
2022-11-29 12:26         ` Yunfeng Ye
2022-10-17  8:32 ` [PATCH 5/5] arm64: mm: Add TLB flush trace on context switch Yunfeng Ye
