From: y00318929 <yeyunfeng@huawei.com> To: <catalin.marinas@arm.com>, <will@kernel.org>, <wangkefeng.wang@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <yeyunfeng@huawei.com> Subject: [PATCH 1/5] arm64: mm: Define asid_bitmap structure for pinned_asid Date: Mon, 17 Oct 2022 16:12:54 +0800 [thread overview] Message-ID: <20221017081258.3678830-2-yeyunfeng@huawei.com> (raw) In-Reply-To: <20221017081258.3678830-1-yeyunfeng@huawei.com> From: Yunfeng Ye <yeyunfeng@huawei.com> It is clearer to use the asid_bitmap structure for pinned_asid, and we will use it for isolated asid later. No functional change. Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com> --- arch/arm64/mm/context.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index e1e0dca01839..8549b5f30352 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -17,6 +17,12 @@ #include <asm/smp.h> #include <asm/tlbflush.h> +struct asid_bitmap { + unsigned long *map; + unsigned long nr; + unsigned long max; +}; + static u32 asid_bits; static DEFINE_RAW_SPINLOCK(cpu_asid_lock); @@ -27,9 +33,7 @@ static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; -static unsigned long max_pinned_asids; -static unsigned long nr_pinned_asids; -static unsigned long *pinned_asid_map; +static struct asid_bitmap pinned_asid; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) @@ -90,8 +94,8 @@ static void set_kpti_asid_bits(unsigned long *map) static void set_reserved_asid_bits(void) { - if (pinned_asid_map) - bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS); + if (pinned_asid.map) + bitmap_copy(asid_map, pinned_asid.map, NUM_USER_ASIDS); else if (arm64_kernel_unmapped_at_el0()) set_kpti_asid_bits(asid_map); else @@ -275,7 +279,7 @@ unsigned long 
arm64_mm_context_get(struct mm_struct *mm) unsigned long flags; u64 asid; - if (!pinned_asid_map) + if (!pinned_asid.map) return 0; raw_spin_lock_irqsave(&cpu_asid_lock, flags); @@ -285,7 +289,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm) if (refcount_inc_not_zero(&mm->context.pinned)) goto out_unlock; - if (nr_pinned_asids >= max_pinned_asids) { + if (pinned_asid.nr >= pinned_asid.max) { asid = 0; goto out_unlock; } @@ -299,8 +303,8 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm) atomic64_set(&mm->context.id, asid); } - nr_pinned_asids++; - __set_bit(ctxid2asid(asid), pinned_asid_map); + pinned_asid.nr++; + __set_bit(ctxid2asid(asid), pinned_asid.map); refcount_set(&mm->context.pinned, 1); out_unlock: @@ -321,14 +325,14 @@ void arm64_mm_context_put(struct mm_struct *mm) unsigned long flags; u64 asid = atomic64_read(&mm->context.id); - if (!pinned_asid_map) + if (!pinned_asid.map) return; raw_spin_lock_irqsave(&cpu_asid_lock, flags); if (refcount_dec_and_test(&mm->context.pinned)) { - __clear_bit(ctxid2asid(asid), pinned_asid_map); - nr_pinned_asids--; + __clear_bit(ctxid2asid(asid), pinned_asid.map); + pinned_asid.nr--; } raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); @@ -377,8 +381,8 @@ static int asids_update_limit(void) if (arm64_kernel_unmapped_at_el0()) { num_available_asids /= 2; - if (pinned_asid_map) - set_kpti_asid_bits(pinned_asid_map); + if (pinned_asid.map) + set_kpti_asid_bits(pinned_asid.map); } /* * Expect allocation after rollover to fail if we don't have at least @@ -393,7 +397,7 @@ static int asids_update_limit(void) * even if all CPUs have a reserved ASID and the maximum number of ASIDs * are pinned, there still is at least one empty slot in the ASID map. 
*/ - max_pinned_asids = num_available_asids - num_possible_cpus() - 2; + pinned_asid.max = num_available_asids - num_possible_cpus() - 2; return 0; } arch_initcall(asids_update_limit); @@ -407,8 +411,8 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); - nr_pinned_asids = 0; + pinned_asid.map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); + pinned_asid.nr = 0; /* * We cannot call set_reserved_asid_bits() here because CPU -- 2.27.0
WARNING: multiple messages have this Message-ID (diff)
From: y00318929 <yeyunfeng@huawei.com> To: <catalin.marinas@arm.com>, <will@kernel.org>, <wangkefeng.wang@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <yeyunfeng@huawei.com> Subject: [PATCH 1/5] arm64: mm: Define asid_bitmap structure for pinned_asid Date: Mon, 17 Oct 2022 16:12:54 +0800 [thread overview] Message-ID: <20221017081258.3678830-2-yeyunfeng@huawei.com> (raw) In-Reply-To: <20221017081258.3678830-1-yeyunfeng@huawei.com> From: Yunfeng Ye <yeyunfeng@huawei.com> It is clearer to use the asid_bitmap structure for pinned_asid, and we will use it for isolated asid later. No functional change. Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com> --- arch/arm64/mm/context.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index e1e0dca01839..8549b5f30352 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -17,6 +17,12 @@ #include <asm/smp.h> #include <asm/tlbflush.h> +struct asid_bitmap { + unsigned long *map; + unsigned long nr; + unsigned long max; +}; + static u32 asid_bits; static DEFINE_RAW_SPINLOCK(cpu_asid_lock); @@ -27,9 +33,7 @@ static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; -static unsigned long max_pinned_asids; -static unsigned long nr_pinned_asids; -static unsigned long *pinned_asid_map; +static struct asid_bitmap pinned_asid; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) @@ -90,8 +94,8 @@ static void set_kpti_asid_bits(unsigned long *map) static void set_reserved_asid_bits(void) { - if (pinned_asid_map) - bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS); + if (pinned_asid.map) + bitmap_copy(asid_map, pinned_asid.map, NUM_USER_ASIDS); else if (arm64_kernel_unmapped_at_el0()) set_kpti_asid_bits(asid_map); else @@ -275,7 +279,7 @@ unsigned long 
arm64_mm_context_get(struct mm_struct *mm) unsigned long flags; u64 asid; - if (!pinned_asid_map) + if (!pinned_asid.map) return 0; raw_spin_lock_irqsave(&cpu_asid_lock, flags); @@ -285,7 +289,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm) if (refcount_inc_not_zero(&mm->context.pinned)) goto out_unlock; - if (nr_pinned_asids >= max_pinned_asids) { + if (pinned_asid.nr >= pinned_asid.max) { asid = 0; goto out_unlock; } @@ -299,8 +303,8 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm) atomic64_set(&mm->context.id, asid); } - nr_pinned_asids++; - __set_bit(ctxid2asid(asid), pinned_asid_map); + pinned_asid.nr++; + __set_bit(ctxid2asid(asid), pinned_asid.map); refcount_set(&mm->context.pinned, 1); out_unlock: @@ -321,14 +325,14 @@ void arm64_mm_context_put(struct mm_struct *mm) unsigned long flags; u64 asid = atomic64_read(&mm->context.id); - if (!pinned_asid_map) + if (!pinned_asid.map) return; raw_spin_lock_irqsave(&cpu_asid_lock, flags); if (refcount_dec_and_test(&mm->context.pinned)) { - __clear_bit(ctxid2asid(asid), pinned_asid_map); - nr_pinned_asids--; + __clear_bit(ctxid2asid(asid), pinned_asid.map); + pinned_asid.nr--; } raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); @@ -377,8 +381,8 @@ static int asids_update_limit(void) if (arm64_kernel_unmapped_at_el0()) { num_available_asids /= 2; - if (pinned_asid_map) - set_kpti_asid_bits(pinned_asid_map); + if (pinned_asid.map) + set_kpti_asid_bits(pinned_asid.map); } /* * Expect allocation after rollover to fail if we don't have at least @@ -393,7 +397,7 @@ static int asids_update_limit(void) * even if all CPUs have a reserved ASID and the maximum number of ASIDs * are pinned, there still is at least one empty slot in the ASID map. 
*/ - max_pinned_asids = num_available_asids - num_possible_cpus() - 2; + pinned_asid.max = num_available_asids - num_possible_cpus() - 2; return 0; } arch_initcall(asids_update_limit); @@ -407,8 +411,8 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); - nr_pinned_asids = 0; + pinned_asid.map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); + pinned_asid.nr = 0; /* * We cannot call set_reserved_asid_bits() here because CPU -- 2.27.0 _______________________________________________ linux-arm-kernel mailing list linux-arm-kernel@lists.infradead.org http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
next prev parent reply other threads:[~2022-10-17 8:13 UTC|newest] Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top 2022-10-17 8:12 [PATCH 0/5] Support ASID Isolation mechanism y00318929 2022-10-17 8:12 ` y00318929 2022-10-17 8:12 ` y00318929 [this message] 2022-10-17 8:12 ` [PATCH 1/5] arm64: mm: Define asid_bitmap structure for pinned_asid y00318929 2022-10-17 8:12 ` [PATCH 2/5] arm64: mm: Extract the processing of asid_generation y00318929 2022-10-17 8:12 ` y00318929 2022-10-17 8:12 ` [PATCH 3/5] arm64: mm: Use cpumask in flush_context() y00318929 2022-10-17 8:12 ` y00318929 2022-10-17 8:12 ` [PATCH 4/5] arm64: mm: Support ASID isolation feature y00318929 2022-10-17 8:12 ` y00318929 2022-10-17 8:12 ` [PATCH 5/5] arm64: mm: Add TLB flush trace on context switch y00318929 2022-10-17 8:12 ` y00318929 2022-10-17 8:31 [PATCH 0/5] Support ASID Isolation mechanism Yunfeng Ye 2022-10-17 8:31 ` [PATCH 1/5] arm64: mm: Define asid_bitmap structure for pinned_asid Yunfeng Ye 2022-10-17 8:31 ` Yunfeng Ye
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20221017081258.3678830-2-yeyunfeng@huawei.com \ --to=yeyunfeng@huawei.com \ --cc=catalin.marinas@arm.com \ --cc=linux-arm-kernel@lists.infradead.org \ --cc=linux-kernel@vger.kernel.org \ --cc=wangkefeng.wang@huawei.com \ --cc=will@kernel.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.