From: Julien Grall <julien.grall@arm.com>
To: linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu
Cc: christoffer.dall@arm.com, james.morse@arm.com,
	marc.zyngier@arm.com, julien.thierry@arm.com,
	suzuki.poulose@arm.com, catalin.marinas@arm.com,
	will.deacon@arm.com, Julien Grall <julien.grall@arm.com>
Subject: [PATCH RFC 03/14] arm64/mm: Move bits to asid_info
Date: Thu, 21 Mar 2019 16:36:12 +0000	[thread overview]
Message-ID: <20190321163623.20219-4-julien.grall@arm.com> (raw)
In-Reply-To: <20190321163623.20219-1-julien.grall@arm.com>

The variable asid_bits holds the number of bits composing an ASID,
which is specific to a given ASID allocator. So move it to the
asid_info structure.

Because most of the macros relied on asid_bits, they now take an extra
parameter: a pointer to the asid_info structure.

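For illustration, a minimal user-space sketch of the resulting pattern
(not kernel code: simplified types, a stand-in for GENMASK that assumes
a 64-bit unsigned long, and only the CONFIG_UNMAP_KERNEL_AT_EL0 flavour
of the macros):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's GENMASK(); assumes 64-bit unsigned long. */
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

struct asid_info {
	uint32_t bits;	/* was the file-scope global asid_bits */
};

/* Each helper now derives its result from the allocator it is given,
 * rather than from a global, so several allocators can coexist. */
#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))
#define NUM_USER_ASIDS(info)		(ASID_FIRST_VERSION(info) >> 1)
#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> 1)
#define idx2asid(info, idx)		(((idx) << 1) & ~ASID_MASK(info))

int main(void)
{
	struct asid_info info = { .bits = 16 };

	/* With 16 ASID bits and kernel unmapping enabled, userspace
	 * gets even ASIDs only: (1 << 16) >> 1 == 32768 of them. */
	printf("user ASIDs: %lu\n", NUM_USER_ASIDS(&info));
	/* Index 5 maps to the even ASID 10 (and asid2idx maps it back). */
	printf("idx2asid(5) = %lu\n", idx2asid(&info, 5));
	return 0;
}
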
Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 arch/arm64/mm/context.c | 59 +++++++++++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 29 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index cfe4c5f7abf3..da17ed6c7117 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -27,7 +27,6 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
 
-static u32 asid_bits;
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 
 struct asid_info
@@ -36,6 +35,7 @@ struct asid_info
 	unsigned long	*map;
 	atomic64_t __percpu	*active;
 	u64 __percpu		*reserved;
+	u32			bits;
 } asid_info;
 
 #define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)
@@ -46,17 +46,17 @@ static DEFINE_PER_CPU(u64, reserved_asids);
 
 static cpumask_t tlb_flush_pending;
 
-#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
-#define ASID_FIRST_VERSION	(1UL << asid_bits)
+#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
+#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS(info)		(ASID_FIRST_VERSION(info) >> 1)
+#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> 1)
+#define idx2asid(info, idx)		(((idx) << 1) & ~ASID_MASK(info))
 #else
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
-#define asid2idx(asid)		((asid) & ~ASID_MASK)
-#define idx2asid(idx)		asid2idx(idx)
+#define NUM_USER_ASIDS(info)		(ASID_FIRST_VERSION(info))
+#define asid2idx(info, asid)		((asid) & ~ASID_MASK(info))
+#define idx2asid(info, idx)		asid2idx(info, idx)
 #endif
 
 /* Get the ASIDBits supported by the current CPU */
@@ -86,13 +86,13 @@ void verify_cpu_asid_bits(void)
 {
 	u32 asid = get_cpu_asid_bits();
 
-	if (asid < asid_bits) {
+	if (asid < asid_info.bits) {
 		/*
 		 * We cannot decrease the ASID size at runtime, so panic if we support
 		 * fewer ASID bits than the boot CPU.
 		 */
 		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
-				smp_processor_id(), asid, asid_bits);
+				smp_processor_id(), asid, asid_info.bits);
 		cpu_panic_kernel();
 	}
 }
@@ -103,7 +103,7 @@ static void flush_context(struct asid_info *info)
 	u64 asid;
 
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
-	bitmap_clear(info->map, 0, NUM_USER_ASIDS);
+	bitmap_clear(info->map, 0, NUM_USER_ASIDS(info));
 
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
@@ -116,7 +116,7 @@ static void flush_context(struct asid_info *info)
 		 */
 		if (asid == 0)
 			asid = reserved_asid(info, i);
-		__set_bit(asid2idx(asid), info->map);
+		__set_bit(asid2idx(info, asid), info->map);
 		reserved_asid(info, i) = asid;
 	}
 
@@ -159,7 +159,7 @@ static u64 new_context(struct asid_info *info, struct mm_struct *mm)
 	u64 generation = atomic64_read(&info->generation);
 
 	if (asid != 0) {
-		u64 newasid = generation | (asid & ~ASID_MASK);
+		u64 newasid = generation | (asid & ~ASID_MASK(info));
 
 		/*
 		 * If our current ASID was active during a rollover, we
@@ -172,7 +172,7 @@ static u64 new_context(struct asid_info *info, struct mm_struct *mm)
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		if (!__test_and_set_bit(asid2idx(asid), info->map))
+		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
 			return newasid;
 	}
 
@@ -183,22 +183,22 @@ static u64 new_context(struct asid_info *info, struct mm_struct *mm)
 	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
 	 * pairs.
 	 */
-	asid = find_next_zero_bit(info->map, NUM_USER_ASIDS, cur_idx);
-	if (asid != NUM_USER_ASIDS)
+	asid = find_next_zero_bit(info->map, NUM_USER_ASIDS(info), cur_idx);
+	if (asid != NUM_USER_ASIDS(info))
 		goto set_asid;
 
 	/* We're out of ASIDs, so increment the global generation count */
-	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
+	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
 						 &info->generation);
 	flush_context(info);
 
 	/* We have more ASIDs than CPUs, so this will always succeed */
-	asid = find_next_zero_bit(info->map, NUM_USER_ASIDS, 1);
+	asid = find_next_zero_bit(info->map, NUM_USER_ASIDS(info), 1);
 
 set_asid:
 	__set_bit(asid, info->map);
 	cur_idx = asid;
-	return idx2asid(asid) | generation;
+	return idx2asid(info, asid) | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
@@ -228,7 +228,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 	 */
 	old_active_asid = atomic64_read(&active_asid(info, cpu));
 	if (old_active_asid &&
-	    !((asid ^ atomic64_read(&info->generation)) >> asid_bits) &&
+	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
 	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
 				     old_active_asid, asid))
 		goto switch_mm_fastpath;
@@ -236,7 +236,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
 	asid = atomic64_read(&mm->context.id);
-	if ((asid ^ atomic64_read(&info->generation)) >> asid_bits) {
+	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
 		asid = new_context(info, mm);
 		atomic64_set(&mm->context.id, asid);
 	}
@@ -272,23 +272,24 @@ static int asids_init(void)
 {
 	struct asid_info *info = &asid_info;
 
-	asid_bits = get_cpu_asid_bits();
+	info->bits = get_cpu_asid_bits();
 	/*
 	 * Expect allocation after rollover to fail if we don't have at least
 	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
 	 */
-	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
-	atomic64_set(&info->generation, ASID_FIRST_VERSION);
-	info->map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*info->map),
-			    GFP_KERNEL);
+	WARN_ON(NUM_USER_ASIDS(info) - 1 <= num_possible_cpus());
+	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
+	info->map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS(info)),
+			    sizeof(*info->map), GFP_KERNEL);
 	if (!info->map)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
-		      NUM_USER_ASIDS);
+		      NUM_USER_ASIDS(info));
 
 	info->active = &active_asids;
 	info->reserved = &reserved_asids;
 
-	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
+	pr_info("ASID allocator initialised with %lu entries\n",
+		NUM_USER_ASIDS(info));
 	return 0;
 }
 early_initcall(asids_init);
-- 
2.11.0


Thread overview: 211+ messages
2019-03-21 16:36 [PATCH RFC 00/14] kvm/arm: Align the VMID allocation with the arm64 ASID one Julien Grall
2019-03-21 16:36 ` [PATCH RFC 01/14] arm64/mm: Introduce asid_info structure and move asid_generation/asid_map to it Julien Grall
2019-03-21 17:03   ` Suzuki K Poulose
2019-03-21 17:27     ` Julien Grall
2019-03-21 16:36 ` [PATCH RFC 02/14] arm64/mm: Move active_asids and reserved_asids to asid_info Julien Grall
2019-03-21 16:36 ` [PATCH RFC 03/14] arm64/mm: Move bits to asid_info Julien Grall [this message]
2019-03-21 16:36 ` [PATCH RFC 04/14] arm64/mm: Move the variable lock and tlb_flush_pending to asid_info Julien Grall
2019-03-21 16:36 ` [PATCH RFC 05/14] arm64/mm: Remove dependency on MM in new_context Julien Grall
2019-03-21 16:36 ` [PATCH RFC 06/14] arm64/mm: Store the number of asid allocated per context Julien Grall
2019-03-21 16:36 ` [PATCH RFC 07/14] arm64/mm: Introduce NUM_ASIDS Julien Grall
2019-03-21 16:36 ` [PATCH RFC 08/14] arm64/mm: Split asid_inits in 2 parts Julien Grall
2019-03-21 16:36 ` [PATCH RFC 09/14] arm64/mm: Split the function check_and_switch_context in 3 parts Julien Grall
2019-03-21 16:36 ` [PATCH RFC 10/14] arm64/mm: Introduce a callback to flush the local context Julien Grall
2019-03-21 16:36 ` [PATCH RFC 11/14] arm64: Move the ASID allocator code in a separate file Julien Grall
2019-06-05 16:56   ` Julien Grall
2019-06-05 20:41     ` Palmer Dabbelt
2019-06-11  1:56       ` Gary Guo
2019-06-19  8:07     ` Guo Ren
2019-06-19  8:54       ` Julien Grall
2019-06-19  9:12         ` Will Deacon
2019-06-19 12:18           ` Guo Ren
2019-06-19 12:39             ` Will Deacon
2019-06-20  9:33               ` Guo Ren
2019-06-24 10:40                 ` Will Deacon
2019-06-25  7:25                   ` Palmer Dabbelt
2019-09-07 23:52                   ` Guo Ren
2019-09-12 14:02                     ` Will Deacon
2019-09-12 14:59                       ` Guo Ren
2019-09-13  7:13                         ` Guo Ren
2019-09-14  8:49                           ` Guo Ren
2019-09-16 12:57                           ` Jean-Philippe Brucker
2019-09-19 13:07                             ` Guo Ren
2019-09-19 15:18                               ` Jean-Philippe Brucker
2019-09-20  0:07                                 ` Guo Ren
2019-09-20  7:18                                   ` Jean-Philippe Brucker
2019-09-14 14:01                       ` Palmer Dabbelt
2019-09-15  5:03                         ` Anup Patel
2019-09-16 18:18                           ` Will Deacon
2019-09-16 18:28                             ` Palmer Dabbelt
2019-09-17  3:42                             ` Anup Patel
2019-09-19 13:36                               ` Guo Ren
2019-06-19 11:51         ` Guo Ren
2019-06-19 12:52           ` Julien Grall
2019-06-21 14:16           ` Catalin Marinas
2019-06-23 16:35             ` Guo Ren
2019-06-24 10:22               ` Will Deacon
2019-06-27  9:41                 ` qi.fuli
2019-06-27 10:26                   ` Will Deacon
2019-06-24 15:38               ` Catalin Marinas
2019-06-30  4:29                 ` Guo Ren
2019-07-01  9:17                   ` Catalin Marinas
2019-07-16  3:31                     ` Guo Ren
2019-07-22 16:38                       ` Catalin Marinas
2019-03-21 16:36 ` [PATCH RFC 12/14] arm64/lib: asid: Allow user to update the context under the lock Julien Grall
2019-03-21 16:36 ` [PATCH RFC 13/14] arm/kvm: Introduce a new VMID allocator Julien Grall
2019-03-21 16:36 ` [PATCH RFC 14/14] kvm/arm: Align the VMID allocation with the arm64 ASID one Julien Grall
