From: will.deacon@arm.com (Will Deacon)
To: linux-arm-kernel@lists.infradead.org
Subject: [RESEND PATCH 3/3] ARM: mm: use bitmap operations when allocating new ASIDs
Date: Thu, 20 Sep 2012 17:16:10 +0100
Message-ID: <1348157770-9647-4-git-send-email-will.deacon@arm.com>
In-Reply-To: <1348157770-9647-1-git-send-email-will.deacon@arm.com>

When allocating a new ASID, we must take care not to re-assign a
reserved ASID value to a new mm. This requires us to check each
candidate ASID against those currently reserved by other cores before
assigning a new ASID to the current mm.
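In outline, the current allocator loops like this (a simplified sketch
of the path this patch removes, not the exact code):

	do {
		asid = atomic64_inc_return(&cpu_last_asid);
		if ((asid & ~ASID_MASK) == 0)	/* counter wrapped: rollover */
			flush_context(cpu);
	} while (is_reserved_asid(asid, ~ASID_MASK));	/* O(nr_cpus) scan per candidate */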

This patch improves the ASID allocation algorithm by using a
bitmap-based approach. Rather than iterating over the reserved ASID
array for each candidate ASID, we simply find the first zero bit,
ensuring that those indices corresponding to reserved ASIDs are set
when flushing during a rollover event.
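Roughly, the new allocation path becomes (a sketch of the hunk below;
asid_map is the per-generation bitmap and flush_context() re-populates
it with the ASIDs still live on other cores):

	asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
	if (asid == NUM_USER_ASIDS) {		/* bitmap exhausted: new generation */
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);		/* clear map, re-set reserved ASIDs */
		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
	}
	__set_bit(asid, asid_map);
	asid = generation | IDX_TO_ASID(asid);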

Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm/mm/context.c |   56 +++++++++++++++++++++++++++++++-----------------
 1 files changed, 36 insertions(+), 20 deletions(-)

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 994a7a4..05f0877 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -36,9 +36,14 @@
  * should be unique within all running processes.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-static atomic64_t cpu_last_asid = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
 static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
@@ -111,12 +116,19 @@ arch_initcall(contextidr_notifier_init);
 static void flush_context(unsigned int cpu)
 {
 	int i;
-
-	/* Update the list of reserved ASIDs. */
-	for_each_possible_cpu(i)
-		per_cpu(reserved_asids, i) =
-			atomic64_xchg(&per_cpu(active_asids, i), 0);
-	per_cpu(reserved_asids, cpu) = 0;
+	u64 asid;
+
+	/* Update the list of reserved ASIDs and the ASID bitmap. */
+	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	for_each_possible_cpu(i) {
+		if (i == cpu) {
+			asid = 0;
+		} else {
+			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+			__set_bit(ASID_TO_IDX(asid), asid_map);
+		}
+		per_cpu(reserved_asids, i) = asid;
+	}
 
 	/* Queue a TLB invalidate and flush the I-cache if necessary. */
 	if (!tlb_ops_need_broadcast())
@@ -128,11 +140,11 @@ static void flush_context(unsigned int cpu)
 		__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid, u64 mask)
+static int is_reserved_asid(u64 asid)
 {
 	int cpu;
 	for_each_possible_cpu(cpu)
-		if ((per_cpu(reserved_asids, cpu) & mask) == (asid & mask))
+		if (per_cpu(reserved_asids, cpu) == asid)
 			return 1;
 	return 0;
 }
@@ -140,25 +152,29 @@ static int is_reserved_asid(u64 asid, u64 mask)
 static void new_context(struct mm_struct *mm, unsigned int cpu)
 {
 	u64 asid = mm->context.id;
+	u64 generation = atomic64_read(&asid_generation);
 
-	if (asid != 0 && is_reserved_asid(asid, ULLONG_MAX)) {
+	if (asid != 0 && is_reserved_asid(asid)) {
 		/*
 		 * Our current ASID was active during a rollover, we can
 		 * continue to use it and this was just a false alarm.
 		 */
-		asid = (atomic64_read(&cpu_last_asid) & ASID_MASK) | \
-		       (asid & ~ASID_MASK);
+		asid = generation | (asid & ~ASID_MASK);
 	} else {
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
 		 * as requiring flushes.
 		 */
-		do {
-			asid = atomic64_inc_return(&cpu_last_asid);
-			if ((asid & ~ASID_MASK) == 0)
-				flush_context(cpu);
-		} while (is_reserved_asid(asid, ~ASID_MASK));
+		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		if (asid == NUM_USER_ASIDS) {
+			generation = atomic64_add_return(ASID_FIRST_VERSION,
+							 &asid_generation);
+			flush_context(cpu);
+			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		}
+		__set_bit(asid, asid_map);
+		asid = generation | IDX_TO_ASID(asid);
 		cpumask_clear(mm_cpumask(mm));
 	}
 
@@ -173,8 +189,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
 
-	if (!((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS) &&
-	    atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
 		goto switch_mm_fastpath;
 
 	/*
@@ -185,7 +201,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
+	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
 		new_context(mm, cpu);
 
 	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-- 
1.7.4.1
