From: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
To: <linux-arm-kernel@lists.infradead.org>,
	<kvmarm@lists.cs.columbia.edu>, <linux-kernel@vger.kernel.org>
Cc: <maz@kernel.org>, <will@kernel.org>, <catalin.marinas@arm.com>,
	<james.morse@arm.com>, <julien.thierry.kdev@gmail.com>,
	<suzuki.poulose@arm.com>, <jean-philippe@linaro.org>,
	<julien@xen.org>, <linuxarm@huawei.com>
Subject: [PATCH v4 07/16] arm64/mm: Move Pinned ASID related variables to asid_info
Date: Wed, 14 Apr 2021 12:23:03 +0100	[thread overview]
Message-ID: <20210414112312.13704-8-shameerali.kolothum.thodi@huawei.com> (raw)
In-Reply-To: <20210414112312.13704-1-shameerali.kolothum.thodi@huawei.com>

The pinned ASID variables hold information for a given ASID allocator,
so move them into the asid_info structure with the rest of the
allocator's state.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
 arch/arm64/mm/context.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 0f11d7c7f6a3..8af54e06f5bc 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -28,6 +28,10 @@ static struct asid_info
 	raw_spinlock_t		lock;
 	/* Which CPU requires context flush on next call */
 	cpumask_t		flush_pending;
+	/* Pinned ASIDs info */
+	unsigned long		*pinned_map;
+	unsigned long		max_pinned_asids;
+	unsigned long		nr_pinned_asids;
 } asid_info;
 
 #define active_asid(info, cpu)	 (*per_cpu_ptr((info)->active, cpu))
@@ -36,10 +40,6 @@ static struct asid_info
 static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 
-static unsigned long max_pinned_asids;
-static unsigned long nr_pinned_asids;
-static unsigned long *pinned_asid_map;
-
 #define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
 #define NUM_CTXT_ASIDS(info)		(1UL << ((info)->bits))
 #define ASID_FIRST_VERSION(info)        NUM_CTXT_ASIDS(info)
@@ -99,8 +99,8 @@ static void set_kpti_asid_bits(struct asid_info *info, unsigned long *map)
 
 static void set_reserved_asid_bits(struct asid_info *info)
 {
-	if (pinned_asid_map)
-		bitmap_copy(info->map, pinned_asid_map, NUM_CTXT_ASIDS(info));
+	if (info->pinned_map)
+		bitmap_copy(info->map, info->pinned_map, NUM_CTXT_ASIDS(info));
 	else if (arm64_kernel_unmapped_at_el0())
 		set_kpti_asid_bits(info, info->map);
 	else
@@ -287,7 +287,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 	u64 asid;
 	struct asid_info *info = &asid_info;
 
-	if (!pinned_asid_map)
+	if (!info->pinned_map)
 		return 0;
 
 	raw_spin_lock_irqsave(&info->lock, flags);
@@ -297,7 +297,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 	if (refcount_inc_not_zero(&mm->context.pinned))
 		goto out_unlock;
 
-	if (nr_pinned_asids >= max_pinned_asids) {
+	if (info->nr_pinned_asids >= info->max_pinned_asids) {
 		asid = 0;
 		goto out_unlock;
 	}
@@ -311,8 +311,8 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 		atomic64_set(&mm->context.id, asid);
 	}
 
-	nr_pinned_asids++;
-	__set_bit(asid2idx(info, asid), pinned_asid_map);
+	info->nr_pinned_asids++;
+	__set_bit(asid2idx(info, asid), info->pinned_map);
 	refcount_set(&mm->context.pinned, 1);
 
 out_unlock:
@@ -334,14 +334,14 @@ void arm64_mm_context_put(struct mm_struct *mm)
 	struct asid_info *info = &asid_info;
 	u64 asid = atomic64_read(&mm->context.id);
 
-	if (!pinned_asid_map)
+	if (!info->pinned_map)
 		return;
 
 	raw_spin_lock_irqsave(&info->lock, flags);
 
 	if (refcount_dec_and_test(&mm->context.pinned)) {
-		__clear_bit(asid2idx(info, asid), pinned_asid_map);
-		nr_pinned_asids--;
+		__clear_bit(asid2idx(info, asid), info->pinned_map);
+		info->nr_pinned_asids--;
 	}
 
 	raw_spin_unlock_irqrestore(&info->lock, flags);
@@ -391,8 +391,8 @@ static int asids_update_limit(void)
 
 	if (arm64_kernel_unmapped_at_el0()) {
 		num_available_asids /= 2;
-		if (pinned_asid_map)
-			set_kpti_asid_bits(info, pinned_asid_map);
+		if (info->pinned_map)
+			set_kpti_asid_bits(info, info->pinned_map);
 	}
 	/*
 	 * Expect allocation after rollover to fail if we don't have at least
@@ -407,7 +407,7 @@ static int asids_update_limit(void)
 	 * even if all CPUs have a reserved ASID and the maximum number of ASIDs
 	 * are pinned, there still is at least one empty slot in the ASID map.
 	 */
-	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+	info->max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
 	return 0;
 }
 arch_initcall(asids_update_limit);
@@ -429,9 +429,9 @@ static int asids_init(void)
 	info->reserved = &reserved_asids;
 	raw_spin_lock_init(&info->lock);
 
-	pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
-				  sizeof(*pinned_asid_map), GFP_KERNEL);
-	nr_pinned_asids = 0;
+	info->pinned_map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
+				   sizeof(*info->pinned_map), GFP_KERNEL);
+	info->nr_pinned_asids = 0;
 
 	/*
 	 * We cannot call set_reserved_asid_bits() here because CPU
-- 
2.17.1
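
To see the net effect outside the diff: the three file-scope globals
(pinned_asid_map, max_pinned_asids, nr_pinned_asids) become members of the
allocator descriptor, so every access is qualified by an asid_info instance
and the state can be instantiated per allocator, which the rest of this
series builds on. Below is a minimal standalone sketch of that pattern in
plain C; the field names and the bitmap sizing mirror the patch, but the
8-bit ASID space, the helper functions and the userspace allocation are
simplifications for illustration, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

#define ASID_BITS	8		/* assumption: an 8-bit ASID space */
#define NUM_CTXT_ASIDS	(1UL << ASID_BITS)
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct asid_info {
	/* ... map, bits, lock and the other fields are elided ... */
	/* Pinned ASIDs info: previously three file-scope globals */
	unsigned long	*pinned_map;
	unsigned long	max_pinned_asids;
	unsigned long	nr_pinned_asids;
};

/* Mirrors the asids_init() hunk: the bitmap is now allocated per instance. */
static int pinned_asids_init(struct asid_info *info)
{
	info->pinned_map = calloc(BITS_TO_LONGS(NUM_CTXT_ASIDS),
				  sizeof(*info->pinned_map));
	info->nr_pinned_asids = 0;
	return info->pinned_map ? 0 : -1;
}

/* Mirrors the bookkeeping in arm64_mm_context_get(), minus the locking. */
static void pin_asid(struct asid_info *info, unsigned long idx)
{
	info->nr_pinned_asids++;
	info->pinned_map[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
}

int main(void)
{
	struct asid_info info = { 0 };

	if (pinned_asids_init(&info))
		return 1;
	pin_asid(&info, 5);
	printf("pinned ASIDs: %lu\n", info.nr_pinned_asids);
	free(info.pinned_map);
	return 0;
}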

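A worked example of the limit set at the end of asids_update_limit(): with
16-bit ASIDs and KPTI enabled, half of the ASID space goes to the
unmapped-at-EL0 variants, leaving 65536 / 2 = 32768 usable ASIDs; with 8
possible CPUs the pin limit is then 32768 - 8 - 2 = 32758. Subtracting
num_possible_cpus() covers one reserved ASID per CPU, and the further "- 2"
leaves room for ASID #0 (reserved for init_mm) and the one slot that must
stay empty so allocation after rollover can succeed, as the comment in the
hunk explains. The sketch below restates that arithmetic; the 16-bit ASID
space, KPTI state and CPU count are assumed values for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long num_asids = 1UL << 16;	/* assume 16-bit ASIDs */
	int kpti_enabled = 1;			/* assume KPTI is on */
	unsigned long possible_cpus = 8;	/* assumed CPU count */
	unsigned long available = kpti_enabled ? num_asids / 2 : num_asids;

	/* One reserved ASID per possible CPU, ASID #0, one empty slot. */
	unsigned long max_pinned_asids = available - possible_cpus - 2;

	printf("max_pinned_asids = %lu\n", max_pinned_asids);	/* 32758 */
	return 0;
}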

Thread overview: 57+ messages

2021-04-14 11:22 [PATCH v4 00/16] kvm/arm: Align the VMID allocation with the arm64 ASID one Shameer Kolothum
2021-04-14 11:22 ` [PATCH v4 01/16] arm64/mm: Introduce asid_info structure and move asid_generation/asid_map to it Shameer Kolothum
2021-04-14 11:22 ` [PATCH v4 02/16] arm64/mm: Move active_asids and reserved_asids to asid_info Shameer Kolothum
2021-04-14 11:22 ` [PATCH v4 03/16] arm64/mm: Move bits to asid_info Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 04/16] arm64/mm: Move the variable lock and tlb_flush_pending to asid_info Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 05/16] arm64/mm: Remove dependency on MM in new_context Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 06/16] arm64/mm: Introduce NUM_CTXT_ASIDS Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 07/16] arm64/mm: Move Pinned ASID related variables to asid_info Shameer Kolothum [this message]
2021-04-14 11:23 ` [PATCH v4 08/16] arm64/mm: Split asid_inits in 2 parts Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 09/16] arm64/mm: Split the function check_and_switch_context in 3 parts Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 10/16] arm64/mm: Split the arm64_mm_context_get/put Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 11/16] arm64/mm: Introduce a callback to flush the local context Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 12/16] arm64/mm: Introduce a callback to set reserved bits Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 13/16] arm64: Move the ASID allocator code in a separate file Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 14/16] arm64/lib: Add an helper to free memory allocated by the ASID allocator Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 15/16] arch/arm64: Introduce a capability to tell whether 16-bit VMID is available Shameer Kolothum
2021-04-14 11:23 ` [PATCH v4 16/16] kvm/arm: Align the VMID allocation with the arm64 ASID one Shameer Kolothum
2021-04-22 16:08 ` [PATCH v4 00/16] kvm/arm: Align the VMID allocation with the arm64 ASID one Will Deacon
2021-04-23  8:31   ` Shameerali Kolothum Thodi
