From: Julien Grall <julien.grall@arm.com> To: linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org, kvmarm@lists.cs.columbia.edu Cc: christoffer.dall@arm.com, james.morse@arm.com, marc.zyngier@arm.com, julien.thierry@arm.com, suzuki.poulose@arm.com, catalin.marinas@arm.com, will.deacon@arm.com, Julien Grall <julien.grall@arm.com> Subject: [PATCH RFC 01/14] arm64/mm: Introduce asid_info structure and move asid_generation/asid_map to it Date: Thu, 21 Mar 2019 16:36:10 +0000 [thread overview] Message-ID: <20190321163623.20219-2-julien.grall@arm.com> (raw) In-Reply-To: <20190321163623.20219-1-julien.grall@arm.com> In an attempt to make the ASID allocator generic, create a new structure asid_info to store all the information necessary for the allocator. For now, move the variables asid_generation and asid_map to the new structure asid_info. Follow-up patches will move more variables. Note to avoid more renaming aftwards, a local variable 'info' has been created and is a pointer to the ASID allocator structure. Signed-off-by: Julien Grall <julien.grall@arm.com> --- arch/arm64/mm/context.c | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 1f0ea2facf24..34db54f1a39a 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -30,8 +30,11 @@ static u32 asid_bits; static DEFINE_RAW_SPINLOCK(cpu_asid_lock); -static atomic64_t asid_generation; -static unsigned long *asid_map; +struct asid_info +{ + atomic64_t generation; + unsigned long *map; +} asid_info; static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); @@ -88,13 +91,13 @@ void verify_cpu_asid_bits(void) } } -static void flush_context(void) +static void flush_context(struct asid_info *info) { int i; u64 asid; /* Update the list of reserved ASIDs and the ASID bitmap. 
*/ - bitmap_clear(asid_map, 0, NUM_USER_ASIDS); + bitmap_clear(info->map, 0, NUM_USER_ASIDS); for_each_possible_cpu(i) { asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); @@ -107,7 +110,7 @@ static void flush_context(void) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid2idx(asid), asid_map); + __set_bit(asid2idx(asid), info->map); per_cpu(reserved_asids, i) = asid; } @@ -142,11 +145,11 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid) return hit; } -static u64 new_context(struct mm_struct *mm) +static u64 new_context(struct asid_info *info, struct mm_struct *mm) { static u32 cur_idx = 1; u64 asid = atomic64_read(&mm->context.id); - u64 generation = atomic64_read(&asid_generation); + u64 generation = atomic64_read(&info->generation); if (asid != 0) { u64 newasid = generation | (asid & ~ASID_MASK); @@ -162,7 +165,7 @@ static u64 new_context(struct mm_struct *mm) * We had a valid ASID in a previous life, so try to re-use * it if possible. */ - if (!__test_and_set_bit(asid2idx(asid), asid_map)) + if (!__test_and_set_bit(asid2idx(asid), info->map)) return newasid; } @@ -173,20 +176,20 @@ static u64 new_context(struct mm_struct *mm) * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd * pairs. 
*/ - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); + asid = find_next_zero_bit(info->map, NUM_USER_ASIDS, cur_idx); if (asid != NUM_USER_ASIDS) goto set_asid; /* We're out of ASIDs, so increment the global generation count */ generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION, - &asid_generation); - flush_context(); + &info->generation); + flush_context(info); /* We have more ASIDs than CPUs, so this will always succeed */ - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); + asid = find_next_zero_bit(info->map, NUM_USER_ASIDS, 1); set_asid: - __set_bit(asid, asid_map); + __set_bit(asid, info->map); cur_idx = asid; return idx2asid(asid) | generation; } @@ -195,6 +198,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) { unsigned long flags; u64 asid, old_active_asid; + struct asid_info *info = &asid_info; if (system_supports_cnp()) cpu_set_reserved_ttbr0(); @@ -217,7 +221,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) */ old_active_asid = atomic64_read(&per_cpu(active_asids, cpu)); if (old_active_asid && - !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) && + !((asid ^ atomic64_read(&info->generation)) >> asid_bits) && atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu), old_active_asid, asid)) goto switch_mm_fastpath; @@ -225,8 +229,8 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) raw_spin_lock_irqsave(&cpu_asid_lock, flags); /* Check that our ASID belongs to the current generation. 
*/ asid = atomic64_read(&mm->context.id); - if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) { - asid = new_context(mm); + if ((asid ^ atomic64_read(&info->generation)) >> asid_bits) { + asid = new_context(info, mm); atomic64_set(&mm->context.id, asid); } @@ -259,16 +263,18 @@ asmlinkage void post_ttbr_update_workaround(void) static int asids_init(void) { + struct asid_info *info = &asid_info; + asid_bits = get_cpu_asid_bits(); /* * Expect allocation after rollover to fail if we don't have at least * one more ASID than CPUs. ASID #0 is reserved for init_mm. */ WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()); - atomic64_set(&asid_generation, ASID_FIRST_VERSION); - asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), - GFP_KERNEL); - if (!asid_map) + atomic64_set(&info->generation, ASID_FIRST_VERSION); + info->map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*info->map), + GFP_KERNEL); + if (!info->map) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); -- 2.11.0
WARNING: multiple messages have this Message-ID (diff)
From: Julien Grall <julien.grall@arm.com> To: linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org, kvmarm@lists.cs.columbia.edu Cc: suzuki.poulose@arm.com, marc.zyngier@arm.com, catalin.marinas@arm.com, julien.thierry@arm.com, will.deacon@arm.com, christoffer.dall@arm.com, Julien Grall <julien.grall@arm.com>, james.morse@arm.com Subject: [PATCH RFC 01/14] arm64/mm: Introduce asid_info structure and move asid_generation/asid_map to it Date: Thu, 21 Mar 2019 16:36:10 +0000 [thread overview] Message-ID: <20190321163623.20219-2-julien.grall@arm.com> (raw) In-Reply-To: <20190321163623.20219-1-julien.grall@arm.com> In an attempt to make the ASID allocator generic, create a new structure asid_info to store all the information necessary for the allocator. For now, move the variables asid_generation and asid_map to the new structure asid_info. Follow-up patches will move more variables. Note to avoid more renaming aftwards, a local variable 'info' has been created and is a pointer to the ASID allocator structure. Signed-off-by: Julien Grall <julien.grall@arm.com> --- arch/arm64/mm/context.c | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 1f0ea2facf24..34db54f1a39a 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -30,8 +30,11 @@ static u32 asid_bits; static DEFINE_RAW_SPINLOCK(cpu_asid_lock); -static atomic64_t asid_generation; -static unsigned long *asid_map; +struct asid_info +{ + atomic64_t generation; + unsigned long *map; +} asid_info; static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); @@ -88,13 +91,13 @@ void verify_cpu_asid_bits(void) } } -static void flush_context(void) +static void flush_context(struct asid_info *info) { int i; u64 asid; /* Update the list of reserved ASIDs and the ASID bitmap. 
*/ - bitmap_clear(asid_map, 0, NUM_USER_ASIDS); + bitmap_clear(info->map, 0, NUM_USER_ASIDS); for_each_possible_cpu(i) { asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); @@ -107,7 +110,7 @@ static void flush_context(void) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid2idx(asid), asid_map); + __set_bit(asid2idx(asid), info->map); per_cpu(reserved_asids, i) = asid; } @@ -142,11 +145,11 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid) return hit; } -static u64 new_context(struct mm_struct *mm) +static u64 new_context(struct asid_info *info, struct mm_struct *mm) { static u32 cur_idx = 1; u64 asid = atomic64_read(&mm->context.id); - u64 generation = atomic64_read(&asid_generation); + u64 generation = atomic64_read(&info->generation); if (asid != 0) { u64 newasid = generation | (asid & ~ASID_MASK); @@ -162,7 +165,7 @@ static u64 new_context(struct mm_struct *mm) * We had a valid ASID in a previous life, so try to re-use * it if possible. */ - if (!__test_and_set_bit(asid2idx(asid), asid_map)) + if (!__test_and_set_bit(asid2idx(asid), info->map)) return newasid; } @@ -173,20 +176,20 @@ static u64 new_context(struct mm_struct *mm) * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd * pairs. 
*/ - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); + asid = find_next_zero_bit(info->map, NUM_USER_ASIDS, cur_idx); if (asid != NUM_USER_ASIDS) goto set_asid; /* We're out of ASIDs, so increment the global generation count */ generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION, - &asid_generation); - flush_context(); + &info->generation); + flush_context(info); /* We have more ASIDs than CPUs, so this will always succeed */ - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); + asid = find_next_zero_bit(info->map, NUM_USER_ASIDS, 1); set_asid: - __set_bit(asid, asid_map); + __set_bit(asid, info->map); cur_idx = asid; return idx2asid(asid) | generation; } @@ -195,6 +198,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) { unsigned long flags; u64 asid, old_active_asid; + struct asid_info *info = &asid_info; if (system_supports_cnp()) cpu_set_reserved_ttbr0(); @@ -217,7 +221,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) */ old_active_asid = atomic64_read(&per_cpu(active_asids, cpu)); if (old_active_asid && - !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) && + !((asid ^ atomic64_read(&info->generation)) >> asid_bits) && atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu), old_active_asid, asid)) goto switch_mm_fastpath; @@ -225,8 +229,8 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) raw_spin_lock_irqsave(&cpu_asid_lock, flags); /* Check that our ASID belongs to the current generation. 
*/ asid = atomic64_read(&mm->context.id); - if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) { - asid = new_context(mm); + if ((asid ^ atomic64_read(&info->generation)) >> asid_bits) { + asid = new_context(info, mm); atomic64_set(&mm->context.id, asid); } @@ -259,16 +263,18 @@ asmlinkage void post_ttbr_update_workaround(void) static int asids_init(void) { + struct asid_info *info = &asid_info; + asid_bits = get_cpu_asid_bits(); /* * Expect allocation after rollover to fail if we don't have at least * one more ASID than CPUs. ASID #0 is reserved for init_mm. */ WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()); - atomic64_set(&asid_generation, ASID_FIRST_VERSION); - asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), - GFP_KERNEL); - if (!asid_map) + atomic64_set(&info->generation, ASID_FIRST_VERSION); + info->map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*info->map), + GFP_KERNEL); + if (!info->map) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); -- 2.11.0 _______________________________________________ linux-arm-kernel mailing list linux-arm-kernel@lists.infradead.org http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
next prev parent reply other threads:[~2019-03-21 16:36 UTC|newest] Thread overview: 211+ messages / expand[flat|nested] mbox.gz Atom feed top 2019-03-21 16:36 [PATCH RFC 00/14] kvm/arm: Align the VMID allocation with the arm64 ASID one Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall [this message] 2019-03-21 16:36 ` [PATCH RFC 01/14] arm64/mm: Introduce asid_info structure and move asid_generation/asid_map to it Julien Grall 2019-03-21 17:03 ` Suzuki K Poulose 2019-03-21 17:03 ` Suzuki K Poulose 2019-03-21 17:27 ` Julien Grall 2019-03-21 17:27 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 02/14] arm64/mm: Move active_asids and reserved_asids to asid_info Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 03/14] arm64/mm: Move bits " Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 04/14] arm64/mm: Move the variable lock and tlb_flush_pending " Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 05/14] arm64/mm: Remove dependency on MM in new_context Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 06/14] arm64/mm: Store the number of asid allocated per context Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 07/14] arm64/mm: Introduce NUM_ASIDS Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 08/14] arm64/mm: Split asid_inits in 2 parts Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 09/14] arm64/mm: Split the function check_and_switch_context in 3 parts Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 10/14] arm64/mm: Introduce a callback to flush the local context Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall 
2019-03-21 16:36 ` [PATCH RFC 11/14] arm64: Move the ASID allocator code in a separate file Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-06-05 16:56 ` Julien Grall 2019-06-05 16:56 ` Julien Grall 2019-06-05 16:56 ` Julien Grall 2019-06-05 16:56 ` Julien Grall 2019-06-05 20:41 ` Palmer Dabbelt 2019-06-05 20:41 ` Palmer Dabbelt 2019-06-05 20:41 ` Palmer Dabbelt 2019-06-05 20:41 ` Palmer Dabbelt 2019-06-11 1:56 ` Gary Guo 2019-06-11 1:56 ` Gary Guo 2019-06-11 1:56 ` Gary Guo 2019-06-11 1:56 ` Gary Guo 2019-06-19 8:07 ` Guo Ren 2019-06-19 8:07 ` Guo Ren 2019-06-19 8:07 ` Guo Ren 2019-06-19 8:07 ` Guo Ren 2019-06-19 8:54 ` Julien Grall 2019-06-19 8:54 ` Julien Grall 2019-06-19 8:54 ` Julien Grall 2019-06-19 8:54 ` Julien Grall 2019-06-19 9:12 ` Will Deacon 2019-06-19 9:12 ` Will Deacon 2019-06-19 9:12 ` Will Deacon 2019-06-19 9:12 ` Will Deacon 2019-06-19 12:18 ` Guo Ren 2019-06-19 12:18 ` Guo Ren 2019-06-19 12:18 ` Guo Ren 2019-06-19 12:18 ` Guo Ren 2019-06-19 12:39 ` Will Deacon 2019-06-19 12:39 ` Will Deacon 2019-06-19 12:39 ` Will Deacon 2019-06-19 12:39 ` Will Deacon 2019-06-20 9:33 ` Guo Ren 2019-06-20 9:33 ` Guo Ren 2019-06-20 9:33 ` Guo Ren 2019-06-20 9:33 ` Guo Ren 2019-06-24 10:40 ` Will Deacon 2019-06-24 10:40 ` Will Deacon 2019-06-24 10:40 ` Will Deacon 2019-06-24 10:40 ` Will Deacon 2019-06-25 7:25 ` Palmer Dabbelt 2019-06-25 7:25 ` Palmer Dabbelt 2019-06-25 7:25 ` Palmer Dabbelt 2019-06-25 7:25 ` Palmer Dabbelt 2019-09-07 23:52 ` Guo Ren 2019-09-07 23:52 ` Guo Ren 2019-09-07 23:52 ` Guo Ren 2019-09-07 23:52 ` Guo Ren 2019-09-07 23:52 ` Guo Ren 2019-09-12 14:02 ` Will Deacon 2019-09-12 14:02 ` Will Deacon 2019-09-12 14:02 ` Will Deacon 2019-09-12 14:02 ` Will Deacon 2019-09-12 14:02 ` Will Deacon 2019-09-12 14:59 ` Guo Ren 2019-09-12 14:59 ` Guo Ren 2019-09-12 14:59 ` Guo Ren 2019-09-12 14:59 ` Guo Ren 2019-09-12 14:59 ` Guo Ren 2019-09-13 7:13 ` Guo Ren 2019-09-13 7:13 ` Guo Ren 2019-09-14 8:49 ` Guo Ren 2019-09-14 8:49 ` Guo Ren 2019-09-14 8:49 ` Guo 
Ren 2019-09-14 8:49 ` Guo Ren 2019-09-14 8:49 ` Guo Ren 2019-09-16 12:57 ` Jean-Philippe Brucker 2019-09-16 12:57 ` Jean-Philippe Brucker 2019-09-16 12:57 ` Jean-Philippe Brucker 2019-09-16 12:57 ` Jean-Philippe Brucker 2019-09-16 12:57 ` Jean-Philippe Brucker 2019-09-19 13:07 ` Guo Ren 2019-09-19 13:07 ` Guo Ren 2019-09-19 13:07 ` Guo Ren 2019-09-19 13:07 ` Guo Ren 2019-09-19 13:07 ` Guo Ren 2019-09-19 15:18 ` Jean-Philippe Brucker 2019-09-19 15:18 ` Jean-Philippe Brucker 2019-09-19 15:18 ` Jean-Philippe Brucker 2019-09-19 15:18 ` Jean-Philippe Brucker 2019-09-19 15:18 ` Jean-Philippe Brucker 2019-09-20 0:07 ` Guo Ren 2019-09-20 0:07 ` Guo Ren 2019-09-20 0:07 ` Guo Ren 2019-09-20 0:07 ` Guo Ren 2019-09-20 0:07 ` Guo Ren 2019-09-20 7:18 ` Jean-Philippe Brucker 2019-09-20 7:18 ` Jean-Philippe Brucker 2019-09-20 7:18 ` Jean-Philippe Brucker 2019-09-20 7:18 ` Jean-Philippe Brucker 2019-09-20 7:18 ` Jean-Philippe Brucker 2019-09-14 14:01 ` Palmer Dabbelt 2019-09-14 14:01 ` Palmer Dabbelt 2019-09-14 14:01 ` Palmer Dabbelt 2019-09-14 14:01 ` Palmer Dabbelt 2019-09-14 14:01 ` Palmer Dabbelt 2019-09-15 5:03 ` Anup Patel 2019-09-15 5:03 ` Anup Patel 2019-09-15 5:03 ` Anup Patel 2019-09-15 5:03 ` Anup Patel 2019-09-15 5:03 ` Anup Patel 2019-09-16 18:18 ` Will Deacon 2019-09-16 18:18 ` Will Deacon 2019-09-16 18:18 ` Will Deacon 2019-09-16 18:18 ` Will Deacon 2019-09-16 18:18 ` Will Deacon 2019-09-16 18:28 ` Palmer Dabbelt 2019-09-16 18:28 ` Palmer Dabbelt 2019-09-16 18:28 ` Palmer Dabbelt 2019-09-16 18:28 ` Palmer Dabbelt 2019-09-16 18:28 ` Palmer Dabbelt 2019-09-17 3:42 ` Anup Patel 2019-09-17 3:42 ` Anup Patel 2019-09-17 3:42 ` Anup Patel 2019-09-17 3:42 ` Anup Patel 2019-09-17 3:42 ` Anup Patel 2019-09-19 13:36 ` Guo Ren 2019-09-19 13:36 ` Guo Ren 2019-09-19 13:36 ` Guo Ren 2019-09-19 13:36 ` Guo Ren 2019-09-19 13:36 ` Guo Ren 2019-06-19 11:51 ` Guo Ren 2019-06-19 11:51 ` Guo Ren 2019-06-19 11:51 ` Guo Ren 2019-06-19 11:51 ` Guo Ren 2019-06-19 12:52 ` Julien Grall 
2019-06-19 12:52 ` Julien Grall 2019-06-19 12:52 ` Julien Grall 2019-06-19 12:52 ` Julien Grall 2019-06-21 14:16 ` Catalin Marinas 2019-06-21 14:16 ` Catalin Marinas 2019-06-21 14:16 ` Catalin Marinas 2019-06-21 14:16 ` Catalin Marinas 2019-06-23 16:35 ` Guo Ren 2019-06-23 16:35 ` Guo Ren 2019-06-23 16:35 ` Guo Ren 2019-06-23 16:35 ` Guo Ren 2019-06-24 10:22 ` Will Deacon 2019-06-24 10:22 ` Will Deacon 2019-06-24 10:22 ` Will Deacon 2019-06-24 10:22 ` Will Deacon 2019-06-27 9:41 ` qi.fuli 2019-06-27 9:41 ` qi.fuli 2019-06-27 9:41 ` qi.fuli 2019-06-27 9:41 ` qi.fuli 2019-06-27 10:26 ` Will Deacon 2019-06-27 10:26 ` Will Deacon 2019-06-27 10:26 ` Will Deacon 2019-06-27 10:26 ` Will Deacon 2019-06-24 15:38 ` Catalin Marinas 2019-06-24 15:38 ` Catalin Marinas 2019-06-24 15:38 ` Catalin Marinas 2019-06-24 15:38 ` Catalin Marinas 2019-06-30 4:29 ` Guo Ren 2019-06-30 4:29 ` Guo Ren 2019-06-30 4:29 ` Guo Ren 2019-06-30 4:29 ` Guo Ren 2019-07-01 9:17 ` Catalin Marinas 2019-07-01 9:17 ` Catalin Marinas 2019-07-01 9:17 ` Catalin Marinas 2019-07-01 9:17 ` Catalin Marinas 2019-07-16 3:31 ` Guo Ren 2019-07-16 3:31 ` Guo Ren 2019-07-16 3:31 ` Guo Ren 2019-07-16 3:31 ` Guo Ren 2019-07-22 16:38 ` Catalin Marinas 2019-07-22 16:38 ` Catalin Marinas 2019-07-22 16:38 ` Catalin Marinas 2019-07-22 16:38 ` Catalin Marinas 2019-03-21 16:36 ` [PATCH RFC 12/14] arm64/lib: asid: Allow user to update the context under the lock Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 13/14] arm/kvm: Introduce a new VMID allocator Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` [PATCH RFC 14/14] kvm/arm: Align the VMID allocation with the arm64 ASID one Julien Grall 2019-03-21 16:36 ` Julien Grall 2019-03-21 16:36 ` Julien Grall
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20190321163623.20219-2-julien.grall@arm.com \ --to=julien.grall@arm.com \ --cc=catalin.marinas@arm.com \ --cc=christoffer.dall@arm.com \ --cc=james.morse@arm.com \ --cc=julien.thierry@arm.com \ --cc=kvmarm@lists.cs.columbia.edu \ --cc=linux-arm-kernel@lists.infradead.org \ --cc=linux-kernel@vger.kernel.org \ --cc=marc.zyngier@arm.com \ --cc=suzuki.poulose@arm.com \ --cc=will.deacon@arm.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes; see mirroring instructions on how to clone and mirror all data and code used by this external index.