From: Zengruan Ye <yezengruan@huawei.com>
To: <linux-kernel@vger.kernel.org>, <linux-arm-kernel@lists.infradead.org>, <kvmarm@lists.cs.columbia.edu>, <kvm@vger.kernel.org>, <linux-doc@vger.kernel.org>, <virtualization@lists.linux-foundation.org>
Cc: kbuild test robot <lkp@intel.com>, peterz@infradead.org, maz@kernel.org, daniel.lezcano@linaro.org, linux@armlinux.org.uk, steven.price@arm.com, longman@redhat.com, catalin.marinas@arm.com, will@kernel.org
Subject: [PATCH v3 7/8] KVM: arm64: Add interface to support vCPU preempted check
Date: Thu, 16 Jan 2020 20:46:25 +0800
Message-ID: <20200116124626.1155-8-yezengruan@huawei.com>
In-Reply-To: <20200116124626.1155-1-yezengruan@huawei.com>

This is to fix some lock holder preemption issues. Some lock
implementations do a spin loop before acquiring the lock itself. The
kernel already provides the interface bool vcpu_is_preempted(int cpu),
which takes a CPU number as its parameter and returns true if the vCPU
running on that CPU has been preempted. The kernel can then break out
of such spin loops based on its return value. As the kernel already
uses this interface, let's support it on arm64.

Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
---
 arch/arm64/include/asm/paravirt.h      | 12 ++++++++++++
 arch/arm64/include/asm/spinlock.h      |  9 +++++++++
 arch/arm64/kernel/Makefile             |  2 +-
 arch/arm64/kernel/paravirt-spinlocks.c | 13 +++++++++++++
 arch/arm64/kernel/paravirt.c           |  4 +++-
 5 files changed, 38 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/kernel/paravirt-spinlocks.c

diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index cf3a0fd7c1a7..7b1c81b544bb 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -11,8 +11,13 @@ struct pv_time_ops {
 	unsigned long long (*steal_clock)(int cpu);
 };
 
+struct pv_lock_ops {
+	bool (*vcpu_is_preempted)(int cpu);
+};
+
 struct paravirt_patch_template {
 	struct pv_time_ops time;
+	struct pv_lock_ops lock;
 };
 
 extern struct paravirt_patch_template pv_ops;
@@ -24,6 +29,13 @@ static inline u64 paravirt_steal_clock(int cpu)
 
 int __init pv_time_init(void);
 
+__visible bool __native_vcpu_is_preempted(int cpu);
+
+static inline bool pv_vcpu_is_preempted(int cpu)
+{
+	return pv_ops.lock.vcpu_is_preempted(cpu);
+}
+
 #else
 
 #define pv_time_init() do {} while (0)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index b093b287babf..b5d1982414c5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -7,8 +7,17 @@
 
 #include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
+#include <asm/paravirt.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
 
+#ifdef CONFIG_PARAVIRT
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+	return pv_vcpu_is_preempted(cpu);
+}
+#endif /* CONFIG_PARAVIRT */
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index fc6488660f64..b23cdae433a4 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
 obj-$(CONFIG_ACPI)			+= acpi.o
 obj-$(CONFIG_ACPI_NUMA)			+= acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
-obj-$(CONFIG_PARAVIRT)			+= paravirt.o
+obj-$(CONFIG_PARAVIRT)			+= paravirt.o paravirt-spinlocks.o
 obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
 obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
diff --git a/arch/arm64/kernel/paravirt-spinlocks.c b/arch/arm64/kernel/paravirt-spinlocks.c
new file mode 100644
index 000000000000..718aa773d45c
--- /dev/null
+++ b/arch/arm64/kernel/paravirt-spinlocks.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye <yezengruan@huawei.com>
+ */
+
+#include <linux/spinlock.h>
+#include <asm/paravirt.h>
+
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+	return false;
+}
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 1ef702b0be2d..d8f1ba8c22ce 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -26,7 +26,9 @@
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
+struct paravirt_patch_template pv_ops = {
+	.lock.vcpu_is_preempted = __native_vcpu_is_preempted,
+};
 EXPORT_SYMBOL_GPL(pv_ops);
 
 struct pv_time_stolen_time_region {
-- 
2.19.1

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
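For context, this is the pattern the generic locking code applies once
vcpu_is_preempted() is available. The `#define vcpu_is_preempted
vcpu_is_preempted` in spinlock.h is what tells the generic headers,
whose false-returning fallback is guarded by `#ifndef
vcpu_is_preempted`, that the architecture supplies its own
implementation. Below is a minimal sketch, not code from this series:
`struct demo_lock` and its `owner_cpu` field are illustrative names
standing in for real users such as the osq and rwsem spin slowpaths.

/*
 * Minimal sketch: break out of a spin loop when the lock holder's
 * vCPU has been preempted by the host, instead of burning cycles
 * waiting for a holder that is not running.
 */
struct demo_lock {
	int	locked;
	int	owner_cpu;	/* CPU the current holder runs on */
};

static bool demo_spin(struct demo_lock *lock)
{
	while (READ_ONCE(lock->locked)) {
		/*
		 * If the holder's vCPU is scheduled out, spinning can
		 * only waste time: give up and take the slow path.
		 */
		if (vcpu_is_preempted(READ_ONCE(lock->owner_cpu)))
			return false;
		cpu_relax();
	}
	return true;
}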
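The paravirt.c hunk installs the native stub as the default, so
bare-metal kernels always see "not preempted". A guest-side backend
can then switch the hook at boot. The following is a minimal sketch of
such an override under assumed names: kvm_vcpu_is_preempted() and
pv_lock_supported() are hypothetical stand-ins for the real backend
and feature probe added later in this series (patch 8/8, building on
the shared structure from patch 5/8).

/* Sketch only: placeholder backend for a KVM guest. */
static bool kvm_vcpu_is_preempted(int cpu)
{
	/*
	 * The real backend reads a per-vCPU structure shared with
	 * the host and returns its "preempted" flag.
	 */
	return false;
}

int __init pv_lock_init(void)
{
	if (!pv_lock_supported())	/* hypothetical feature probe */
		return 0;

	/* Replace the native stub installed in pv_ops above. */
	pv_ops.lock.vcpu_is_preempted = kvm_vcpu_is_preempted;
	return 0;
}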
Thread overview: 10+ messages

2020-01-16 12:46 [PATCH v3 0/8] KVM: arm64: vCPU preempted check support  Zengruan Ye
2020-01-16 12:46 ` [PATCH v3 1/8] KVM: arm64: Document PV-lock interface  Zengruan Ye
2020-01-20 16:20   ` Steven Price
2020-01-16 12:46 ` [PATCH v3 2/8] arm64: Probe for the presence of KVM hypervisor services during boot  Zengruan Ye
2020-01-16 12:46 ` [PATCH v3 3/8] arm/arm64: KVM: Advertise KVM UID to guests via SMCCC  Zengruan Ye
2020-01-16 12:46 ` [PATCH v3 4/8] KVM: arm64: Add SMCCC paravirtualised lock calls  Zengruan Ye
2020-01-16 12:46 ` [PATCH v3 5/8] KVM: arm64: Support pvlock preempted via shared structure  Zengruan Ye
2020-01-16 12:46 ` [PATCH v3 6/8] KVM: arm64: Provide vCPU attributes for PV lock  Zengruan Ye
2020-01-16 12:46 ` [PATCH v3 7/8] KVM: arm64: Add interface to support vCPU preempted check  Zengruan Ye [this message]
2020-01-16 12:46 ` [PATCH v3 8/8] KVM: arm64: Support the vCPU preemption check  Zengruan Ye