From mboxrd@z Thu Jan  1 00:00:00 1970
From: Catalin Marinas <catalin.marinas@arm.com>
To: linux-arm-kernel@lists.infradead.org
Cc: Will Deacon, Vincenzo Frascino, Szabolcs Nagy, Richard Earnshaw,
	Kevin Brodsky, Andrey Konovalov, Peter Collingbourne,
	linux-mm@kvack.org, linux-arch@vger.kernel.org
Subject: [PATCH v2 17/19] arm64: mte: Allow user control of the generated random tags via prctl()
Date: Wed, 26 Feb 2020 18:05:24 +0000
Message-ID: <20200226180526.3272848-18-catalin.marinas@arm.com>
In-Reply-To: <20200226180526.3272848-1-catalin.marinas@arm.com>
References: <20200226180526.3272848-1-catalin.marinas@arm.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

The IRG, ADDG and SUBG instructions insert a random tag in the resulting
address. Certain tags can be excluded via the GCR_EL1.Exclude bitmap
when, for example, the user wants a certain colour for freed buffers.

Since the GCR_EL1 register is not accessible at EL0, extend the
prctl(PR_SET_TAGGED_ADDR_CTRL) interface to include a 16-bit field in
the first argument for controlling which tags can be generated by the
above instructions (an include rather than exclude mask). Note that by
default all non-zero tags are excluded. This setting is per-thread.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---

Notes:
    v2:
    - Switch from an exclude mask to an include one for the prctl()
      interface.
    - Reset the allowed tags mask during flush_thread().

 arch/arm64/include/asm/processor.h |  1 +
 arch/arm64/include/asm/sysreg.h    |  7 ++++++
 arch/arm64/kernel/mte.c            | 35 +++++++++++++++++++++++++++---
 arch/arm64/kernel/process.c        |  2 +-
 include/uapi/linux/prctl.h         |  3 +++
 5 files changed, 44 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 91aa270afc7d..bd215d74275d 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -150,6 +150,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_ARM64_MTE
 	u64			sctlr_tcf0;
+	u64			gcr_incl;
 #endif
 };
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 13f0841b6ed3..dd8d747997be 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -931,6 +931,13 @@
 		write_sysreg(__scs_new, sysreg);		\
 } while (0)
 
+#define sysreg_clear_set_s(sysreg, clear, set) do {		\
+	u64 __scs_val = read_sysreg_s(sysreg);			\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
+	if (__scs_new != __scs_val)				\
+		write_sysreg_s(__scs_new, sysreg);		\
+} while (0)
+
 #endif
 
 #endif	/* __ASM_SYSREG_H */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 4b926d779940..0cd1482b18a0 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -31,6 +31,25 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
 	preempt_enable();
 }
 
+static void update_gcr_el1_excl(u64 incl)
+{
+	u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+
+	/*
+	 * Note that 'incl' is an include mask (controlled by the user via
+	 * prctl()) while GCR_EL1 accepts an exclude mask.
+	 * No need for ISB since this only affects EL0 currently, implicit
+	 * with ERET.
+	 */
+	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
+}
+
+static void set_gcr_el1_excl(u64 incl)
+{
+	current->thread.gcr_incl = incl;
+	update_gcr_el1_excl(incl);
+}
+
 void flush_mte_state(void)
 {
 	if (!system_supports_mte())
@@ -40,6 +59,8 @@ void flush_mte_state(void)
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
 	/* disable tag checking */
 	set_sctlr_el1_tcf0(0);
+	/* reset tag generation mask */
+	set_gcr_el1_excl(0);
 }
 
 void mte_thread_switch(struct task_struct *next)
@@ -62,6 +83,7 @@ void mte_thread_switch(struct task_struct *next)
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
 		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+	update_gcr_el1_excl(next->thread.gcr_incl);
 }
 
 long set_mte_ctrl(unsigned long arg)
@@ -86,23 +108,30 @@ long set_mte_ctrl(unsigned long arg)
 	}
 
 	set_sctlr_el1_tcf0(tcf0);
+	set_gcr_el1_excl((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT);
 
 	return 0;
 }
 
 long get_mte_ctrl(void)
 {
+	unsigned long ret;
+
 	if (!system_supports_mte())
 		return 0;
 
+	ret = current->thread.gcr_incl << PR_MTE_TAG_SHIFT;
+
 	switch (current->thread.sctlr_tcf0) {
 	case SCTLR_EL1_TCF0_NONE:
 		return PR_MTE_TCF_NONE;
 	case SCTLR_EL1_TCF0_SYNC:
-		return PR_MTE_TCF_SYNC;
+		ret |= PR_MTE_TCF_SYNC;
+		break;
 	case SCTLR_EL1_TCF0_ASYNC:
-		return PR_MTE_TCF_ASYNC;
+		ret |= PR_MTE_TCF_ASYNC;
+		break;
 	}
 
-	return 0;
+	return ret;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index b3c8cd64b88a..4ac0e90617fc 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -584,7 +584,7 @@ long set_tagged_addr_ctrl(unsigned long arg)
 		return -EINVAL;
 
 	if (system_supports_mte())
-		valid_mask |= PR_MTE_TCF_MASK;
+		valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
 
 	if (arg & ~valid_mask)
 		return -EINVAL;
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 2390ab324afa..7f0827705c9a 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -239,6 +239,9 @@ struct prctl_mm_map {
 # define PR_MTE_TCF_SYNC		(1UL << PR_MTE_TCF_SHIFT)
 # define PR_MTE_TCF_ASYNC		(2UL << PR_MTE_TCF_SHIFT)
 # define PR_MTE_TCF_MASK		(3UL << PR_MTE_TCF_SHIFT)
+/* MTE tag inclusion mask */
+# define PR_MTE_TAG_SHIFT		3
+# define PR_MTE_TAG_MASK		(0xffffUL << PR_MTE_TAG_SHIFT)
 
 /* Control reclaim behavior when allocating memory */
 #define PR_SET_IO_FLUSHER		57
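
For reference, a minimal userspace sketch of the interface added above (hypothetical, not part of the patch): it enables the tagged address ABI with synchronous tag checking, restricts generated tags to 1-3 via the new include mask, then uses IRG to show that only the included tags come back. The fallback #define values are taken from include/uapi/linux/prctl.h and the earlier patches in this series; the file name and build command are illustrative, and running it assumes an MTE-capable CPU and a toolchain that accepts -march=armv8.5-a+memtag.

/* mte-incl.c: gcc -O2 -march=armv8.5-a+memtag mte-incl.c -o mte-incl */
#include <stdio.h>
#include <sys/prctl.h>

/*
 * Fallback definitions in case the libc headers do not carry the MTE
 * prctl bits yet; values match include/uapi/linux/prctl.h and the
 * earlier patches in this series.
 */
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT	1
#define PR_MTE_TCF_SYNC		(1UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TAG_SHIFT	3
#endif

int main(void)
{
	/*
	 * Include mask: allow only tags 1, 2 and 3 to be generated by
	 * IRG/ADDG/SUBG; tag 0 and tags 4-15 remain excluded.
	 */
	unsigned long incl = (1UL << 1) | (1UL << 2) | (1UL << 3);
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
			     (incl << PR_MTE_TAG_SHIFT);
	unsigned long addr = 0, tagged;
	int i;

	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");
		return 1;
	}

	/* Generated tags should now only come from the include mask. */
	for (i = 0; i < 8; i++) {
		asm volatile("irg %0, %1" : "=r" (tagged) : "r" (addr));
		printf("IRG generated tag %lu\n", (tagged >> 56) & 0xf);
	}

	/* Read back the control word; the include mask sits in bits 18:3. */
	printf("PR_GET_TAGGED_ADDR_CTRL = 0x%x\n",
	       (unsigned int)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));

	return 0;
}

With the default include mask of 0, the same loop would only ever print tag 0, since all non-zero tags are excluded by default.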