From: Catalin Marinas <catalin.marinas@arm.com>
To: Zhenyu Ye <yezhenyu2@huawei.com>
Cc: will@kernel.org, suzuki.poulose@arm.com, maz@kernel.org,
	steven.price@arm.com, guohanjun@huawei.com, olof@lixom.net,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-mm@kvack.org, arm@kernel.org, xiexiangyou@huawei.com,
	prime.zeng@hisilicon.com, zhangshaokun@hisilicon.com,
	kuhn.chenqun@huawei.com
Subject: Re: [PATCH v2 2/2] arm64: tlb: Use the TLBI RANGE feature in arm64
Date: Tue, 14 Jul 2020 11:36:30 +0100	[thread overview]
Message-ID: <20200714103629.GA18793@gaia> (raw)
In-Reply-To: <20200710094420.517-3-yezhenyu2@huawei.com>

On Fri, Jul 10, 2020 at 05:44:20PM +0800, Zhenyu Ye wrote:
> +#define __TLBI_RANGE_PAGES(num, scale)	(((num) + 1) << (5 * (scale) + 1))
> +#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)
> +
> +#define TLBI_RANGE_MASK			GENMASK_ULL(4, 0)
> +#define __TLBI_RANGE_NUM(range, scale)	\
> +	(((range) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK)
[...]
> +	int num = 0;
> +	int scale = 0;
[...]
> +			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
[...]

Since num is an int, __TLBI_RANGE_PAGES is still an int. Shifting it by
PAGE_SHIFT can overflow a 32-bit int, as the maximum range is 8GB for 4K
pages (or 128GB for 64K pages). I think we probably get away with this
because of an implicit type conversion, but I'd rather make
__TLBI_RANGE_PAGES an explicit unsigned long:

#define __TLBI_RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))

Without this change, the CBMC check fails (see the test below). In the
kernel we don't have this problem because the address is encoded via
__TLBI_VADDR_RANGE, which doesn't overflow.
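
To make the overflow concrete, here is a quick standalone check (my own
throwaway example, not part of the patch; the macro names are mine and it
assumes 4K pages and a 32-bit int):

#include <stdio.h>

#define PAGE_SHIFT	12
/* as in the patch: the shift is evaluated as int */
#define INT_RANGE_PAGES(num, scale)	(((num) + 1) << (5 * (scale) + 1))
/* with the explicit cast: the shift is evaluated as unsigned long */
#define UL_RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))

int main(void)
{
	/* num = 31, scale = 3: 32 << 16 = 0x200000 pages */
	unsigned long bad  = (unsigned long)(INT_RANGE_PAGES(31, 3) << PAGE_SHIFT);
	unsigned long good = UL_RANGE_PAGES(31, 3) << PAGE_SHIFT;

	/* 0x200000 << 12 needs 34 bits, so the int version overflows */
	printf("int:           %#lx\n", bad);	/* undefined behaviour; not the 8GB we want */
	printf("unsigned long: %#lx\n", good);	/* 0x200000000, i.e. 8GB */
	return 0;
}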

The good part is that CBMC reckons the algorithm is correct ;).

---------------8<------tlbinval.c---------------------------
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Check with:
 *   cbmc --unwind 6 tlbinval.c
 */

#define PAGE_SHIFT	(12)
#define PAGE_SIZE	(1 << PAGE_SHIFT)
#define VA_RANGE	(1UL << 48)

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

#define __TLBI_RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)

#define TLBI_RANGE_MASK			0x1fUL
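/*
 * Unlike __TLBI_RANGE_NUM() in the patch, this variant folds the "- 1"
 * in, so a negative num means there is no range to emit at this scale.
 */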
#define __TLBI_RANGE_NUM(pages, scale)	\
	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)

static unsigned long inval_start;
static unsigned long inval_end;

static void tlbi(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	if (inval_end == 0) {
		inval_start = start;
		inval_end = end;
		return;
	}

	/* contiguous ranges in ascending order only */
	__CPROVER_assert(start == inval_end, "Contiguous TLBI ranges");

	inval_end = end;
}

static void __flush_tlb_range(unsigned long start, unsigned long end,
			      unsigned long stride)
{
	int num = 0;
	int scale = 0;
	unsigned long pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	if (pages >= MAX_TLBI_RANGE_PAGES) {
		tlbi(0, VA_RANGE);
		return;
	}

	while (pages > 0) {
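		/*
		 * One step per iteration: if the remaining page count is
		 * odd, invalidate a single stride; otherwise emit one range
		 * invalidation covering __TLBI_RANGE_PAGES(num, scale) pages
		 * (when num >= 0) and move on to the next scale.
		 */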
		__CPROVER_assert(scale <= 3, "Scale in range");
		if (pages % 2 == 1) {
			tlbi(start, stride);
			start += stride;
			pages -= stride >> PAGE_SHIFT;
			continue;
		}

		num = __TLBI_RANGE_NUM(pages, scale);
		__CPROVER_assert(num <= 30, "Num in range");
		if (num >= 0) {
			tlbi(start, __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT);
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
			pages -= __TLBI_RANGE_PAGES(num, scale);
		}
		scale++;
	}
}

static unsigned long nondet_ulong(void);

int main(void)
{
	unsigned long stride = nondet_ulong();
	unsigned long start = round_down(nondet_ulong(), stride);
	unsigned long end = round_up(nondet_ulong(), stride);

	__CPROVER_assume(stride == PAGE_SIZE ||
			 stride == PAGE_SIZE << (PAGE_SHIFT - 3) ||
			 stride == PAGE_SIZE << (2 * (PAGE_SHIFT - 3)));
	__CPROVER_assume(start < end);
	__CPROVER_assume(end <= VA_RANGE);

	__flush_tlb_range(start, end, stride);

	__CPROVER_assert((inval_start == 0 && inval_end == VA_RANGE) ||
			 (inval_start == start && inval_end == end),
			 "Correct invalidation");

	return 0;
}
