* [PATCH v8 0/8] x86 TLB flush range optimizing
@ 2012-06-12  9:06 Alex Shi
  2012-06-12  9:06 ` [PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
                   ` (8 more replies)
  0 siblings, 9 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

This version of the patch set is rebased on the 3.5-rc2 kernel. It does some
code cleanup and reorders the patches into a more logical sequence.
It also includes Xen code changes according to Jan Beulich's comments, and
updates the comments on the TLB flushing IPI according to PeterZ's suggestions.

It also adds flush_tlb_kernel_range support via invlpg.

Thanks for all the comments; more are appreciated. :)

Alex Shi

[PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU
[PATCH v8 2/8] x86/flush_tlb: try flush_tlb_single one by one in
[PATCH v8 3/8] x86/tlb: fall back to flush all when meet a THP large
[PATCH v8 4/8] x86/tlb: add tlb_flushall_shift for specific CPU
[PATCH v8 5/8] x86/tlb: add tlb_flushall_shift knob into debugfs
[PATCH v8 6/8] x86/tlb: enable tlb flush range support for generic
[PATCH v8 7/8] x86/tlb: replace INVALIDATE_TLB_VECTOR by
[PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'


* [PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-12  9:06 ` [PATCH v8 2/8] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range Alex Shi
                   ` (7 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

For 4KB pages, x86 CPUs have one or two TLB levels: the first level consists
of separate data and instruction TLBs, and the second level is a TLB shared
by both data and instructions.

For huge pages there is usually just one TLB level, separate for 2MB/4MB
and 1GB pages.

Although the size of each TLB level matters for fine-grained performance
tuning, the last level TLB entry count is good enough for a general, coarse
optimization. In fact, the last level TLB always has the largest number of
entries.

This patch detects that largest TLB entry count so it can be used in later
TLB flush optimizations.

According to Borislav's suggestion, all functions and data except the
tlb_ll[i/d]_* arrays are released after system boot.

To stay friendly to all x86 vendors, the vendor-specific code is kept in
each vendor's own file.
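
As background for the detection loop below: CPUID leaf 2 returns packed
one-byte descriptors in EAX/EBX/ECX/EDX, the low byte of EAX tells how many
times the leaf must be queried, and a register with bit 31 set holds no valid
descriptors. Here is a minimal user-space sketch (illustration only, not part
of the patch; it assumes a GCC-compatible compiler providing <cpuid.h>) that
dumps the raw descriptor bytes the kernel code decodes:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;
	int i, j, n;

	/* low byte of EAX: how many times leaf 2 must be read */
	__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
	n = regs[0] & 0xff;

	for (i = 0; i < n; i++) {
		__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
		for (j = 1; j < 16; j++) {	/* byte 0 is the read count */
			if (regs[j / 4] & (1u << 31))
				continue;	/* register holds no valid descriptors */
			if (desc[j])
				printf("TLB/cache descriptor 0x%02x\n", desc[j]);
		}
	}
	return 0;
}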

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/processor.h |   11 +++
 arch/x86/kernel/cpu/common.c     |   21 ++++++
 arch/x86/kernel/cpu/cpu.h        |    9 +++
 arch/x86/kernel/cpu/intel.c      |  141 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 182 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 39bc577..39b2bd4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -61,6 +61,17 @@ static inline void *current_text_addr(void)
 # define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
+enum tlb_infos {
+	ENTRIES,
+	NR_INFO
+};
+
+extern u16 __read_mostly tlb_lli_4k[NR_INFO];
+extern u16 __read_mostly tlb_lli_2m[NR_INFO];
+extern u16 __read_mostly tlb_lli_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4k[NR_INFO];
+extern u16 __read_mostly tlb_lld_2m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4m[NR_INFO];
 /*
  *  CPU type and hardware bug flags. Kept separately for each CPU.
  *  Members of this structure are referenced in head.S, so think twice
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b9333b..41acb77 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -452,6 +452,25 @@ void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 	c->x86_cache_size = l2size;
 }
 
+u16 __read_mostly tlb_lli_4k[NR_INFO];
+u16 __read_mostly tlb_lli_2m[NR_INFO];
+u16 __read_mostly tlb_lli_4m[NR_INFO];
+u16 __read_mostly tlb_lld_4k[NR_INFO];
+u16 __read_mostly tlb_lld_2m[NR_INFO];
+u16 __read_mostly tlb_lld_4m[NR_INFO];
+
+void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+		intel_cpu_detect_tlb(c);
+
+	printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
+		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
+		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
+		tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
+		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES]);
+}
+
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
@@ -911,6 +930,8 @@ void __init identify_boot_cpu(void)
 #else
 	vgetcpu_set_mode();
 #endif
+	if (boot_cpu_data.cpuid_level >= 2)
+		cpu_detect_tlb(&boot_cpu_data);
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 8bacc78..78db1d9 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -24,6 +24,14 @@ struct cpu_dev {
 	int		c_x86_vendor;
 };
 
+struct _tlb_table {
+	unsigned char descriptor;
+	char tlb_type;
+	unsigned int entries;
+	/* unsigned int ways; */
+	char info[128];
+};
+
 #define cpu_dev_register(cpu_devX) \
 	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
 	__attribute__((__section__(".x86_cpu_dev.init"))) = \
@@ -34,4 +42,5 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+extern void intel_cpu_detect_tlb(struct cpuinfo_x86 *c) __cpuinit;
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3e6ff6c..28ecd1b 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -491,6 +491,147 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
 }
 #endif
 
+#define TLB_INST_4K	0x01
+#define TLB_INST_4M	0x02
+#define TLB_INST_2M_4M	0x03
+
+#define TLB_INST_ALL	0x05
+#define TLB_INST_1G	0x06
+
+#define TLB_DATA_4K	0x11
+#define TLB_DATA_4M	0x12
+#define TLB_DATA_2M_4M	0x13
+#define TLB_DATA_4K_4M	0x14
+
+#define TLB_DATA_1G	0x16
+
+#define TLB_DATA0_4K	0x21
+#define TLB_DATA0_4M	0x22
+#define TLB_DATA0_2M_4M	0x23
+
+#define STLB_4K		0x41
+
+static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
+	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
+	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
+	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
+	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
+	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
+	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
+	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages */" },
+	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
+	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
+	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
+	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
+	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
+	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
+	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
+	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
+	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
+	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
+	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
+	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
+	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
+	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
+	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
+	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
+	{ 0x00, 0, 0 }
+};
+
+static void __cpuinit intel_tlb_lookup(const unsigned char desc)
+{
+	unsigned char k;
+	if (desc == 0)
+		return;
+
+	/* look up this descriptor in the table */
+	for (k = 0; intel_tlb_table[k].descriptor != desc && \
+			intel_tlb_table[k].descriptor != 0; k++)
+		;
+
+	if (intel_tlb_table[k].tlb_type == 0)
+		return;
+
+	switch (intel_tlb_table[k].tlb_type) {
+	case STLB_4K:
+		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_ALL:
+		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_4K:
+		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_4M:
+		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_2M_4M:
+		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_4K:
+	case TLB_DATA0_4K:
+		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_4M:
+	case TLB_DATA0_4M:
+		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_2M_4M:
+	case TLB_DATA0_2M_4M:
+		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_4K_4M:
+		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	}
+}
+
+void __cpuinit intel_cpu_detect_tlb(struct cpuinfo_x86 *c)
+{
+	int i, j, n;
+	unsigned int regs[4];
+	unsigned char *desc = (unsigned char *)regs;
+	/* Number of times to iterate */
+	n = cpuid_eax(2) & 0xFF;
+
+	for (i = 0 ; i < n ; i++) {
+		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+		/* If bit 31 is set, this is an unknown format */
+		for (j = 0 ; j < 3 ; j++)
+			if (regs[j] & (1 << 31))
+				regs[j] = 0;
+
+		/* Byte 0 is level count, not a descriptor */
+		for (j = 1 ; j < 16 ; j++)
+			intel_tlb_lookup(desc[j]);
+	}
+}
+
 static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
-- 
1.7.5.4



* [PATCH v8 2/8] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
  2012-06-12  9:06 ` [PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-12  9:06 ` [PATCH v8 3/8] x86/tlb: fall back to flush all when meet a THP large page Alex Shi
                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

x86 has no instruction-level support for flush_tlb_range. Currently
flush_tlb_range is simply implemented by flushing the whole TLB, which is not
the best solution for all scenarios. In fact, if we use 'invlpg' to flush
just a few TLB entries, later accesses gain from the TLB entries that remain.

But the 'invlpg' instruction is itself costly: its execution time is
comparable to a cr3 rewrite, and even a bit more on SNB CPUs.

So, on a CPU with 512 4KB TLB entries, the balance point is at:
	(512 - X) * 100ns (assumed TLB refill cost) =
		X (number of flushed entries) * 100ns (assumed invlpg cost)

Here X is 256, i.e. half of the 512 entries.

But with the CPU's prefetcher and page miss handling unit, the real TLB
refill cost is far lower than 100ns for sequential accesses, and two HT
siblings in one core make memory access even faster when they touch the same
memory. So in this patch the per-entry flush is only used when the number of
target entries is less than 1/16 of the active TLB entries. Admittedly I have
no data to back the '1/16' ratio, so any suggestions are welcome.
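
A minimal sketch of that cut-off (illustration only; flush_one_by_one is a
hypothetical helper, the real check sits inside flush_tlb_range below and
uses the FLUSHALL_BAR divisor of 16):

/*
 * With e.g. 512 active dTLB entries and FLUSHALL_BAR = 16, only ranges
 * of at most 512/16 = 32 pages (128KB of 4KB pages) are flushed one
 * 'invlpg' at a time; anything larger falls back to a cr3 rewrite.
 */
static bool flush_one_by_one(unsigned long start, unsigned long end,
			     unsigned int act_entries)
{
	return (end - start) / PAGE_SIZE <= act_entries / FLUSHALL_BAR;
}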

As for hugetlb pages, presumably due to the smaller page tables and fewer
active TLB entries, my benchmark showed no benefit, so they are not optimized
for now.

My benchmark shows that in the ideal scenario read performance improves by
70 percent, while in the worst scenario read/write performance is similar to
the unpatched 3.4-rc4 kernel.

Here are the read numbers from my 2P * 4 cores * HT NHM EP machine, with THP
set to 'always':

Multi-threaded testing; the '-t' parameter is the thread count:
	       	        with patch   unpatched 3.4-rc4
./mprotect -t 1           14ns		24ns
./mprotect -t 2           13ns		22ns
./mprotect -t 4           12ns		19ns
./mprotect -t 8           14ns		16ns
./mprotect -t 16          28ns		26ns
./mprotect -t 32          54ns		51ns
./mprotect -t 128         200ns		199ns

Single process with sequential flushing and memory access:

		       	with patch   unpatched 3.4-rc4
./mprotect		    7ns			11ns
./mprotect -p 4096  -l 8 -n 10240
			    21ns		21ns

I also tried other benchmarks on Intel Core2/NHM/SNB EP and NHM EX machines.
No clear performance change on specjbb2005 with OpenJDK, nor on OLTP reads.

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/paravirt.h       |    5 +-
 arch/x86/include/asm/paravirt_types.h |    3 +-
 arch/x86/include/asm/tlbflush.h       |   23 +++-----
 arch/x86/include/asm/uv/uv.h          |    5 +-
 arch/x86/mm/tlb.c                     |   97 +++++++++++++++++++++++++++------
 arch/x86/platform/uv/tlb_uv.c         |    6 +-
 arch/x86/xen/mmu.c                    |   12 ++--
 include/trace/events/xen.h            |   12 +++--
 8 files changed, 114 insertions(+), 49 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6cbbabf..7e2c2a6 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -397,9 +397,10 @@ static inline void __flush_tlb_single(unsigned long addr)
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
 				    struct mm_struct *mm,
-				    unsigned long va)
+				    unsigned long start,
+				    unsigned long end)
 {
-	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
+	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4..600a5fcac9 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -250,7 +250,8 @@ struct pv_mmu_ops {
 	void (*flush_tlb_single)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
 				 struct mm_struct *mm,
-				 unsigned long va);
+				 unsigned long start,
+				 unsigned long end);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
 	int  (*pgd_alloc)(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 36a1a2a..33608d9 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -73,14 +73,10 @@ static inline void __flush_tlb_one(unsigned long addr)
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
  */
 
 #ifndef CONFIG_SMP
@@ -111,7 +107,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
-					   unsigned long va)
+					   unsigned long start,
+					   unsigned long end)
 {
 }
 
@@ -129,17 +126,14 @@ extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
 extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end);
 
 #define flush_tlb()	flush_tlb_current_task()
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	flush_tlb_mm(vma->vm_mm);
-}
-
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long va);
+				struct mm_struct *mm,
+				unsigned long start, unsigned long end);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -159,7 +153,8 @@ static inline void reset_lazy_tlbstate(void)
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
+#define flush_tlb_others(mask, mm, start, end)	\
+	native_flush_tlb_others(mask, mm, start, end)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 3bb9491..b47c2a8 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -15,7 +15,8 @@ extern void uv_nmi_init(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 						 struct mm_struct *mm,
-						 unsigned long va,
+						 unsigned long start,
+						 unsigned end,
 						 unsigned int cpu);
 
 #else	/* X86_UV */
@@ -26,7 +27,7 @@ static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
 static inline const struct cpumask *
 uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
-		    unsigned long va, unsigned int cpu)
+		    unsigned long start, unsigned long end, unsigned int cpu)
 { return cpumask; }
 
 #endif	/* X86_UV */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5e57e11..3b91c98 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -41,7 +41,8 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
 union smp_flush_state {
 	struct {
 		struct mm_struct *flush_mm;
-		unsigned long flush_va;
+		unsigned long flush_start;
+		unsigned long flush_end;
 		raw_spinlock_t tlbstate_lock;
 		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
 	};
@@ -156,10 +157,19 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
 		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-			if (f->flush_va == TLB_FLUSH_ALL)
+			if (f->flush_end == TLB_FLUSH_ALL
+					|| !cpu_has_invlpg)
 				local_flush_tlb();
-			else
-				__flush_tlb_one(f->flush_va);
+			else if (!f->flush_end)
+				__flush_tlb_single(f->flush_start);
+			else {
+				unsigned long addr;
+				addr = f->flush_start;
+				while (addr < f->flush_end) {
+					__flush_tlb_single(addr);
+					addr += PAGE_SIZE;
+				}
+			}
 		} else
 			leave_mm(cpu);
 	}
@@ -172,7 +182,8 @@ out:
 }
 
 static void flush_tlb_others_ipi(const struct cpumask *cpumask,
-				 struct mm_struct *mm, unsigned long va)
+				 struct mm_struct *mm, unsigned long start,
+				 unsigned long end)
 {
 	unsigned int sender;
 	union smp_flush_state *f;
@@ -185,7 +196,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 		raw_spin_lock(&f->tlbstate_lock);
 
 	f->flush_mm = mm;
-	f->flush_va = va;
+	f->flush_start = start;
+	f->flush_end = end;
 	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
 		/*
 		 * We have to send the IPI only to
@@ -199,24 +211,26 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 	}
 
 	f->flush_mm = NULL;
-	f->flush_va = 0;
+	f->flush_start = 0;
+	f->flush_end = 0;
 	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
 		raw_spin_unlock(&f->tlbstate_lock);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long va)
+				 struct mm_struct *mm, unsigned long start,
+				 unsigned long end)
 {
 	if (is_uv_system()) {
 		unsigned int cpu;
 
 		cpu = smp_processor_id();
-		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
+		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
 		if (cpumask)
-			flush_tlb_others_ipi(cpumask, mm, va);
+			flush_tlb_others_ipi(cpumask, mm, start, end);
 		return;
 	}
-	flush_tlb_others_ipi(cpumask, mm, va);
+	flush_tlb_others_ipi(cpumask, mm, start, end);
 }
 
 static void __cpuinit calculate_tlb_offset(void)
@@ -282,7 +296,7 @@ void flush_tlb_current_task(void)
 
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -297,12 +311,63 @@ void flush_tlb_mm(struct mm_struct *mm)
 			leave_mm(smp_processor_id());
 	}
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+
+	preempt_enable();
+}
+
+#define FLUSHALL_BAR	16
+
+void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm;
+
+	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
+	preempt_disable();
+	mm = vma->vm_mm;
+	if (current->active_mm == mm) {
+		if (current->mm) {
+			unsigned long addr, vmflag = vma->vm_flags;
+			unsigned act_entries, tlb_entries = 0;
+
+			if (vmflag & VM_EXEC)
+				tlb_entries = tlb_lli_4k[ENTRIES];
+			else
+				tlb_entries = tlb_lld_4k[ENTRIES];
+
+			act_entries = tlb_entries > mm->total_vm ?
+					mm->total_vm : tlb_entries;
 
+			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
+				local_flush_tlb();
+			else {
+				for (addr = start; addr < end;
+						addr += PAGE_SIZE)
+					__flush_tlb_single(addr);
+
+				if (cpumask_any_but(mm_cpumask(mm),
+					smp_processor_id()) < nr_cpu_ids)
+					flush_tlb_others(mm_cpumask(mm), mm,
+								start, end);
+				preempt_enable();
+				return;
+			}
+		} else {
+			leave_mm(smp_processor_id());
+		}
+	}
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
@@ -310,13 +375,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 
 	if (current->active_mm == mm) {
 		if (current->mm)
-			__flush_tlb_one(va);
+			__flush_tlb_one(start);
 		else
 			leave_mm(smp_processor_id());
 	}
 
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, va);
+		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
 
 	preempt_enable();
 }
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 59880af..f1bef8e 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1068,8 +1068,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * done.  The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-				struct mm_struct *mm, unsigned long va,
-				unsigned int cpu)
+				struct mm_struct *mm, unsigned long start,
+				unsigned end, unsigned int cpu)
 {
 	int locals = 0;
 	int remotes = 0;
@@ -1112,7 +1112,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-	bau_desc->payload.address = va;
+	bau_desc->payload.address = start;
 	bau_desc->payload.sending_cpu = cpu;
 	/*
 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3a73785..39ed567 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1244,7 +1244,8 @@ static void xen_flush_tlb_single(unsigned long addr)
 }
 
 static void xen_flush_tlb_others(const struct cpumask *cpus,
-				 struct mm_struct *mm, unsigned long va)
+				 struct mm_struct *mm, unsigned long start,
+				 unsigned long end)
 {
 	struct {
 		struct mmuext_op op;
@@ -1256,7 +1257,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	} *args;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
 
 	if (cpumask_empty(cpus))
 		return;		/* nothing to do */
@@ -1269,11 +1270,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
-	if (va == TLB_FLUSH_ALL) {
-		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	} else {
+	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+	if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
 		args->op.cmd = MMUEXT_INVLPG_MULTI;
-		args->op.arg1.linear_addr = va;
+		args->op.arg1.linear_addr = start;
 	}
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 92f1a79..15ba03b 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -397,18 +397,20 @@ TRACE_EVENT(xen_mmu_flush_tlb_single,
 
 TRACE_EVENT(xen_mmu_flush_tlb_others,
 	    TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
-		     unsigned long addr),
-	    TP_ARGS(cpus, mm, addr),
+		     unsigned long addr, unsigned long end),
+	    TP_ARGS(cpus, mm, addr, end),
 	    TP_STRUCT__entry(
 		    __field(unsigned, ncpus)
 		    __field(struct mm_struct *, mm)
 		    __field(unsigned long, addr)
+		    __field(unsigned long, end)
 		    ),
 	    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
 			   __entry->mm = mm;
-			   __entry->addr = addr),
-	    TP_printk("ncpus %d mm %p addr %lx",
-		      __entry->ncpus, __entry->mm, __entry->addr)
+			   __entry->addr = addr,
+			   __entry->end = end),
+	    TP_printk("ncpus %d mm %p addr %lx, end %lx",
+		      __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
 	);
 
 TRACE_EVENT(xen_mmu_write_cr3,
-- 
1.7.5.4



* [PATCH v8 3/8] x86/tlb: fall back to flush all when meet a THP large page
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
  2012-06-12  9:06 ` [PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
  2012-06-12  9:06 ` [PATCH v8 2/8] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-12  9:06 ` [PATCH v8 4/8] x86/tlb: add tlb_flushall_shift for specific CPU Alex Shi
                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

We don't need to flush large pages in PAGE_SIZE steps; that just wastes time.
And according to our benchmark, large pages do not benefit from the 'invlpg'
optimization anyway, so flushing the whole TLB is good enough for them.

The following results were measured on a 2 CPU * 4 cores * 2 HT NHM EP
machine, with THP set to 'always'.

Multi-threaded testing; the '-t' parameter is the thread count:
                       without this patch 	with this patch
./mprotect -t 1         14ns                       13ns
./mprotect -t 2         13ns                       13ns
./mprotect -t 4         12ns                       11ns
./mprotect -t 8         14ns                       10ns
./mprotect -t 16        28ns                       28ns
./mprotect -t 32        54ns                       52ns
./mprotect -t 128       200ns                      200ns

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/mm/tlb.c |   34 ++++++++++++++++++++++++++++++++++
 1 files changed, 34 insertions(+), 0 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3b91c98..184a02a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -318,12 +318,42 @@ void flush_tlb_mm(struct mm_struct *mm)
 
 #define FLUSHALL_BAR	16
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long has_large_page(struct mm_struct *mm,
+				 unsigned long start, unsigned long end)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long addr = ALIGN(start, HPAGE_SIZE);
+	for (; addr < end; addr += HPAGE_SIZE) {
+		pgd = pgd_offset(mm, addr);
+		if (likely(!pgd_none(*pgd))) {
+			pud = pud_offset(pgd, addr);
+			if (likely(!pud_none(*pud))) {
+				pmd = pmd_offset(pud, addr);
+				if (likely(!pmd_none(*pmd)))
+					if (pmd_large(*pmd))
+						return addr;
+			}
+		}
+	}
+	return 0;
+}
+#else
+static inline unsigned long has_large_page(struct mm_struct *mm,
+				 unsigned long start, unsigned long end)
+{
+	return 0;
+}
+#endif
 void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm;
 
 	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+flush_all:
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
@@ -346,6 +376,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
 			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
 				local_flush_tlb();
 			else {
+				if (has_large_page(mm, start, end)) {
+					preempt_enable();
+					goto flush_all;
+				}
 				for (addr = start; addr < end;
 						addr += PAGE_SIZE)
 					__flush_tlb_single(addr);
-- 
1.7.5.4



* [PATCH v8 4/8] x86/tlb: add tlb_flushall_shift for specific CPU
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
                   ` (2 preceding siblings ...)
  2012-06-12  9:06 ` [PATCH v8 3/8] x86/tlb: fall back to flush all when meet a THP large page Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-12  9:06 ` [PATCH v8 5/8] x86/tlb: add tlb_flushall_shift knob into debugfs Alex Shi
                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

Testing shows that different CPU types (microarchitectures and NUMA modes)
have different balance points between a full TLB flush and multiple invlpg
instructions, and that in some cases the TLB flush change does not help at all.

This patch adds an interface that lets x86 vendor developers set a different
shift for each CPU type.

On the machines at hand, the balance point is 16 entries on Romley-EP,
8 entries on Bloomfield NHM-EP, and 256 on an IVB mobile CPU, while on a
model 15 Core2 Xeon using invlpg does not help at all.

For untested machines, a conservative setting is used: the same as for NHM
CPUs.
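
A minimal sketch of how the shift is applied (illustration only; use_invlpg
is a hypothetical helper, the real check is the act_entries >>
tlb_flushall_shift comparison in flush_tlb_range below):

/*
 * With tlb_flushall_shift = 6 and 512 active entries, ranges of at
 * most 512 >> 6 = 8 pages use 'invlpg'; with shift = 5 the limit is
 * 16 pages; shift = -1 disables the per-page flush entirely.
 */
static bool use_invlpg(unsigned long npages, unsigned int act_entries,
		       s8 shift)
{
	if (shift < 0)
		return false;
	return npages <= (act_entries >> shift);
}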

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/processor.h |    2 ++
 arch/x86/kernel/cpu/common.c     |   14 ++++++++++++--
 arch/x86/kernel/cpu/intel.c      |   34 ++++++++++++++++++++++++++++++++++
 arch/x86/mm/tlb.c                |    7 +++----
 include/asm-generic/tlb.h        |    3 ++-
 5 files changed, 53 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 39b2bd4..d048cad 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -72,6 +72,8 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO];
 extern u16 __read_mostly tlb_lld_4k[NR_INFO];
 extern u16 __read_mostly tlb_lld_2m[NR_INFO];
 extern u16 __read_mostly tlb_lld_4m[NR_INFO];
+extern s8  __read_mostly tlb_flushall_shift;
+
 /*
  *  CPU type and hardware bug flags. Kept separately for each CPU.
  *  Members of this structure are referenced in head.S, so think twice
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 41acb77..39a6b01 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -459,16 +459,26 @@ u16 __read_mostly tlb_lld_4k[NR_INFO];
 u16 __read_mostly tlb_lld_2m[NR_INFO];
 u16 __read_mostly tlb_lld_4m[NR_INFO];
 
+/*
+ * tlb_flushall_shift shows the balance point in replacing cr3 write
+ * with multiple 'invlpg'. It will do this replacement when
+ *   flush_tlb_lines <= active_lines/2^tlb_flushall_shift.
+ * If tlb_flushall_shift is -1, means the replacement will be disabled.
+ */
+s8  __read_mostly tlb_flushall_shift = -1;
+
 void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	if (c->x86_vendor == X86_VENDOR_INTEL)
 		intel_cpu_detect_tlb(c);
 
 	printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
-		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
+		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n"	     \
+		"tlb_flushall_shift is 0x%x\n",
 		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
 		tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
-		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES]);
+		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
+		tlb_flushall_shift);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 28ecd1b..bb90754 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -610,6 +610,39 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc)
 	}
 }
 
+static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+{
+	if (!cpu_has_invlpg) {
+		tlb_flushall_shift = -1;
+		return;
+	}
+	switch ((c->x86 << 8) + c->x86_model) {
+	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+	case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
+	case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
+	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
+		tlb_flushall_shift = -1;
+		break;
+	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
+	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
+	case 0x625: /* 32 nm nehalem, "Clarkdale" */
+	case 0x62c: /* 32 nm nehalem, "Gulftown" */
+	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
+	case 0x62f: /* 32 nm Xeon E7 */
+		tlb_flushall_shift = 6;
+		break;
+	case 0x62a: /* SandyBridge */
+	case 0x62d: /* SandyBridge, "Romely-EP" */
+		tlb_flushall_shift = 5;
+		break;
+	case 0x63a: /* Ivybridge */
+		tlb_flushall_shift = 1;
+		break;
+	default:
+		tlb_flushall_shift = 6;
+	}
+}
+
 void __cpuinit intel_cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	int i, j, n;
@@ -630,6 +663,7 @@ void __cpuinit intel_cpu_detect_tlb(struct cpuinfo_x86 *c)
 		for (j = 1 ; j < 16 ; j++)
 			intel_tlb_lookup(desc[j]);
 	}
+	intel_tlb_flushall_shift_set(c);
 }
 
 static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 184a02a..2939f2f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -316,8 +316,6 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_enable();
 }
 
-#define FLUSHALL_BAR	16
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long has_large_page(struct mm_struct *mm,
 				 unsigned long start, unsigned long end)
@@ -352,7 +350,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm;
 
-	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
 flush_all:
 		flush_tlb_mm(vma->vm_mm);
 		return;
@@ -373,7 +371,8 @@ flush_all:
 			act_entries = tlb_entries > mm->total_vm ?
 					mm->total_vm : tlb_entries;
 
-			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
+			if ((end - start) >> PAGE_SHIFT >
+					act_entries >> tlb_flushall_shift)
 				local_flush_tlb();
 			else {
 				if (has_large_page(mm, start, end)) {
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f96a5b5..75e888b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -113,7 +113,8 @@ static inline int tlb_fast_mode(struct mmu_gather *tlb)
 
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
+							unsigned long end);
 int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
 /* tlb_remove_page
-- 
1.7.5.4



* [PATCH v8 5/8] x86/tlb: add tlb_flushall_shift knob into debugfs
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
                   ` (3 preceding siblings ...)
  2012-06-12  9:06 ` [PATCH v8 4/8] x86/tlb: add tlb_flushall_shift for specific CPU Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-12  9:06 ` [PATCH v8 6/8] x86/tlb: enable tlb flush range support for generic mmu and x86 Alex Shi
                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

The kernel replaces the cr3 rewrite with invlpg when
  tlb_flush_entries <= active_tlb_entries / 2^tlb_flushall_shift
If tlb_flushall_shift is -1, the kernel does not do this replacement.

Users can tune the value for their specific CPU/applications through the
tlb_flushall_shift knob under /sys/kernel/debug/x86.

Thanks to Borislav for providing the help text for CONFIG_DEBUG_TLBFLUSH.

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/Kconfig.debug |   19 +++++++++++++++++
 arch/x86/mm/tlb.c      |   51 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+), 0 deletions(-)

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index e46c214..b322f12 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -129,6 +129,25 @@ config DOUBLEFAULT
 	  option saves about 4k and might cause you much additional grey
 	  hair.
 
+config DEBUG_TLBFLUSH
+	bool "Set upper limit of TLB entries to flush one-by-one"
+	depends on DEBUG_KERNEL && (X86_64 || X86_INVLPG)
+	---help---
+
+	X86-only for now.
+
+	This option allows the user to tune the amount of TLB entries the
+	kernel flushes one-by-one instead of doing a full TLB flush. In
+	certain situations, the former is cheaper. This is controlled by the
+	tlb_flushall_shift knob under /sys/kernel/debug/x86. If you set it
+	to -1, the code flushes the whole TLB unconditionally. Otherwise,
+	for positive values of it, the kernel will use single TLB entry
+	invalidating instructions according to the following formula:
+
+	flush_entries <= active_tlb_entries / 2^tlb_flushall_shift
+
+	If in doubt, say "N".
+
 config IOMMU_DEBUG
 	bool "Enable IOMMU debugging"
 	depends on GART_IOMMU && DEBUG_KERNEL
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2939f2f..5911f61 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -12,6 +12,7 @@
 #include <asm/cache.h>
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
+#include <linux/debugfs.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
 			= { &init_mm, 0, };
@@ -430,3 +431,53 @@ void flush_tlb_all(void)
 {
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
+
+#ifdef CONFIG_DEBUG_TLBFLUSH
+static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%hd\n", tlb_flushall_shift);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t tlbflush_write_file(struct file *file,
+		 const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	ssize_t len;
+	s8 shift;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtos8(buf, 0, &shift))
+		return -EINVAL;
+
+	if (shift > 64)
+		return -EINVAL;
+
+	tlb_flushall_shift = shift;
+	return count;
+}
+
+static const struct file_operations fops_tlbflush = {
+	.read = tlbflush_read_file,
+	.write = tlbflush_write_file,
+	.llseek = default_llseek,
+};
+
+static int __cpuinit create_tlb_flushall_shift(void)
+{
+	if (cpu_has_invlpg) {
+		debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
+			arch_debugfs_dir, NULL, &fops_tlbflush);
+	}
+	return 0;
+}
+late_initcall(create_tlb_flushall_shift);
+#endif
-- 
1.7.5.4



* [PATCH v8 6/8] x86/tlb: enable tlb flush range support for generic mmu and x86
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
                   ` (4 preceding siblings ...)
  2012-06-12  9:06 ` [PATCH v8 5/8] x86/tlb: add tlb_flushall_shift knob into debugfs Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-12  9:06 ` [PATCH v8 7/8] x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR Alex Shi
                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

Not every tlb_flush really needs to evacuate all TLB entries; in munmap, for
example, a few 'invlpg' instructions are better for overall process
performance, since they leave most of the TLB entries in place for later
accesses.

The TLB interfaces in mm/memory.c are shared by all architectures, except
for a few generic mmu_gather ones that are protected by
HAVE_GENERIC_MMU_GATHER. So the flush range start/end is added under
HAVE_GENERIC_MMU_GATHER for the generic mmu implementation.
For x86, only tlb_flush() needs to be re-implemented.

This patch also rewrites flush_tlb_range for two purposes:
1. split it out into a flush_tlb_mm_range function;
2. clean it up to reduce line breaks, thanks to Borislav's input.

My benchmark 'mummap' (http://lkml.org/lkml/2012/5/17/59) shows that random
memory access on other CPUs speeds up by 0~50% on a 2P * 4 cores * HT NHM EP
machine while 'munmap' is running.

Thanks to Peter Zijlstra for the repeated reminders about keeping the code
safe across architectures!

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/tlb.h      |    9 +++-
 arch/x86/include/asm/tlbflush.h |   17 +++++-
 arch/x86/mm/tlb.c               |  112 ++++++++++++++++-----------------------
 include/asm-generic/tlb.h       |    2 +
 mm/memory.c                     |    9 +++
 5 files changed, 79 insertions(+), 70 deletions(-)

diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 829215f..4fef207 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -4,7 +4,14 @@
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define tlb_flush(tlb)							\
+{									\
+	if (tlb->fullmm == 0)						\
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\
+	else								\
+		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
+}
 
 #include <asm-generic/tlb.h>
 
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 33608d9..008043d 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -105,6 +105,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }
 
+static inline void flush_tlb_mm_range(struct mm_struct *mm,
+	   unsigned long start, unsigned long end, unsigned long vmflag)
+{
+	if (mm == current->active_mm)
+		__flush_tlb();
+}
+
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long start,
@@ -122,12 +129,16 @@ static inline void reset_lazy_tlbstate(void)
 
 #define local_flush_tlb() __flush_tlb()
 
+#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end)	\
+		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag);
 
 #define flush_tlb()	flush_tlb_current_task()
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5911f61..481737d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -301,23 +301,10 @@ void flush_tlb_current_task(void)
 	preempt_enable();
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	preempt_disable();
-
-	if (current->active_mm == mm) {
-		if (current->mm)
-			local_flush_tlb();
-		else
-			leave_mm(smp_processor_id());
-	}
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
-
-	preempt_enable();
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * It can find out the THP large page, or
+ * HUGETLB page in tlb_flush when THP disabled
+ */
 static inline unsigned long has_large_page(struct mm_struct *mm,
 				 unsigned long start, unsigned long end)
 {
@@ -339,68 +326,61 @@ static inline unsigned long has_large_page(struct mm_struct *mm,
 	}
 	return 0;
 }
-#else
-static inline unsigned long has_large_page(struct mm_struct *mm,
-				 unsigned long start, unsigned long end)
-{
-	return 0;
-}
-#endif
-void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm;
 
-	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
-flush_all:
-		flush_tlb_mm(vma->vm_mm);
-		return;
-	}
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag)
+{
+	unsigned long addr;
+	unsigned act_entries, tlb_entries = 0;
 
 	preempt_disable();
-	mm = vma->vm_mm;
-	if (current->active_mm == mm) {
-		if (current->mm) {
-			unsigned long addr, vmflag = vma->vm_flags;
-			unsigned act_entries, tlb_entries = 0;
+	if (current->active_mm != mm)
+		goto flush_all;
 
-			if (vmflag & VM_EXEC)
-				tlb_entries = tlb_lli_4k[ENTRIES];
-			else
-				tlb_entries = tlb_lld_4k[ENTRIES];
-
-			act_entries = tlb_entries > mm->total_vm ?
-					mm->total_vm : tlb_entries;
+	if (!current->mm) {
+		leave_mm(smp_processor_id());
+		goto flush_all;
+	}
 
-			if ((end - start) >> PAGE_SHIFT >
-					act_entries >> tlb_flushall_shift)
-				local_flush_tlb();
-			else {
-				if (has_large_page(mm, start, end)) {
-					preempt_enable();
-					goto flush_all;
-				}
-				for (addr = start; addr < end;
-						addr += PAGE_SIZE)
-					__flush_tlb_single(addr);
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
+					|| vmflag == VM_HUGETLB) {
+		local_flush_tlb();
+		goto flush_all;
+	}
 
-				if (cpumask_any_but(mm_cpumask(mm),
-					smp_processor_id()) < nr_cpu_ids)
-					flush_tlb_others(mm_cpumask(mm), mm,
-								start, end);
-				preempt_enable();
-				return;
-			}
-		} else {
-			leave_mm(smp_processor_id());
+	/* In modern CPU, last level tlb used for both data/ins */
+	if (vmflag & VM_EXEC)
+		tlb_entries = tlb_lli_4k[ENTRIES];
+	else
+		tlb_entries = tlb_lld_4k[ENTRIES];
+	/* Assume all of TLB entries was occupied by this task */
+	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+
+	/* tlb_flushall_shift is on balance point, details in commit log */
+	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+		local_flush_tlb();
+	else {
+		if (has_large_page(mm, start, end)) {
+			local_flush_tlb();
+			goto flush_all;
 		}
+		/* flush range by one by one 'invlpg' */
+		for (addr = start; addr < end;	addr += PAGE_SIZE)
+			__flush_tlb_single(addr);
+
+		if (cpumask_any_but(mm_cpumask(mm),
+				smp_processor_id()) < nr_cpu_ids)
+			flush_tlb_others(mm_cpumask(mm), mm, start, end);
+		preempt_enable();
+		return;
 	}
+
+flush_all:
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
-
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 {
 	struct mm_struct *mm = vma->vm_mm;
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 75e888b..ed6642a 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -86,6 +86,8 @@ struct mmu_gather {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch	*batch;
 #endif
+	unsigned long		start;
+	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
 				fast_mode  : 1; /* No batching   */
 
diff --git a/mm/memory.c b/mm/memory.c
index 1b7dc66..32c9943 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;
 
 	tlb->fullmm     = fullmm;
+	tlb->start	= -1UL;
+	tlb->end	= 0;
 	tlb->need_flush = 0;
 	tlb->fast_mode  = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
+	tlb->start = start;
+	tlb->end   = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ again:
 	 */
 	if (force_flush) {
 		force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+		tlb->start = addr;
+		tlb->end = end;
+#endif
 		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
-- 
1.7.5.4



* [PATCH v8 7/8] x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
                   ` (5 preceding siblings ...)
  2012-06-12  9:06 ` [PATCH v8 6/8] x86/tlb: enable tlb flush range support for generic mmu and x86 Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-12  9:06 ` [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg' Alex Shi
  2012-06-13  7:42 ` [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
  8 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

There are only 32 INVALIDATE_TLB_VECTORs in the kernel now, but modern x86
servers have more CPUs than that, which causes heavy lock contention during
TLB flushing.

Now the generic smp call function is used to replace them.
On an NHM EX machine with 4P * 8 cores * HT = 64 CPUs, hackbench pthread
gains 3% in performance.
There are no clear performance changes on NHM EP (16 CPUs), WSM EP (24 CPUs)
and SNB EP (32 CPUs) machines.
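
The remainder of the tlb.c rework is truncated in this excerpt. Roughly, the
per-vector IPI machinery is replaced by the generic call-function IPI along
the following lines (a sketch only, based on the flush logic from patch 2 and
the flush_tlb_info structure added below; the function names are illustrative,
not the verbatim diff):

/* runs on each target CPU via the generic CALL_FUNCTION_VECTOR IPI */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
			local_flush_tlb();
		else if (!f->flush_end)
			__flush_tlb_single(f->flush_start);
		else {
			unsigned long addr = f->flush_start;

			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
		}
	} else
		leave_mm(smp_processor_id());
}

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info = {
		.flush_mm	= mm,
		.flush_start	= start,
		.flush_end	= end,
	};

	/* one shared vector; no per-sender spinlock or vector hashing */
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}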

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/entry_arch.h  |    9 --
 arch/x86/include/asm/irq_vectors.h |   11 --
 arch/x86/kernel/entry_64.S         |   18 ---
 arch/x86/kernel/irqinit.c          |   73 -----------
 arch/x86/mm/tlb.c                  |  238 +++++++-----------------------------
 5 files changed, 45 insertions(+), 304 deletions(-)

diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 0baa628..40afa00 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -15,15 +15,6 @@ BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
-
-.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
-	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
-.if NUM_INVALIDATE_TLB_VECTORS > \idx
-BUILD_INTERRUPT3(invalidate_interrupt\idx,
-		 (INVALIDATE_TLB_VECTOR_START)+\idx,
-		 smp_invalidate_interrupt)
-.endif
-.endr
 #endif
 
 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4b44487..1508e51 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -119,17 +119,6 @@
  */
 #define LOCAL_TIMER_VECTOR		0xef
 
-/* up to 32 vectors used for spreading out TLB flushes: */
-#if NR_CPUS <= 32
-# define NUM_INVALIDATE_TLB_VECTORS	(NR_CPUS)
-#else
-# define NUM_INVALIDATE_TLB_VECTORS	(32)
-#endif
-
-#define INVALIDATE_TLB_VECTOR_END	(0xee)
-#define INVALIDATE_TLB_VECTOR_START	\
-	(INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
-
 #define NR_VECTORS			 256
 
 #define FPU_IRQ				  13
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7d65133..bcf28e1 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1048,24 +1048,6 @@ apicinterrupt LOCAL_TIMER_VECTOR \
 apicinterrupt X86_PLATFORM_IPI_VECTOR \
 	x86_platform_ipi smp_x86_platform_ipi
 
-#ifdef CONFIG_SMP
-	ALIGN
-	INTR_FRAME
-.irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
-	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
-.if NUM_INVALIDATE_TLB_VECTORS > \idx
-ENTRY(invalidate_interrupt\idx)
-	pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx)
-	jmp .Lcommon_invalidate_interrupt0
-	CFI_ADJUST_CFA_OFFSET -8
-END(invalidate_interrupt\idx)
-.endif
-.endr
-	CFI_ENDPROC
-apicinterrupt INVALIDATE_TLB_VECTOR_START, \
-	invalidate_interrupt0, smp_invalidate_interrupt
-#endif
-
 apicinterrupt THRESHOLD_APIC_VECTOR \
 	threshold_interrupt smp_threshold_interrupt
 apicinterrupt THERMAL_APIC_VECTOR \
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 252981a..6e03b0d 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -171,79 +171,6 @@ static void __init smp_intr_init(void)
 	 */
 	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
-	/* IPIs for invalidation */
-#define ALLOC_INVTLB_VEC(NR) \
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
-		invalidate_interrupt##NR)
-
-	switch (NUM_INVALIDATE_TLB_VECTORS) {
-	default:
-		ALLOC_INVTLB_VEC(31);
-	case 31:
-		ALLOC_INVTLB_VEC(30);
-	case 30:
-		ALLOC_INVTLB_VEC(29);
-	case 29:
-		ALLOC_INVTLB_VEC(28);
-	case 28:
-		ALLOC_INVTLB_VEC(27);
-	case 27:
-		ALLOC_INVTLB_VEC(26);
-	case 26:
-		ALLOC_INVTLB_VEC(25);
-	case 25:
-		ALLOC_INVTLB_VEC(24);
-	case 24:
-		ALLOC_INVTLB_VEC(23);
-	case 23:
-		ALLOC_INVTLB_VEC(22);
-	case 22:
-		ALLOC_INVTLB_VEC(21);
-	case 21:
-		ALLOC_INVTLB_VEC(20);
-	case 20:
-		ALLOC_INVTLB_VEC(19);
-	case 19:
-		ALLOC_INVTLB_VEC(18);
-	case 18:
-		ALLOC_INVTLB_VEC(17);
-	case 17:
-		ALLOC_INVTLB_VEC(16);
-	case 16:
-		ALLOC_INVTLB_VEC(15);
-	case 15:
-		ALLOC_INVTLB_VEC(14);
-	case 14:
-		ALLOC_INVTLB_VEC(13);
-	case 13:
-		ALLOC_INVTLB_VEC(12);
-	case 12:
-		ALLOC_INVTLB_VEC(11);
-	case 11:
-		ALLOC_INVTLB_VEC(10);
-	case 10:
-		ALLOC_INVTLB_VEC(9);
-	case 9:
-		ALLOC_INVTLB_VEC(8);
-	case 8:
-		ALLOC_INVTLB_VEC(7);
-	case 7:
-		ALLOC_INVTLB_VEC(6);
-	case 6:
-		ALLOC_INVTLB_VEC(5);
-	case 5:
-		ALLOC_INVTLB_VEC(4);
-	case 4:
-		ALLOC_INVTLB_VEC(3);
-	case 3:
-		ALLOC_INVTLB_VEC(2);
-	case 2:
-		ALLOC_INVTLB_VEC(1);
-	case 1:
-		ALLOC_INVTLB_VEC(0);
-		break;
-	}
-
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 481737d..2b5f506 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,34 +28,14 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
  *
  *	More scalable flush, from Andi Kleen
  *
- *	To avoid global state use 8 different call vectors.
- *	Each CPU uses a specific vector to trigger flushes on other
- *	CPUs. Depending on the received vector the target CPUs look into
- *	the right array slot for the flush data.
- *
- *	With more than 8 CPUs they are hashed to the 8 available
- *	vectors. The limited global vector space forces us to this right now.
- *	In future when interrupts are split into per CPU domains this could be
- *	fixed, at the cost of triggering multiple IPIs in some cases.
+ *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
-union smp_flush_state {
-	struct {
-		struct mm_struct *flush_mm;
-		unsigned long flush_start;
-		unsigned long flush_end;
-		raw_spinlock_t tlbstate_lock;
-		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
-	};
-	char pad[INTERNODE_CACHE_BYTES];
-} ____cacheline_internodealigned_in_smp;
-
-/* State is put into the per CPU data section, but padded
-   to a full cache line because other CPUs can access it and we don't
-   want false sharing in the per cpu data segment. */
-static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
-
-static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
+struct flush_tlb_info {
+	struct mm_struct *flush_mm;
+	unsigned long flush_start;
+	unsigned long flush_end;
+};
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -74,28 +54,25 @@ void leave_mm(int cpu)
 EXPORT_SYMBOL_GPL(leave_mm);
 
 /*
- *
  * The flush IPI assumes that a thread switch happens in this order:
  * [cpu0: the cpu that switches]
  * 1) switch_mm() either 1a) or 1b)
  * 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- *	Stop ipi delivery for the old mm. This is not synchronized with
- *	the other cpus, but smp_invalidate_interrupt ignore flush ipis
- *	for the wrong mm, and in the worst case we perform a superfluous
- *	tlb flush.
- * 1a2) set cpu mmu_state to TLBSTATE_OK
- *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- *	was in lazy tlb mode.
- * 1a3) update cpu active_mm
+ * 1a1) set cpu_tlbstate to TLBSTATE_OK
+ *	Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
+ *	if cpu0 was in lazy tlb mode.
+ * 1a2) update cpu active_mm
  *	Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+ * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
  *	Now the other cpus will send tlb flush ipis.
  * 1a4) change cr3.
+ * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
+ *	Stop ipi delivery for the old mm. This is not synchronized with
+ *	the other cpus, but flush_tlb_func ignore flush ipis for the wrong
+ *	mm, and in the worst case we perform a superfluous tlb flush.
  * 1b) thread switch without mm change
- *	cpu active_mm is correct, cpu0 already handles
- *	flush ipis.
- * 1b1) set cpu mmu_state to TLBSTATE_OK
+ *	cpu active_mm is correct, cpu0 already handles flush ipis.
+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
  * 1b2) test_and_set the cpu bit in cpu_vm_mask.
  *	Atomically set the bit [other cpus will start sending flush ipis],
  *	and test the bit.
@@ -108,186 +85,61 @@ EXPORT_SYMBOL_GPL(leave_mm);
  *   runs in kernel space, the cpu could load tlb entries for user space
  *   pages.
  *
- * The good news is that cpu mmu_state is local to each cpu, no
+ * The good news is that cpu_tlbstate is local to each cpu, no
  * write/read ordering problems.
  */
 
 /*
- * TLB flush IPI:
- *
+ * TLB flush function:
  * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
  * 2) Leave the mm if we are in the lazy tlb mode.
- *
- * Interrupts are disabled.
- */
-
-/*
- * FIXME: use of asmlinkage is not consistent.  On x86_64 it's noop
- * but still used for documentation purpose but the usage is slightly
- * inconsistent.  On x86_32, asmlinkage is regparm(0) but interrupt
- * entry calls in with the first parameter in %eax.  Maybe define
- * intrlinkage?
  */
-#ifdef CONFIG_X86_64
-asmlinkage
-#endif
-void smp_invalidate_interrupt(struct pt_regs *regs)
+static void flush_tlb_func(void *info)
 {
-	unsigned int cpu;
-	unsigned int sender;
-	union smp_flush_state *f;
-
-	cpu = smp_processor_id();
-	/*
-	 * orig_rax contains the negated interrupt vector.
-	 * Use that to determine where the sender put the data.
-	 */
-	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-	f = &flush_state[sender];
-
-	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
-		goto out;
-		/*
-		 * This was a BUG() but until someone can quote me the
-		 * line from the intel manual that guarantees an IPI to
-		 * multiple CPUs is retried _only_ on the erroring CPUs
-		 * its staying as a return
-		 *
-		 * BUG();
-		 */
-
-	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
-		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-			if (f->flush_end == TLB_FLUSH_ALL
-					|| !cpu_has_invlpg)
-				local_flush_tlb();
-			else if (!f->flush_end)
-				__flush_tlb_single(f->flush_start);
-			else {
-				unsigned long addr;
-				addr = f->flush_start;
-				while (addr < f->flush_end) {
-					__flush_tlb_single(addr);
-					addr += PAGE_SIZE;
-				}
-			}
-		} else
-			leave_mm(cpu);
-	}
-out:
-	ack_APIC_irq();
-	smp_mb__before_clear_bit();
-	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
-	smp_mb__after_clear_bit();
-	inc_irq_stat(irq_tlb_count);
-}
+	struct flush_tlb_info *f = info;
 
-static void flush_tlb_others_ipi(const struct cpumask *cpumask,
-				 struct mm_struct *mm, unsigned long start,
-				 unsigned long end)
-{
-	unsigned int sender;
-	union smp_flush_state *f;
-
-	/* Caller has disabled preemption */
-	sender = this_cpu_read(tlb_vector_offset);
-	f = &flush_state[sender];
-
-	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
-		raw_spin_lock(&f->tlbstate_lock);
-
-	f->flush_mm = mm;
-	f->flush_start = start;
-	f->flush_end = end;
-	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
-		/*
-		 * We have to send the IPI only to
-		 * CPUs affected.
-		 */
-		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
-			      INVALIDATE_TLB_VECTOR_START + sender);
-
-		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
-			cpu_relax();
-	}
+	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+		return;
+
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
+			local_flush_tlb();
+		else if (!f->flush_end)
+			__flush_tlb_single(f->flush_start);
+		else {
+			unsigned long addr;
+			addr = f->flush_start;
+			while (addr < f->flush_end) {
+				__flush_tlb_single(addr);
+				addr += PAGE_SIZE;
+			}
+		}
+	} else
+		leave_mm(smp_processor_id());
 
-	f->flush_mm = NULL;
-	f->flush_start = 0;
-	f->flush_end = 0;
-	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
-		raw_spin_unlock(&f->tlbstate_lock);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
 				 struct mm_struct *mm, unsigned long start,
 				 unsigned long end)
 {
+	struct flush_tlb_info info;
+	info.flush_mm = mm;
+	info.flush_start = start;
+	info.flush_end = end;
+
 	if (is_uv_system()) {
 		unsigned int cpu;
 
 		cpu = smp_processor_id();
 		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
 		if (cpumask)
-			flush_tlb_others_ipi(cpumask, mm, start, end);
+			smp_call_function_many(cpumask, flush_tlb_func,
+								&info, 1);
 		return;
 	}
-	flush_tlb_others_ipi(cpumask, mm, start, end);
-}
-
-static void __cpuinit calculate_tlb_offset(void)
-{
-	int cpu, node, nr_node_vecs, idx = 0;
-	/*
-	 * we are changing tlb_vector_offset for each CPU in runtime, but this
-	 * will not cause inconsistency, as the write is atomic under X86. we
-	 * might see more lock contentions in a short time, but after all CPU's
-	 * tlb_vector_offset are changed, everything should go normal
-	 *
-	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes !=0, we might
-	 * waste some vectors.
-	 **/
-	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
-		nr_node_vecs = 1;
-	else
-		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
-
-	for_each_online_node(node) {
-		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
-			nr_node_vecs;
-		int cpu_offset = 0;
-		for_each_cpu(cpu, cpumask_of_node(node)) {
-			per_cpu(tlb_vector_offset, cpu) = node_offset +
-				cpu_offset;
-			cpu_offset++;
-			cpu_offset = cpu_offset % nr_node_vecs;
-		}
-		idx++;
-	}
-}
-
-static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
-		unsigned long action, void *hcpu)
-{
-	switch (action & 0xf) {
-	case CPU_ONLINE:
-	case CPU_DEAD:
-		calculate_tlb_offset();
-	}
-	return NOTIFY_OK;
-}
-
-static int __cpuinit init_smp_flush(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
-		raw_spin_lock_init(&flush_state[i].tlbstate_lock);
-
-	calculate_tlb_offset();
-	hotcpu_notifier(tlb_cpuhp_notify, 0);
-	return 0;
+	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
 }
-core_initcall(init_smp_flush);
 
 void flush_tlb_current_task(void)
 {
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
                   ` (6 preceding siblings ...)
  2012-06-12  9:06 ` [PATCH v8 7/8] x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR Alex Shi
@ 2012-06-12  9:06 ` Alex Shi
  2012-06-13 14:56   ` Andi Kleen
  2012-06-13  7:42 ` [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
  8 siblings, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-06-12  9:06 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, rostedt, fweisbec
  Cc: jeremy, seto.hidetoshi, borislav.petkov, alex.shi, tony.luck,
	luto, riel, avi, len.brown, tj, akpm, cl, ak, jbeulich,
	eric.dumazet, akinobu.mita, cpw, penberg, steiner, viro,
	kamezawa.hiroyu, aarcange, rientjes, linux-kernel

This patch does flush_tlb_kernel_range by 'invlpg'. The performance cost
and gain were analyzed in my patch (x86/flush_tlb: try flush_tlb_single
one by one in flush_tlb_range). Now we move this logic into the kernel
part. The cost, multiple 'invlpg' executions, stays the same, but the
gain (less cost for refilling TLB entries) definitely increases.
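
A rough standalone illustration of the balance point applied below (the
numbers are hypothetical; in the kernel act_entries comes from
tlb_lld_4k[ENTRIES] and tlb_flushall_shift from the per-CPU tuning in the
earlier patches of this series):

#include <stdio.h>

int main(void)
{
	unsigned int act_entries = 512;	/* assumed last-level TLB entries */
	int tlb_flushall_shift = 6;	/* assumed tuning value */
	unsigned long nr_pages;

	for (nr_pages = 1; nr_pages <= 32; nr_pages *= 2)
		printf("%2lu pages -> %s\n", nr_pages,
		       nr_pages > (act_entries >> tlb_flushall_shift) ?
		       "flush all" : "invlpg per page");
	return 0;	/* with these numbers the cut-over is above 8 pages */
}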

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/tlbflush.h |   13 +++++++------
 arch/x86/mm/tlb.c               |   30 ++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 008043d..f434842 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -123,6 +123,12 @@ static inline void reset_lazy_tlbstate(void)
 {
 }
 
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();
+}
+
 #else  /* SMP */
 
 #include <asm/smp.h>
@@ -139,6 +145,7 @@ extern void flush_tlb_current_task(void);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #define flush_tlb()	flush_tlb_current_task()
 
@@ -168,10 +175,4 @@ static inline void reset_lazy_tlbstate(void)
 	native_flush_tlb_others(mask, mm, start, end)
 #endif
 
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
 #endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2b5f506..fa78df9 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -264,6 +264,36 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
+static void do_kernel_range_flush(void *info)
+{
+	struct flush_tlb_info *f = info;
+	unsigned long addr;
+
+	/* flush range by one by one 'invlpg' */
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+		__flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned act_entries;
+	struct flush_tlb_info info;
+
+	/* In modern CPU, last level tlb used for both data/ins */
+	act_entries = tlb_lld_4k[ENTRIES];
+
+	/* tlb_flushall_shift is on balance point, details in commit log */
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else {
+		info.flush_start = start;
+		info.flush_end = end;
+		on_each_cpu(do_kernel_range_flush, &info, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 0/8] x86 TLB flush range optimizing
  2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
                   ` (7 preceding siblings ...)
  2012-06-12  9:06 ` [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg' Alex Shi
@ 2012-06-13  7:42 ` Alex Shi
  2012-06-21  5:27   ` Alex Shi
  8 siblings, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-06-13  7:42 UTC (permalink / raw)
  To: Alex Shi
  Cc: tglx, mingo, hpa, arnd, rostedt, fweisbec, jeremy,
	seto.hidetoshi, borislav.petkov, tony.luck, luto, riel, avi,
	len.brown, tj, akpm, cl, ak, jbeulich, eric.dumazet,
	akinobu.mita, cpw, penberg, steiner, viro, kamezawa.hiroyu,
	aarcange, rientjes, linux-kernel

On 06/12/2012 05:06 PM, Alex Shi wrote:

> This version patches rebases on 3.5-rc2 kernel. It does some code clean up,
> change patches sequency to make it looks more logical.
> It also includes xen code change according to Jan Beulich's comments,
> includes comments update of tlb flushing IPI according to PeterZ's suggestions.
> 
> And Adding the flush_tlb_kernel_range support by invlpg.
> 
> Thanks for all comments! and appreciate for more. :) 


Any comments on this patch set, especially on the 8th patch?

> 
> Alex Shi
> 
> [PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU
> [PATCH v8 2/8] x86/flush_tlb: try flush_tlb_single one by one in
> [PATCH v8 3/8] x86/tlb: fall back to flush all when meet a THP large
> [PATCH v8 4/8] x86/tlb: add tlb_flushall_shift for specific CPU
> [PATCH v8 5/8] x86/tlb: add tlb_flushall_shift knob into debugfs
> [PATCH v8 6/8] x86/tlb: enable tlb flush range support for generic
> [PATCH v8 7/8] x86/tlb: replace INVALIDATE_TLB_VECTOR by
> [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
  2012-06-12  9:06 ` [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg' Alex Shi
@ 2012-06-13 14:56   ` Andi Kleen
  2012-06-14  1:10     ` Alex Shi
  0 siblings, 1 reply; 17+ messages in thread
From: Andi Kleen @ 2012-06-13 14:56 UTC (permalink / raw)
  To: Alex Shi
  Cc: tglx, mingo, hpa, arnd, rostedt, fweisbec, jeremy,
	seto.hidetoshi, borislav.petkov, tony.luck, luto, riel, avi,
	len.brown, tj, akpm, cl, jbeulich, eric.dumazet, akinobu.mita,
	cpw, penberg, steiner, viro, kamezawa.hiroyu, aarcange, rientjes,
	linux-kernel

On Tue, Jun 12, 2012 at 05:06:45PM +0800, Alex Shi wrote:
> This patch do flush_tlb_kernel_range by 'invlpg'. The performance pay
> and gain was analyzed in my patch (x86/flush_tlb: try flush_tlb_single
> one by one in flush_tlb_range). Now we move this logical into kernel
> part. The pay is multiple 'invlpg' execution cost, that is same. but
>  the gain(cost reducing of TLB entries refilling) is absoulutely
> increased.

The subtle point is whether INVLPG flushes global pages or not.
After some digging I found a sentence in the SDM that says it does.
So it may be safe.

What does it improve?

-Andi

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
  2012-06-13 14:56   ` Andi Kleen
@ 2012-06-14  1:10     ` Alex Shi
  2012-06-14  1:26       ` Alex Shi
  0 siblings, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-06-14  1:10 UTC (permalink / raw)
  To: Andi Kleen, android-kernel
  Cc: tglx, mingo, hpa, arnd, rostedt, fweisbec, jeremy,
	seto.hidetoshi, borislav.petkov, tony.luck, luto, riel, avi,
	len.brown, tj, akpm, cl, jbeulich, eric.dumazet, akinobu.mita,
	cpw, penberg, steiner, viro, kamezawa.hiroyu, aarcange, rientjes,
	linux-kernel

On 06/13/2012 10:56 PM, Andi Kleen wrote:

> On Tue, Jun 12, 2012 at 05:06:45PM +0800, Alex Shi wrote:
>> This patch do flush_tlb_kernel_range by 'invlpg'. The performance pay
>> and gain was analysed in my patch (x86/flush_tlb: try flush_tlb_single
>> one by one in flush_tlb_range). Now we move this logical into kernel
>> part. The pay is multiple 'invlpg' execution cost, that is same. but
>>  the gain(cost reducing of TLB entries refilling) is absolutely
>> increased.
> 
> The subtle point is whether INVLPG flushes global pages or not.
> After some digging I found a sentence in the SDM that says it does.
> So it may be safe.


Many thanks for your time!

> 
> What does it improve?


I have no specific benchmark for this, partly because the gain theory was
already proved: it is the same as the earlier flush of a user process's
page table.

The main user of kernel TLB range flushing is vmalloc, and the Android
binder IPC subsystem uses it (drivers/staging/android/binder.c).

I am wondering if it can help Android here,
so I added a cc to android-kernel@googlegroups.com.

> -Andi



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
  2012-06-14  1:10     ` Alex Shi
@ 2012-06-14  1:26       ` Alex Shi
  2012-06-14  2:10         ` Alex Shi
       [not found]         ` <4FD93DC7.8020501-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
  0 siblings, 2 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-14  1:26 UTC (permalink / raw)
  To: Andi Kleen, linux-tegra, linux-omap
  Cc: tglx, mingo, hpa, arnd, rostedt, fweisbec, jeremy,
	seto.hidetoshi, borislav.petkov, tony.luck, luto, riel, avi,
	len.brown, tj, akpm, cl, jbeulich, eric.dumazet, akinobu.mita,
	cpw, penberg, steiner, viro, kamezawa.hiroyu, aarcange, rientjes,
	linux-kernel

On 06/14/2012 09:10 AM, Alex Shi wrote:

> On 06/13/2012 10:56 PM, Andi Kleen wrote:
> 
>> On Tue, Jun 12, 2012 at 05:06:45PM +0800, Alex Shi wrote:
>>> This patch do flush_tlb_kernel_range by 'invlpg'. The performance pay
>>> and gain was analysed in my patch (x86/flush_tlb: try flush_tlb_single
>>> one by one in flush_tlb_range). Now we move this logical into kernel
>>> part. The pay is multiple 'invlpg' execution cost, that is same. but
>>>  the gain(cost reducing of TLB entries refilling) is absolutely
>>> increased.
>>
>> The subtle point is whether INVLPG flushes global pages or not.
>> After some digging I found a sentence in the SDM that says it does.
>> So it may be safe.
> 
> 
> Many thanks for your time!
> 
>>
>> What does it improve?
> 
> 
> I have not specific benchmark for this. partly due to the gain theory
> was proved since it is same as previous user process's page table flush.
> 
> The user of tlb kernel flush in kernel is vmalloc. and Android binder
> IPC subsystem is using it(drivers/staging/android/binder.c)
> 
> I am wondering if it can help Andriod on this?
> So, add cc to android-kernel@googlegroups.com


Sorry, the Android list rejects postings from non-subscribers, so cc to
linux-omap@vger.kernel.org and linux-tegra@vger.kernel.org instead.

> 
>> -Andi
> 
> 

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
  2012-06-14  1:26       ` Alex Shi
@ 2012-06-14  2:10         ` Alex Shi
       [not found]         ` <4FD93DC7.8020501-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
  1 sibling, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-14  2:10 UTC (permalink / raw)
  To: Andi Kleen, linux-tegra, linux-omap
  Cc: tglx, mingo, hpa, arnd, rostedt, fweisbec, jeremy,
	seto.hidetoshi, borislav.petkov, tony.luck, luto, riel, avi,
	len.brown, tj, akpm, cl, jbeulich, eric.dumazet, akinobu.mita,
	cpw, penberg, steiner, viro, kamezawa.hiroyu, aarcange, rientjes,
	linux-kernel

On 06/14/2012 09:26 AM, Alex Shi wrote:

> On 06/14/2012 09:10 AM, Alex Shi wrote:
> 
>> On 06/13/2012 10:56 PM, Andi Kleen wrote:
>>
>>> On Tue, Jun 12, 2012 at 05:06:45PM +0800, Alex Shi wrote:
>>>> This patch do flush_tlb_kernel_range by 'invlpg'. The performance pay
>>>> and gain was analysed in my patch (x86/flush_tlb: try flush_tlb_single
>>>> one by one in flush_tlb_range). Now we move this logical into kernel
>>>> part. The pay is multiple 'invlpg' execution cost, that is same. but
>>>>  the gain(cost reducing of TLB entries refilling) is absolutely
>>>> increased.
>>>
>>> The subtle point is whether INVLPG flushes global pages or not.
>>> After some digging I found a sentence in the SDM that says it does.
>>> So it may be safe.
>>
>>
>> Many thanks for your time!
>>
>>>
>>> What does it improve?
>>
>>
>> I have not specific benchmark for this. partly due to the gain theory
>> was proved since it is same as previous user process's page table flush.
>>
>> The user of tlb kernel flush in kernel is vmalloc. and Android binder
>> IPC subsystem is using it(drivers/staging/android/binder.c)
>>
>> I am wondering if it can help Andriod on this?
>> So, add cc to android-kernel@googlegroups.com
> 
> 
> Sorry, Andriod reject posting without register, so cc to
> linux-omap@vger.kernel.org and linux-tegra@vger.kernel.org instead.


Oops, I forgot the architecture difference again.
This will help x86 Android, not ARM systems. Please ignore the above two mailing lists. :(

> 
>>
>>> -Andi
>>
>>
> 
> 

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
  2012-06-14  1:26       ` Alex Shi
@ 2012-06-21  5:25             ` Alex Shi
       [not found]         ` <4FD93DC7.8020501-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
  1 sibling, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-21  5:25 UTC (permalink / raw)
  To: Andi Kleen, linux-tegra-u79uwXL29TY76Z2rM5mHXA,
	linux-omap-u79uwXL29TY76Z2rM5mHXA
  Cc: tglx-hfZtesqFncYOwBW4kG4KsQ, mingo-H+wXaHxf7aLQT0dZR+AlfA,
	hpa-YMNOUZJC4hwAvxtiuMwx3w, arnd-r2nGTMty4D4,
	rostedt-nx8X9YLhiw1AfugRpC6u6w, fweisbec-Re5JQEeQqe8AvxtiuMwx3w,
	jeremy-TSDbQ3PG+2Y, seto.hidetoshi-+CUm20s59erQFUHtdCDX3A,
	borislav.petkov-5C7GfCeVMHo, tony.luck-ral2JQCrhuEAvxtiuMwx3w,
	luto-3s7WtUTddSA, riel-H+wXaHxf7aLQT0dZR+AlfA,
	avi-H+wXaHxf7aLQT0dZR+AlfA, len.brown-ral2JQCrhuEAvxtiuMwx3w,
	tj-DgEjT+Ai2ygdnm+yROfE0A, akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
	cl-gkYfJU5Cukgdnm+yROfE0A, jbeulich-IBi9RG/b67k,
	eric.dumazet-Re5JQEeQqe8AvxtiuMwx3w,
	akinobu.mita-Re5JQEeQqe8AvxtiuMwx3w, cpw-sJ/iWh9BUns,
	penberg-DgEjT+Ai2ygdnm+yROfE0A, steiner-sJ/iWh9BUns,
	viro-RmSDqhL/yNMiFSDQTTA3OLVCufUGDwFn,
	kamezawa.hiroyu-+CUm20s59erQFUHtdCDX3A,
	aarcange-H+wXaHxf7aLQT0dZR+AlfA, rientjes-hpIqsD4AKlfQT0dZR+AlfA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA

On 06/14/2012 09:26 AM, Alex Shi wrote:

> On 06/14/2012 09:10 AM, Alex Shi wrote:
> 
>> On 06/13/2012 10:56 PM, Andi Kleen wrote:
>>
>>> On Tue, Jun 12, 2012 at 05:06:45PM +0800, Alex Shi wrote:
>>>> This patch do flush_tlb_kernel_range by 'invlpg'. The performance pay
>>>> and gain was analysed in my patch (x86/flush_tlb: try flush_tlb_single
>>>> one by one in flush_tlb_range). Now we move this logical into kernel
>>>> part. The pay is multiple 'invlpg' execution cost, that is same. but
>>>>  the gain(cost reducing of TLB entries refilling) is absolutely
>>>> increased.
>>>
>>> The subtle point is whether INVLPG flushes global pages or not.
>>> After some digging I found a sentence in the SDM that says it does.
>>> So it may be safe.
>>
>>
>> Many thanks for your time!
>>
>>>
>>> What does it improve?
>>
>>




I just wrote a rough kernel module that allocates some page arrays in the kernel and then maps them to a vaddr with 'vmap'.

Then my macro benchmark injects an 'unmap_kernel_range' request through a sysfs interface while doing random memory accesses at user level during that time.

On my NHM EP (2P * 4 cores * HT):

Without this patch, the memory access with 4 threads costs ~12 ns per access.
With this patch, the memory access with 4 threads costs ~9 ns per access.

As the thread count increases the benefit becomes smaller and nearly disappears once the thread count reaches 256.

But there is no regression.

The rough user macro-benchmark and kernel module are below:

--- kernel module--

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/sysfs.h>
#include <linux/hrtimer.h>
#include <linux/device.h>
#include <linux/cpu.h>

MODULE_LICENSE("Dual BSD/GPL");

/* 
 * $cat Makefile 
 * obj-m := modvmalloc.o
 *
 * compile command:
 *  #cd linux; make /home/alexs/exec/modules/modvmalloc.ko 
 */
#define NR_PAGES	(4)
#define NR_BLOCKS	(1024)

struct block {
	struct page ** page_array; 
	void *vaddr;
	int page_count;
};
struct block *block;

static int blocks = NR_BLOCKS;
module_param(blocks, uint, 0400);
MODULE_PARM_DESC(blocks, "map unmap blocks number ");

static struct page **relay_alloc_page_array(unsigned int nr_pages) 
{ 
	const size_t pa_size = NR_PAGES * sizeof(struct page *); 
	if (pa_size > PAGE_SIZE) 
		return vzalloc(pa_size); 
	return kzalloc(pa_size, GFP_KERNEL); 
} 

static void relay_free_page_array(struct page **array) 
{ 
	if (is_vmalloc_addr(array)) 
		vfree(array); 
	else
		kfree(array);
}

static void vmap_unmap(void)
{
	//purge_vmap_area_lazy();
	//vm_unmap_aliases();
	int i;

	/* unmap every vmap'ed block; unmap_kernel_range() also flushes the TLB range */
	for (i = 0; i < blocks; i++)
		unmap_kernel_range((unsigned long)(block[i].vaddr), NR_PAGES*PAGE_SIZE);
}

// ---------------
long vmap_num = 0;

static ssize_t __vmap_num_store(const char *buf,
		size_t count, int smt)
{
	long factor = 0;
	long i;
	unsigned long start, stop;

	if (sscanf(buf, "%ld", &factor) != 1)
		return -EINVAL;

	vmap_num = factor;
	start = ktime_to_ns(ktime_get());

	vmap_unmap();

	stop = ktime_to_ns(ktime_get());
	i = blocks;
	printk(KERN_ERR "vunmap %ld times cost %ld ns/time\n", 
			i, (stop - start)/i);
	return count;
}

static ssize_t vmap_num_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%ld\n", vmap_num);
}
static ssize_t vmap_num_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	return __vmap_num_store(buf, count, 0);
}

DEVICE_ATTR(vmap_num, 0644,
		vmap_num_show,
		vmap_num_store);

int create_sysfs_vmap_num(struct device *dev)
{
	return device_create_file(dev, &dev_attr_vmap_num);
}

static int mapunmap_init(void){
	long i,j,k;

	create_sysfs_vmap_num(cpu_subsys.dev_root);
	block = kmalloc(sizeof(struct block)*blocks, GFP_KERNEL);

	for (k=0; k< blocks; k++) {
		block[k].page_count = 0;
		block[k].page_array = relay_alloc_page_array(NR_PAGES);
		if (!block[k].page_array)
			return -1;

		for (i = 0; i < NR_PAGES; i++) {
			block[k].page_array[i] = alloc_page(GFP_KERNEL);
			if (unlikely(!block[k].page_array[i])) {
				printk(KERN_ERR "\talloc page error \n");
				goto depopulate;
			}
		}

		if (i!=NR_PAGES)	goto depopulate;

		block[k].page_count = i;
		block[k].vaddr = vmap(block[k].page_array, NR_PAGES, VM_MAP, PAGE_KERNEL);
		if (!(block[k].vaddr)) {
			printk(KERN_ERR "\t\t vmap error !\n");
			goto depopulate;
		}
	}
	printk(KERN_INFO "vmalloc module init OK \n");
	return 0;

depopulate:
	/* free the pages and page arrays of the blocks set up so far */
	for (i = 0; i < k; i++)
		if (block[i].page_count != 0) {
			for (j = 0; j < block[i].page_count; j++)
				__free_page(block[i].page_array[j]);
			relay_free_page_array(block[i].page_array);
		}
	printk(KERN_INFO "vmalloc module init fail\n");
	return -1;
}


static void mapunmap_exit(void){
	long i, j;

	printk(KERN_INFO "bye! this is test module\n");
	device_remove_file(cpu_subsys.dev_root, &dev_attr_vmap_num);

	for (i = 0; i < blocks; i++)
		if (block[i].page_count != 0) {
			for (j = 0; j < block[i].page_count; j++)
				__free_page(block[i].page_array[j]);
			relay_free_page_array(block[i].page_array);
		}
}


module_init(mapunmap_init);
module_exit(mapunmap_exit);

--- benchmark ---

/*
   maccess.c
   This is a macrobenchmark for TLB flush range testing.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Copyright (C) Intel 2012
   Copyright Alex Shi alex.shi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org

   gcc -o maccess maccess.c -lrt -lpthread -O2

    #perf stat -e r881,r882,r884 -e r801,r802,r810,r820,r840,r880,r807 -e rc01 -e r4901,r4902,r4910,r4920,r4940,r4980 -e r5f01  -e rbd01,rdb20  -e r4f02 -e r8004,r8201,r8501,r8502,r8504,r8510,r8520,r8540,r8580  -e rae01,rc820,rc102,rc900 -e r8600  -e rcb10  ./maccess 
*/

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <time.h>
#include <sys/types.h>
#include <pthread.h>

#define FILE_SIZE	(1024*1024*1024)

#define PAGE_SIZE 	(4096)
#define HPAGE_SIZE 	(4096*512)

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000
#endif


long getnsec(clockid_t clockid) {
        struct timespec ts;
        if (clock_gettime(clockid, &ts) == -1)
                perror("clock_gettime failed");
        return (long) ts.tv_sec * 1000000000 + (long) ts.tv_nsec;
}

//data for threads
struct data{
	int pagenum;
	void *startaddr;
	int rw;
	int loop;
};
volatile int * threadstart;
//thread for memory accessing
void *accessmm(void *data){
	struct data *d = data;
	long *actimes;
	char x;
	int i, k;
	int randn[PAGE_SIZE];
	
	for (i=0;i<PAGE_SIZE; i++)
		randn[i] = rand();

	actimes = malloc(sizeof(long));

	while (*threadstart == 0 )
		usleep(1);

	if (d->rw == 0)
		for (*actimes=0; *threadstart == 1; (*actimes)++)
			for (k=0; k < d->pagenum; k++)
				x = *(volatile char *)(d->startaddr + randn[k]%FILE_SIZE); 
	else
		for (*actimes=0; *threadstart == 1; (*actimes)++)
			for (k=0; k < d->pagenum; k++)
				*(char *)(d->startaddr + randn[k]%FILE_SIZE) = 1; 
	return actimes;
}

int main(int argc, char *argv[])
{
        static  char            optstr[] = "p:w:ht:s:";
	int s = 1;	/* */
	int p = 512;	/* default accessed page number, after maccess */
	int er = 0, rw = 0, h = 0, t = 2; /* d: debug; h: use huge page; t thread number */
	int pagesize = PAGE_SIZE; /*default for regular page */
	volatile char x;
	long protindex = 0;

	int i, j, k, c;
	void *m1, *startaddr;
	unsigned long *startaddr2[1024*512];
	volatile void *tempaddr;
	clockid_t clockid = CLOCK_MONOTONIC;
	unsigned long start, stop, mptime, actime;
	int randn[PAGE_SIZE];

	pthread_t	pid[1024];
	void * res;
	struct data data;

	char command[1024];

	for (i=0;i<PAGE_SIZE; i++)
		randn[i] = rand();

        while ((c = getopt(argc, argv, optstr)) != EOF)
                switch (c) {
                case 's':
                        s = atoi(optarg);
                        break;
                case 'p':
                        p = atoi(optarg);
                        break;
                case 'h':
                        h = 1;
                        break;
                case 'w':
                        rw = atoi(optarg);
                        break;
                case 't':
                        t = atoi(optarg);
                        break;
                case '?':
                        er = 1;
                        break;
                }
        if (er) {
                printf("usage: %s %s\n", argv[0], optstr);
                exit(1);
	}

	printf("pid is %d, thread number %d active %d seconds, access page num %d\n", getpid(), t, s, p);
	if (h == 0){
		startaddr = mmap(0, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
		pagesize = PAGE_SIZE;
	} else {
		startaddr = mmap(0, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED | MAP_HUGETLB, -1, 0);
		pagesize = HPAGE_SIZE;
	}

	start = getnsec(clockid);
	//access whole memory, will generate many page faults 
	for (tempaddr = startaddr; tempaddr < startaddr + FILE_SIZE; tempaddr += pagesize)
		memset((char *)tempaddr, 0, 1);
        stop = getnsec(clockid);

	threadstart = malloc(sizeof(int));
	*threadstart = 0;
	data.pagenum = p; data.startaddr = startaddr; data.rw = rw;
	for (i=0; i< t; i++)
		if(pthread_create(&pid[i], NULL, accessmm, &data))
			perror("pthread create");
	//wait for randn[] filling.
	sleep(1);

	mptime = actime = 0;
	sprintf(command, "sudo sh -c 'echo %d > /sys/devices/system/cpu/vmap_num'", s);
	printf("%s\n", command);

	start = getnsec(clockid);
	//kick threads, let them running.
	*threadstart = 1;

	system(command);
	*threadstart = 0;

	stop = getnsec(clockid);
	mptime += stop - start;

	//get threads' result.
	for (i=0; i< t; i++) {
		if (pthread_join(pid[i], &res))
			perror("pthread_join");
		actime += *(long*)res;
	}
end:
	printf("maccess %ld ms, memory access %ld times/thread/ms, cost %ldns/time\n",
		 mptime/1000000, actime*p*1000000/t/mptime, mptime*t/(actime*p));
	exit(0);
}
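
For reference, the intended sequence with these two pieces is: build and
insmod the module (the Makefile comment above assumes it is built as
modvmalloc.ko), then run ./maccess with the desired thread count ('-t');
maccess echoes into /sys/devices/system/cpu/vmap_num, which triggers the
unmap_kernel_range() loop in the module while the user threads keep doing
random accesses.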

> 
>>
>>> -Andi
>>
>>
> 
> 

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
@ 2012-06-21  5:25             ` Alex Shi
  0 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-21  5:25 UTC (permalink / raw)
  To: Andi Kleen, linux-tegra, linux-omap
  Cc: tglx, mingo, hpa, arnd, rostedt, fweisbec, jeremy,
	seto.hidetoshi, borislav.petkov, tony.luck, luto, riel, avi,
	len.brown, tj, akpm, cl, jbeulich, eric.dumazet, akinobu.mita,
	cpw, penberg, steiner, viro, kamezawa.hiroyu, aarcange, rientjes,
	linux-kernel

On 06/14/2012 09:26 AM, Alex Shi wrote:

> On 06/14/2012 09:10 AM, Alex Shi wrote:
> 
>> On 06/13/2012 10:56 PM, Andi Kleen wrote:
>>
>>> On Tue, Jun 12, 2012 at 05:06:45PM +0800, Alex Shi wrote:
>>>> This patch do flush_tlb_kernel_range by 'invlpg'. The performance pay
>>>> and gain was analysed in my patch (x86/flush_tlb: try flush_tlb_single
>>>> one by one in flush_tlb_range). Now we move this logical into kernel
>>>> part. The pay is multiple 'invlpg' execution cost, that is same. but
>>>>  the gain(cost reducing of TLB entries refilling) is absolutely
>>>> increased.
>>>
>>> The subtle point is whether INVLPG flushes global pages or not.
>>> After some digging I found a sentence in the SDM that says it does.
>>> So it may be safe.
>>
>>
>> Many thanks for your time!
>>
>>>
>>> What does it improve?
>>
>>




I just wrote a rough kernel module that allocates some page arrays in the kernel and then maps them to a vaddr with 'vmap'.

Then my macro benchmark injects an 'unmap_kernel_range' request through a sysfs interface while doing random memory accesses at user level during that time.

On my NHM EP (2P * 4 cores * HT):

Without this patch, the memory access with 4 threads costs ~12 ns per access.
With this patch, the memory access with 4 threads costs ~9 ns per access.

As the thread count increases the benefit becomes smaller and nearly disappears once the thread count reaches 256.

But there is no regression.

The rough user macro-benchmark and kernel module are below:

--- kernel module--

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/sysfs.h>
#include <linux/hrtimer.h>
#include <linux/device.h>
#include <linux/cpu.h>

MODULE_LICENSE("Dual BSD/GPL");

/* 
 * $cat Makefile 
 * obj-m := modvmalloc.o
 *
 * compile command:
 *  #cd linux; make /home/alexs/exec/modules/modvmalloc.ko 
 */
#define NR_PAGES	(4)
#define NR_BLOCKS	(1024)

struct block {
	struct page ** page_array; 
	void *vaddr;
	int page_count;
};
struct block *block;

static int blocks = NR_BLOCKS;
module_param(blocks, uint, 0400);
MODULE_PARM_DESC(blocks, "map unmap blocks number ");

static struct page **relay_alloc_page_array(unsigned int nr_pages) 
{ 
	const size_t pa_size = NR_PAGES * sizeof(struct page *); 
	if (pa_size > PAGE_SIZE) 
		return vzalloc(pa_size); 
	return kzalloc(pa_size, GFP_KERNEL); 
} 

static void relay_free_page_array(struct page **array) 
{ 
	if (is_vmalloc_addr(array)) 
		vfree(array); 
	else
		kfree(array);
}

static void vmap_unmap(void)
{
	//purge_vmap_area_lazy();
	//vm_unmap_aliases();
	int i;

	/* unmap every vmap'ed block; unmap_kernel_range() also flushes the TLB range */
	for (i = 0; i < blocks; i++)
		unmap_kernel_range((unsigned long)(block[i].vaddr), NR_PAGES*PAGE_SIZE);
}

// ---------------
long vmap_num = 0;

static ssize_t __vmap_num_store(const char *buf,
		size_t count, int smt)
{
	long factor = 0;
	long i;
	unsigned long start, stop;

	if (sscanf(buf, "%ld", &factor) != 1)
		return -EINVAL;

	vmap_num = factor;
	start = ktime_to_ns(ktime_get());

	vmap_unmap();

	stop = ktime_to_ns(ktime_get());
	i = blocks;
	printk(KERN_ERR "vunmap %ld times cost %ld ns/time\n", 
			i, (stop - start)/i);
	return count;
}

static ssize_t vmap_num_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%ld\n", vmap_num);
}
static ssize_t vmap_num_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	return __vmap_num_store(buf, count, 0);
}

DEVICE_ATTR(vmap_num, 0644,
		vmap_num_show,
		vmap_num_store);

int create_sysfs_vmap_num(struct device *dev)
{
	return device_create_file(dev, &dev_attr_vmap_num);
}

static int mapunmap_init(void){
	long i,j,k;

	create_sysfs_vmap_num(cpu_subsys.dev_root);
	block = kmalloc(sizeof(struct block)*blocks, GFP_KERNEL);

	for (k=0; k< blocks; k++) {
		block[k].page_count = 0;
		block[k].page_array = relay_alloc_page_array(NR_PAGES);
		if (!block[k].page_array)
			return -1;

		for (i = 0; i < NR_PAGES; i++) {
			block[k].page_array[i] = alloc_page(GFP_KERNEL);
			if (unlikely(!block[k].page_array[i])) {
				printk(KERN_ERR "\talloc page error \n");
				goto depopulate;
			}
		}

		if (i!=NR_PAGES)	goto depopulate;

		block[k].page_count = i;
		block[k].vaddr = vmap(block[k].page_array, NR_PAGES, VM_MAP, PAGE_KERNEL);
		if (!(block[k].vaddr)) {
			printk(KERN_ERR "\t\t vmap error !\n");
			goto depopulate;
		}
	}
	printk(KERN_INFO "vmalloc module init OK \n");
	return 0;

depopulate:
	/* free the pages and page arrays of the blocks set up so far */
	for (i = 0; i < k; i++)
		if (block[i].page_count != 0) {
			for (j = 0; j < block[i].page_count; j++)
				__free_page(block[i].page_array[j]);
			relay_free_page_array(block[i].page_array);
		}
	printk(KERN_INFO "vmalloc module init fail\n");
	return -1;
}


static void mapunmap_exit(void){
	long i, j;

	printk(KERN_INFO "bye! this is test module\n");
	device_remove_file(cpu_subsys.dev_root, &dev_attr_vmap_num);

	for (i = 0; i < blocks; i++)
		if (block[i].page_count != 0) {
			for (j = 0; j < block[i].page_count; j++)
				__free_page(block[i].page_array[j]);
			relay_free_page_array(block[i].page_array);
		}
}


module_init(mapunmap_init);
module_exit(mapunmap_exit);

--- benchmark ---

/*
   maccess.c
   This is a macrobenchmark for TLB flush range testing.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Copyright (C) Intel 2012
   Copyright Alex Shi alex.shi@intel.com

   gcc -o maccess maccess.c -lrt -lpthread -O2

    #perf stat -e r881,r882,r884 -e r801,r802,r810,r820,r840,r880,r807 -e rc01 -e r4901,r4902,r4910,r4920,r4940,r4980 -e r5f01  -e rbd01,rdb20  -e r4f02 -e r8004,r8201,r8501,r8502,r8504,r8510,r8520,r8540,r8580  -e rae01,rc820,rc102,rc900 -e r8600  -e rcb10  ./maccess 
*/

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <time.h>
#include <sys/types.h>
#include <pthread.h>

#define FILE_SIZE	(1024*1024*1024)

#define PAGE_SIZE 	(4096)
#define HPAGE_SIZE 	(4096*512)

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000
#endif


long getnsec(clockid_t clockid) {
        struct timespec ts;
        if (clock_gettime(clockid, &ts) == -1)
                perror("clock_gettime failed");
        return (long) ts.tv_sec * 1000000000 + (long) ts.tv_nsec;
}

//data for threads
struct data{
	int pagenum;
	void *startaddr;
	int rw;
	int loop;
};
volatile int * threadstart;
//thread for memory accessing
void *accessmm(void *data){
	struct data *d = data;
	long *actimes;
	char x;
	int i, k;
	int randn[PAGE_SIZE];
	
	for (i=0;i<PAGE_SIZE; i++)
		randn[i] = rand();

	actimes = malloc(sizeof(long));

	while (*threadstart == 0 )
		usleep(1);

	if (d->rw == 0)
		for (*actimes=0; *threadstart == 1; (*actimes)++)
			for (k=0; k < d->pagenum; k++)
				x = *(volatile char *)(d->startaddr + randn[k]%FILE_SIZE); 
	else
		for (*actimes=0; *threadstart == 1; (*actimes)++)
			for (k=0; k < d->pagenum; k++)
				*(char *)(d->startaddr + randn[k]%FILE_SIZE) = 1; 
	return actimes;
}

int main(int argc, char *argv[])
{
        static  char            optstr[] = "p:w:ht:s:";
	int s = 1;	/* */
	int p = 512;	/* default accessed page number, after maccess */
	int er = 0, rw = 0, h = 0, t = 2; /* d: debug; h: use huge page; t thread number */
	int pagesize = PAGE_SIZE; /*default for regular page */
	volatile char x;
	long protindex = 0;

	int i, j, k, c;
	void *m1, *startaddr;
	unsigned long *startaddr2[1024*512];
	volatile void *tempaddr;
	clockid_t clockid = CLOCK_MONOTONIC;
	unsigned long start, stop, mptime, actime;
	int randn[PAGE_SIZE];

	pthread_t	pid[1024];
	void * res;
	struct data data;

	char command[1024];

	for (i=0;i<PAGE_SIZE; i++)
		randn[i] = rand();

        while ((c = getopt(argc, argv, optstr)) != EOF)
                switch (c) {
                case 's':
                        s = atoi(optarg);
                        break;
                case 'p':
                        p = atoi(optarg);
                        break;
                case 'h':
                        h = 1;
                        break;
                case 'w':
                        rw = atoi(optarg);
                        break;
                case 't':
                        t = atoi(optarg);
                        break;
                case '?':
                        er = 1;
                        break;
                }
        if (er) {
                printf("usage: %s %s\n", argv[0], optstr);
                exit(1);
	}

	printf("pid is %d, thread number %d active %d seconds, access page num %d\n", getpid(), t, s, p);
	if (h == 0){
		startaddr = mmap(0, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
		pagesize = PAGE_SIZE;
	} else {
		startaddr = mmap(0, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED | MAP_HUGETLB, -1, 0);
		pagesize = HPAGE_SIZE;
	}

	start = getnsec(clockid);
	//access whole memory, will generate many page faults 
	for (tempaddr = startaddr; tempaddr < startaddr + FILE_SIZE; tempaddr += pagesize)
		memset((char *)tempaddr, 0, 1);
        stop = getnsec(clockid);

	threadstart = malloc(sizeof(int));
	*threadstart = 0;
	data.pagenum = p; data.startaddr = startaddr; data.rw = rw;
	for (i=0; i< t; i++)
		if(pthread_create(&pid[i], NULL, accessmm, &data))
			perror("pthread create");
	//wait for randn[] filling.
	sleep(1);

	mptime = actime = 0;
	sprintf(command, "sudo sh -c 'echo %d > /sys/devices/system/cpu/vmap_num'", s);
	printf("%s\n", command);

	start = getnsec(clockid);
	//kick threads, let them running.
	*threadstart = 1;

	system(command);
	*threadstart = 0;

	stop = getnsec(clockid);
	mptime += stop - start;

	//get threads' result.
	for (i=0; i< t; i++) {
		if (pthread_join(pid[i], &res))
			perror("pthread_join");
		actime += *(long*)res;
	}
end:
	printf("maccess %ld ms, memory access %ld times/thread/ms, cost %ldns/time\n",
		 mptime/1000000, actime*p*1000000/t/mptime, mptime*t/(actime*p));
	exit(0);
}

> 
>>
>>> -Andi
>>
>>
> 
> 





^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v8 0/8] x86 TLB flush range optimizing
  2012-06-13  7:42 ` [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
@ 2012-06-21  5:27   ` Alex Shi
  0 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-06-21  5:27 UTC (permalink / raw)
  To: Alex Shi
  Cc: tglx, mingo, hpa, arnd, rostedt, fweisbec, jeremy,
	seto.hidetoshi, borislav.petkov, tony.luck, luto, riel, avi,
	len.brown, tj, akpm, cl, ak, jbeulich, eric.dumazet,
	akinobu.mita, cpw, penberg, steiner, viro, kamezawa.hiroyu,
	aarcange, rientjes, linux-kernel

On 06/13/2012 03:42 PM, Alex Shi wrote:

> On 06/12/2012 05:06 PM, Alex Shi wrote:
> 
>> This version patches rebases on 3.5-rc2 kernel. It does some code clean up,
>> change patches sequency to make it looks more logical.
>> It also includes xen code change according to Jan Beulich's comments,
>> includes comments update of tlb flushing IPI according to PeterZ's suggestions.
>>
>> And Adding the flush_tlb_kernel_range support by invlpg.
>>
>> Thanks for all comments! and appreciate for more. :) 
> 
> 
> Any comments for this patch set. Specially on the 8th patch?


If there are no more comments, would anyone like to pick up this patch set?

It gives some performance and scalability help on x86 machines.

> 
>>
>> Alex Shi
>>
>> [PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU
>> [PATCH v8 2/8] x86/flush_tlb: try flush_tlb_single one by one in
>> [PATCH v8 3/8] x86/tlb: fall back to flush all when meet a THP large
>> [PATCH v8 4/8] x86/tlb: add tlb_flushall_shift for specific CPU
>> [PATCH v8 5/8] x86/tlb: add tlb_flushall_shift knob into debugfs
>> [PATCH v8 6/8] x86/tlb: enable tlb flush range support for generic
>> [PATCH v8 7/8] x86/tlb: replace INVALIDATE_TLB_VECTOR by
>> [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg'
> 
> 



^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2012-06-21  5:29 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-06-12  9:06 [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
2012-06-12  9:06 ` [PATCH v8 1/8] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
2012-06-12  9:06 ` [PATCH v8 2/8] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range Alex Shi
2012-06-12  9:06 ` [PATCH v8 3/8] x86/tlb: fall back to flush all when meet a THP large page Alex Shi
2012-06-12  9:06 ` [PATCH v8 4/8] x86/tlb: add tlb_flushall_shift for specific CPU Alex Shi
2012-06-12  9:06 ` [PATCH v8 5/8] x86/tlb: add tlb_flushall_shift knob into debugfs Alex Shi
2012-06-12  9:06 ` [PATCH v8 6/8] x86/tlb: enable tlb flush range support for generic mmu and x86 Alex Shi
2012-06-12  9:06 ` [PATCH v8 7/8] x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR Alex Shi
2012-06-12  9:06 ` [PATCH v8 8/8] x86/tlb: do flush_tlb_kernel_range by 'invlpg' Alex Shi
2012-06-13 14:56   ` Andi Kleen
2012-06-14  1:10     ` Alex Shi
2012-06-14  1:26       ` Alex Shi
2012-06-14  2:10         ` Alex Shi
     [not found]         ` <4FD93DC7.8020501-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
2012-06-21  5:25           ` Alex Shi
2012-06-21  5:25             ` Alex Shi
2012-06-13  7:42 ` [PATCH v8 0/8] x86 TLB flush range optimizing Alex Shi
2012-06-21  5:27   ` Alex Shi

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.