linux-kernel.vger.kernel.org archive mirror
* [PATCH v2 0/5] tlb_flush_range optimization
@ 2012-05-04  6:50 Alex Shi
  2012-05-04  6:50 ` [PATCH v2 1/5] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Alex Shi @ 2012-05-04  6:50 UTC (permalink / raw)
  To: tglx, mingo, hpa, rostedt, fweisbec, bp, npiggin
  Cc: jeremy, riel, luto, alex.shi, avi, len.brown, dhowells,
	fenghua.yu, yinghai, borislav.petkov, ak, cpw, steiner, akpm,
	penberg, yongjie.ren, linux-kernel

Remove glommer@redhat.com and add lkml.

This is the second version of the patchset, with 3 main changes:
1, move the TLB info detection into CPU-vendor-specific files, which makes it
friendly for other x86 CPU vendor developers to add their code
2, add an interface to give a different optimization factor to each CPU type
3, tested 3 Intel CPU families: NHM, SNB and Core2, and set the specific
optimization factors for them.

Riel: would you like to update your ack to this version?

Boris: you can enable AMD CPUs based on this version.

Happy coding, and comments appreciated!

Alex Shi


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v2 1/5] x86/tlb_info: get last level TLB entry number of CPU
  2012-05-04  6:50 [PATCH v2 0/5] tlb_flush_range optimization Alex Shi
@ 2012-05-04  6:50 ` Alex Shi
  2012-05-04  6:50 ` [PATCH v2 2/5] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range Alex Shi
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Alex Shi @ 2012-05-04  6:50 UTC (permalink / raw)
  To: tglx, mingo, hpa, rostedt, fweisbec, bp, npiggin
  Cc: jeremy, riel, luto, alex.shi, avi, len.brown, dhowells,
	fenghua.yu, yinghai, borislav.petkov, ak, cpw, steiner, akpm,
	penberg, yongjie.ren, linux-kernel

For 4KB pages, x86 CPUs have 1 or 2 TLB levels: the first level is split into
a data TLB and an instruction TLB, and the second level is a shared TLB for
both data and instructions.

For huge pages there is usually just one TLB level, separated into 2MB/4MB
and 1GB entries.

Although the size of each TLB level matters for fine performance tuning, the
last-level TLB entry count is good enough for a general, coarse optimization.
In fact, the last level always has the largest entry count.

This patch detects that largest TLB entry count and uses it in later TLB
optimizations.

To be friendly to all x86 vendors, the vendor-specific code is moved into
each vendor's own file.
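
For reference, here is a minimal user-space sketch (not part of this patch)
that dumps the raw CPUID leaf 2 descriptor bytes which the in-kernel code
below decodes; it assumes a GCC/Clang toolchain providing <cpuid.h> and an
Intel CPU that implements leaf 2:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;
	int i, j, n;

	if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
		return 1;
	/* the low byte of eax says how often leaf 2 must be queried */
	n = regs[0] & 0xFF;

	for (i = 0; i < n; i++) {
		__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
		/* bit 31 set means the register holds no valid descriptors */
		for (j = 0; j < 4; j++)
			if (regs[j] & (1u << 31))
				regs[j] = 0;
		/* byte 0 is the query count, not a descriptor */
		for (j = 1; j < 16; j++)
			if (desc[j])
				printf("descriptor 0x%02x\n", desc[j]);
	}
	return 0;
}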

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/processor.h |   11 +++
 arch/x86/kernel/cpu/common.c     |   21 ++++++
 arch/x86/kernel/cpu/cpu.h        |    9 +++
 arch/x86/kernel/cpu/intel.c      |  141 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 182 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4fa7dcc..797faca 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -61,6 +61,17 @@ static inline void *current_text_addr(void)
 # define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
+enum tlb_infos {
+	ENTRIES,
+	NR_INFO
+};
+
+extern u16 __read_mostly tlb_lli_4k[NR_INFO];
+extern u16 __read_mostly tlb_lli_2m[NR_INFO];
+extern u16 __read_mostly tlb_lli_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4k[NR_INFO];
+extern u16 __read_mostly tlb_lld_2m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4m[NR_INFO];
 /*
  *  CPU type and hardware bug flags. Kept separately for each CPU.
  *  Members of this structure are referenced in head.S, so think twice
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e2583..d764819 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -452,6 +452,25 @@ void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 	c->x86_cache_size = l2size;
 }
 
+u16 __read_mostly tlb_lli_4k[NR_INFO];
+u16 __read_mostly tlb_lli_2m[NR_INFO];
+u16 __read_mostly tlb_lli_4m[NR_INFO];
+u16 __read_mostly tlb_lld_4k[NR_INFO];
+u16 __read_mostly tlb_lld_2m[NR_INFO];
+u16 __read_mostly tlb_lld_4m[NR_INFO];
+
+void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+		intel_cpu_detect_tlb(c);
+
+	printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
+		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
+		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
+		tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
+		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES]);
+}
+
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
@@ -911,6 +930,8 @@ void __init identify_boot_cpu(void)
 #else
 	vgetcpu_set_mode();
 #endif
+	if (boot_cpu_data.cpuid_level >= 2)
+		cpu_detect_tlb(&boot_cpu_data);
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 8bacc78..c8dc726 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -24,6 +24,14 @@ struct cpu_dev {
 	int		c_x86_vendor;
 };
 
+struct _tlb_table {
+	unsigned char descriptor;
+	char tlb_type;
+	unsigned int entries;
+	/* unsigned int ways; */
+	char info[128];
+};
+
 #define cpu_dev_register(cpu_devX) \
 	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
 	__attribute__((__section__(".x86_cpu_dev.init"))) = \
@@ -34,4 +42,5 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+extern void intel_cpu_detect_tlb(struct cpuinfo_x86 *c);
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3e6ff6c..86e6131 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -491,6 +491,147 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
 }
 #endif
 
+#define TLB_INST_4K	0x01
+#define TLB_INST_4M	0x02
+#define TLB_INST_2M_4M	0x03
+
+#define TLB_INST_ALL	0x05
+#define TLB_INST_1G	0x06
+
+#define TLB_DATA_4K	0x11
+#define TLB_DATA_4M	0x12
+#define TLB_DATA_2M_4M	0x13
+#define TLB_DATA_4K_4M	0x14
+
+#define TLB_DATA_1G	0x16
+
+#define TLB_DATA0_4K	0x21
+#define TLB_DATA0_4M	0x22
+#define TLB_DATA0_2M_4M	0x23
+
+#define STLB_4K		0x41
+
+static const struct _tlb_table intel_tlb_table[] = {
+	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
+	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
+	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
+	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
+	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
+	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
+	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
+	{ 0x50, TLB_INST_ALL,		64, 	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+	{ 0x55, TLB_INST_2M_4M,		7, 	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
+	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
+	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
+	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
+	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
+	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
+	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
+	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
+	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
+	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
+	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
+	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
+	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
+	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
+	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
+	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
+	{ 0x00, 0, 0 }
+};
+
+void intel_tlb_lookup(const unsigned char desc)
+{
+	unsigned char k;
+	if (desc == 0)
+		return;
+
+	/* look up this descriptor in the table */
+	for (k = 0; intel_tlb_table[k].descriptor != desc && \
+			intel_tlb_table[k].descriptor != 0; k++)
+		;
+
+	if (intel_tlb_table[k].tlb_type == 0)
+		return;
+
+	switch (intel_tlb_table[k].tlb_type) {
+	case STLB_4K:
+		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_ALL:
+		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_4K:
+		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_4M:
+		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_INST_2M_4M:
+		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_4K:
+	case TLB_DATA0_4K:
+		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_4M:
+	case TLB_DATA0_4M:
+		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_2M_4M:
+	case TLB_DATA0_2M_4M:
+		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	case TLB_DATA_4K_4M:
+		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
+	}
+}
+
+void intel_cpu_detect_tlb(struct cpuinfo_x86 *c)
+{
+	int i, j, n;
+	unsigned int regs[4];
+	unsigned char *desc = (unsigned char *)regs;
+	/* Number of times to iterate */
+	n = cpuid_eax(2) & 0xFF;
+
+	for (i = 0 ; i < n ; i++) {
+		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+		/* If bit 31 is set, this is an unknown format */
+		for (j = 0 ; j < 3 ; j++)
+			if (regs[j] & (1 << 31))
+				regs[j] = 0;
+
+		/* Byte 0 is level count, not a descriptor */
+		for (j = 1 ; j < 16 ; j++)
+			intel_tlb_lookup(desc[j]);
+	}
+}
+
 static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
-- 
1.7.0.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v2 2/5] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range
  2012-05-04  6:50 [PATCH v2 0/5] tlb_flush_range optimization Alex Shi
  2012-05-04  6:50 ` [PATCH v2 1/5] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
@ 2012-05-04  6:50 ` Alex Shi
  2012-05-04  6:50 ` [PATCH v2 3/5] x86/tlb: fall back to flush all when meet a THP large page Alex Shi
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Alex Shi @ 2012-05-04  6:50 UTC (permalink / raw)
  To: tglx, mingo, hpa, rostedt, fweisbec, bp, npiggin
  Cc: jeremy, riel, luto, alex.shi, avi, len.brown, dhowells,
	fenghua.yu, yinghai, borislav.petkov, ak, cpw, steiner, akpm,
	penberg, yongjie.ren, linux-kernel

x86 has no instruction-level support for flush_tlb_range. Currently
flush_tlb_range is implemented by simply flushing the whole TLB, which is not
the best solution for all scenarios. If we instead use 'invlpg' to flush just
a few entries, the remaining TLB entries can still serve later accesses.

But the 'invlpg' instruction itself is expensive. Its execution time is
comparable to a cr3 rewrite, and even a bit higher on SNB CPUs.

So, on a CPU with 512 4KB TLB entries, the balance point is at:
	(512 - X) * 100ns (assumed TLB refill cost) =
		X (TLB flush entries) * 100ns (assumed invlpg cost)

Here X is 256, i.e. 1/2 of the 512 entries.
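
(A general form of the same calculation, under the stated cost assumptions:
with N last-level entries, a per-entry refill cost r and a per-entry invlpg
cost c, solving (N - X) * r = X * c gives

	X = N * r / (r + c)

so with r = c, as assumed above, the balance point is N/2, i.e. 256 of 512
entries.)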

But with the mysterious CPU prefetcher and page-miss-handler unit, the actual
TLB refill cost is far lower than 100ns for sequential access. And 2 HT
siblings in one core make memory access even faster when they access the same
memory. So in this patch I only make the change when the number of target
entries is less than 1/16 of the whole set of active TLB entries. I have no
data to support the exact ratio '1/16', so any suggestions are welcome.

As for hugetlb, probably due to the smaller page tables and fewer active TLB
entries, I saw no benefit in my benchmark, so it is not optimized for now.

My micro benchmark shows that in the ideal scenario read performance improves
by 70 percent. In the worst scenario, read/write performance is on par with
the unpatched 3.4-rc4 kernel.

Here is the read data from my 2P * 4cores * HT NHM EP machine, with THP set
to 'always':

Multi-thread testing, the '-t' parameter is the thread number:
	               with patch   unpatched 3.4-rc4
./mprotect -t 1          14ns          24ns
./mprotect -t 2          13ns          22ns
./mprotect -t 4          12ns          19ns
./mprotect -t 8          14ns          16ns
./mprotect -t 16         28ns          26ns
./mprotect -t 32         54ns          51ns
./mprotect -t 128        200ns         199ns

Single process with sequential flushing and memory accessing:

	               with patch   unpatched 3.4-rc4
./mprotect               7ns           11ns
./mprotect -p 4096 -l 8 -n 10240
	                 21ns          21ns

I also tried other benchmarks on Intel Core2/NHM/SNB EP and NHM EX machines.
No clear performance change on specjbb2005 with OpenJDK, or on OLTP reading.
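
For reference, a minimal single-threaded sketch of this kind of measurement
loop follows; the actual mprotect.c posted on lkml is not reproduced here,
and its '-t'/'-p'/'-l'/'-n' option handling is richer than this. Each
mprotect() pair forces a flush_tlb_range(), and the touch loop then pays the
TLB refill cost:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>

#define NPAGES	512
#define LOOPS	100000

int main(void)
{
	size_t len = NPAGES * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct timespec t0, t1;
	long i, j, sum = 0;

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 1, len);

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < LOOPS; i++) {
		/* each protection change ends in flush_tlb_range() */
		mprotect(buf, len, PROT_READ);
		mprotect(buf, len, PROT_READ | PROT_WRITE);
		for (j = 0; j < NPAGES; j++)	/* refill the flushed TLB */
			sum += buf[j * 4096];
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("%ld ns/access (checksum %ld)\n",
	       ((t1.tv_sec - t0.tv_sec) * 1000000000L +
		(t1.tv_nsec - t0.tv_nsec)) / ((long)LOOPS * NPAGES), sum);
	return 0;
}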

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/paravirt.h       |    5 +-
 arch/x86/include/asm/paravirt_types.h |    3 +-
 arch/x86/include/asm/tlbflush.h       |   19 +++----
 arch/x86/include/asm/uv/uv.h          |    5 +-
 arch/x86/mm/tlb.c                     |   97 +++++++++++++++++++++++++++------
 arch/x86/platform/uv/tlb_uv.c         |    6 +-
 arch/x86/xen/mmu.c                    |    9 ++--
 include/trace/events/xen.h            |   12 +++--
 8 files changed, 113 insertions(+), 43 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index aa0f913..03da4ab 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -397,9 +397,10 @@ static inline void __flush_tlb_single(unsigned long addr)
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
 				    struct mm_struct *mm,
-				    unsigned long va)
+				    unsigned long start,
+				    unsigned long end)
 {
-	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
+	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4..600a5fc 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -250,7 +250,8 @@ struct pv_mmu_ops {
 	void (*flush_tlb_single)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
 				 struct mm_struct *mm,
-				 unsigned long va);
+				 unsigned long start,
+				 unsigned long end);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
 	int  (*pgd_alloc)(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c0e108e..ec30dfb 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -77,7 +77,7 @@ static inline void __flush_tlb_one(unsigned long addr)
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
@@ -115,7 +115,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
-					   unsigned long va)
+					   unsigned long start,
+					   unsigned long end)
 {
 }
 
@@ -133,17 +134,14 @@ extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
 extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end);
 
 #define flush_tlb()	flush_tlb_current_task()
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	flush_tlb_mm(vma->vm_mm);
-}
-
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long va);
+				struct mm_struct *mm,
+				unsigned long start, unsigned long end);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -163,7 +161,8 @@ static inline void reset_lazy_tlbstate(void)
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
+#define flush_tlb_others(mask, mm, start, end)	\
+	native_flush_tlb_others(mask, mm, start, end)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 3bb9491..b47c2a8 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -15,7 +15,8 @@ extern void uv_nmi_init(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 						 struct mm_struct *mm,
-						 unsigned long va,
+						 unsigned long start,
+						 unsigned long end,
 						 unsigned int cpu);
 
 #else	/* X86_UV */
@@ -26,7 +27,7 @@ static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
 static inline const struct cpumask *
 uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
-		    unsigned long va, unsigned int cpu)
+		    unsigned long start, unsigned long end, unsigned int cpu)
 { return cpumask; }
 
 #endif	/* X86_UV */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d6c0418..c4e694d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -41,7 +41,8 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
 union smp_flush_state {
 	struct {
 		struct mm_struct *flush_mm;
-		unsigned long flush_va;
+		unsigned long flush_start;
+		unsigned long flush_end;
 		raw_spinlock_t tlbstate_lock;
 		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
 	};
@@ -154,10 +155,19 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
 		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-			if (f->flush_va == TLB_FLUSH_ALL)
+			if (f->flush_start == TLB_FLUSH_ALL
+					|| !cpu_has_invlpg)
 				local_flush_tlb();
-			else
-				__flush_tlb_one(f->flush_va);
+			else if (!f->flush_end)
+				__flush_tlb_single(f->flush_start);
+			else {
+				unsigned long addr;
+				addr = f->flush_start;
+				while (addr <= f->flush_end) {
+					__flush_tlb_single(addr);
+					addr += PAGE_SIZE;
+				}
+			}
 		} else
 			leave_mm(cpu);
 	}
@@ -170,7 +180,8 @@ out:
 }
 
 static void flush_tlb_others_ipi(const struct cpumask *cpumask,
-				 struct mm_struct *mm, unsigned long va)
+				 struct mm_struct *mm, unsigned long start,
+				 unsigned long end)
 {
 	unsigned int sender;
 	union smp_flush_state *f;
@@ -183,7 +194,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 		raw_spin_lock(&f->tlbstate_lock);
 
 	f->flush_mm = mm;
-	f->flush_va = va;
+	f->flush_start = start;
+	f->flush_end = end;
 	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
 		/*
 		 * We have to send the IPI only to
@@ -197,24 +209,26 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 	}
 
 	f->flush_mm = NULL;
-	f->flush_va = 0;
+	f->flush_start = 0;
+	f->flush_end = 0;
 	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
 		raw_spin_unlock(&f->tlbstate_lock);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long va)
+				 struct mm_struct *mm, unsigned long start,
+				 unsigned long end)
 {
 	if (is_uv_system()) {
 		unsigned int cpu;
 
 		cpu = smp_processor_id();
-		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
+		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
 		if (cpumask)
-			flush_tlb_others_ipi(cpumask, mm, va);
+			flush_tlb_others_ipi(cpumask, mm, start, end);
 		return;
 	}
-	flush_tlb_others_ipi(cpumask, mm, va);
+	flush_tlb_others_ipi(cpumask, mm, start, end);
 }
 
 static void __cpuinit calculate_tlb_offset(void)
@@ -280,7 +294,7 @@ void flush_tlb_current_task(void)
 
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL, 0UL);
 	preempt_enable();
 }
 
@@ -295,12 +309,63 @@ void flush_tlb_mm(struct mm_struct *mm)
 			leave_mm(smp_processor_id());
 	}
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL, 0UL);
+
+	preempt_enable();
+}
+
+#define FLUSHALL_BAR	16
+
+void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm;
+
+	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
+	preempt_disable();
+	mm = vma->vm_mm;
+	if (current->active_mm == mm) {
+		if (current->mm) {
+			unsigned long addr, vmflag = vma->vm_flags;
+			unsigned act_entries, tlb_entries = 0;
+
+			if (vmflag & VM_EXEC)
+				tlb_entries = tlb_lli_4k[ENTRIES];
+			else
+				tlb_entries = tlb_lld_4k[ENTRIES];
+
+			act_entries = tlb_entries > mm->total_vm ?
+					mm->total_vm : tlb_entries;
 
+			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
+				local_flush_tlb();
+			else {
+				for (addr = start; addr <= end;
+						addr += PAGE_SIZE)
+					__flush_tlb_single(addr);
+
+				if (cpumask_any_but(mm_cpumask(mm),
+					smp_processor_id()) < nr_cpu_ids)
+					flush_tlb_others(mm_cpumask(mm), mm,
+								start, end);
+				preempt_enable();
+				return;
+			}
+		} else {
+			leave_mm(smp_processor_id());
+		}
+	}
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL, 0UL);
 	preempt_enable();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
@@ -308,13 +373,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 
 	if (current->active_mm == mm) {
 		if (current->mm)
-			__flush_tlb_one(va);
+			__flush_tlb_one(start);
 		else
 			leave_mm(smp_processor_id());
 	}
 
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, va);
+		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
 
 	preempt_enable();
 }
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 3ae0e61..0df5ad2 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1068,8 +1068,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * done.  The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-				struct mm_struct *mm, unsigned long va,
-				unsigned int cpu)
+				struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned int cpu)
 {
 	int locals = 0;
 	int remotes = 0;
@@ -1112,7 +1112,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-	bau_desc->payload.address = va;
+	bau_desc->payload.address = start;
 	bau_desc->payload.sending_cpu = cpu;
 	/*
 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b8e2794..75bab52 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1239,7 +1239,8 @@ static void xen_flush_tlb_single(unsigned long addr)
 }
 
 static void xen_flush_tlb_others(const struct cpumask *cpus,
-				 struct mm_struct *mm, unsigned long va)
+				 struct mm_struct *mm, unsigned long start,
+				 unsigned long end)
 {
 	struct {
 		struct mmuext_op op;
@@ -1251,7 +1252,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	} *args;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
 
 	if (cpumask_empty(cpus))
 		return;		/* nothing to do */
@@ -1264,11 +1265,11 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
-	if (va == TLB_FLUSH_ALL) {
+	if (start == TLB_FLUSH_ALL) {
 		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
 	} else {
 		args->op.cmd = MMUEXT_INVLPG_MULTI;
-		args->op.arg1.linear_addr = va;
+		args->op.arg1.linear_addr = start;
 	}
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 92f1a79..15ba03b 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -397,18 +397,20 @@ TRACE_EVENT(xen_mmu_flush_tlb_single,
 
 TRACE_EVENT(xen_mmu_flush_tlb_others,
 	    TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
-		     unsigned long addr),
-	    TP_ARGS(cpus, mm, addr),
+		     unsigned long addr, unsigned long end),
+	    TP_ARGS(cpus, mm, addr, end),
 	    TP_STRUCT__entry(
 		    __field(unsigned, ncpus)
 		    __field(struct mm_struct *, mm)
 		    __field(unsigned long, addr)
+		    __field(unsigned long, end)
 		    ),
 	    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
 			   __entry->mm = mm;
-			   __entry->addr = addr),
-	    TP_printk("ncpus %d mm %p addr %lx",
-		      __entry->ncpus, __entry->mm, __entry->addr)
+			   __entry->addr = addr,
+			   __entry->end = end),
+	    TP_printk("ncpus %d mm %p addr %lx, end %lx",
+		      __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
 	);
 
 TRACE_EVENT(xen_mmu_write_cr3,
-- 
1.7.0.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v2 3/5] x86/tlb: fall back to flush all when meet a THP large page
  2012-05-04  6:50 [PATCH v2 0/5] tlb_flush_range optimization Alex Shi
  2012-05-04  6:50 ` [PATCH v2 1/5] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
  2012-05-04  6:50 ` [PATCH v2 2/5] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range Alex Shi
@ 2012-05-04  6:50 ` Alex Shi
       [not found]   ` <CAJd=RBDi1cJ=HmjoLdMG1hMU5GZNDV5ysXqvCT9PP09rEi342w@mail.gmail.com>
  2012-05-04  6:50 ` [PATCH v2 4/5] x86/tlb: add tlb flush all factor for specific CPUs Alex Shi
  2012-05-04  6:50 ` [PATCH v2 5/5] x86/tlb: remove comments for tlb_flush_range implement suggestion Alex Shi
  4 siblings, 1 reply; 8+ messages in thread
From: Alex Shi @ 2012-05-04  6:50 UTC (permalink / raw)
  To: tglx, mingo, hpa, rostedt, fweisbec, bp, npiggin
  Cc: jeremy, riel, luto, alex.shi, avi, len.brown, dhowells,
	fenghua.yu, yinghai, borislav.petkov, ak, cpw, steiner, akpm,
	penberg, yongjie.ren, linux-kernel

We don't need to flush large pages in PAGE_SIZE steps; that just wastes
time. And actually, according to our micro benchmark, large pages don't need
the 'invlpg' optimization, so just flushing the whole TLB is enough for them.

The following result was measured on a 2CPU * 4cores * 2HT NHM EP machine,
with THP set to 'always' (via /sys/kernel/mm/transparent_hugepage/enabled).

Multi-thread testing, the '-t' parameter is the thread number:
                       without this patch 	with this patch
./mprotect -t 1         14ns                       13ns
./mprotect -t 2         13ns                       13ns
./mprotect -t 4         12ns                       11ns
./mprotect -t 8         14ns                       10ns
./mprotect -t 16        28ns                       28ns
./mprotect -t 32        54ns                       52ns
./mprotect -t 128       200ns                      200ns

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/mm/tlb.c |   27 +++++++++++++++++++++++++++
 1 files changed, 27 insertions(+), 0 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c4e694d..4f709e6 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -316,12 +316,35 @@ void flush_tlb_mm(struct mm_struct *mm)
 
 #define FLUSHALL_BAR	16
 
+static inline int has_large_page(struct mm_struct *mm,
+				 unsigned long start, unsigned long end)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long addr;
+	for (addr = start; addr <= end; addr += HPAGE_SIZE) {
+		pgd = pgd_offset(mm, addr);
+		if (likely(!pgd_none(*pgd))) {
+			pud = pud_offset(pgd, addr);
+			if (likely(!pud_none(*pud))) {
+				pmd = pmd_offset(pud, addr);
+				if (likely(!pmd_none(*pmd)))
+					if (pmd_large(*pmd))
+						return 1;
+			}
+		}
+	}
+	return 0;
+}
+
 void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm;
 
 	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+flush_all:
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
@@ -344,6 +367,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
 			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
 				local_flush_tlb();
 			else {
+				if (has_large_page(mm, start, end)) {
+					preempt_enable();
+					goto flush_all;
+				}
 				for (addr = start; addr <= end;
 						addr += PAGE_SIZE)
 					__flush_tlb_single(addr);
-- 
1.7.0.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v2 4/5] x86/tlb: add tlb flush all factor for specific CPUs
  2012-05-04  6:50 [PATCH v2 0/5] tlb_flush_range optimization Alex Shi
                   ` (2 preceding siblings ...)
  2012-05-04  6:50 ` [PATCH v2 3/5] x86/tlb: fall back to flush all when meet a THP large page Alex Shi
@ 2012-05-04  6:50 ` Alex Shi
  2012-05-04  6:50 ` [PATCH v2 5/5] x86/tlb: remove comments for tlb_flush_range implement suggestion Alex Shi
  4 siblings, 0 replies; 8+ messages in thread
From: Alex Shi @ 2012-05-04  6:50 UTC (permalink / raw)
  To: tglx, mingo, hpa, rostedt, fweisbec, bp, npiggin
  Cc: jeremy, riel, luto, alex.shi, avi, len.brown, dhowells,
	fenghua.yu, yinghai, borislav.petkov, ak, cpw, steiner, akpm,
	penberg, yongjie.ren, linux-kernel

Testing shows that different CPU types (microarchitectures and NUMA modes)
have different balance points between a full TLB flush and multiple invlpg
instructions. There may also be cases where the TLB flush change does not
help at all.

This patch gives an interface to let x86 vendor developers set different
factors for different CPU types.

On the machines at hand, the balance point is at 16 entries on Romley-EP and
at 8 entries on Bloomfield NHM-EP, while on a model 15 Core2 Xeon invlpg does
not help at all.

For untested machines, no optimization is applied for now.
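
As a purely hypothetical sketch (not part of this series) of how another
vendor could hook in, following the same pattern as
intel_tlb_flushall_factor_set() below; the function name, the family number
and the factor value are invented for illustration, not measured:

void amd_tlb_flushall_factor_set(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x15:	/* invented example family */
		tlb_flushall_factor = 32;	/* illustrative value only */
		break;
	default:
		tlb_flushall_factor = 0;	/* untested: keep flush-all */
	}
}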

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/processor.h |    2 ++
 arch/x86/kernel/cpu/common.c     |   13 +++++++++++--
 arch/x86/kernel/cpu/intel.c      |   30 ++++++++++++++++++++++++++++++
 arch/x86/mm/tlb.c                |    8 ++++----
 4 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 797faca..6a7e9c3 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -72,6 +72,8 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO];
 extern u16 __read_mostly tlb_lld_4k[NR_INFO];
 extern u16 __read_mostly tlb_lld_2m[NR_INFO];
 extern u16 __read_mostly tlb_lld_4m[NR_INFO];
+extern u16 __read_mostly tlb_flushall_factor;
+
 /*
  *  CPU type and hardware bug flags. Kept separately for each CPU.
  *  Members of this structure are referenced in head.S, so think twice
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d764819..71e7891 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -459,16 +459,25 @@ u16 __read_mostly tlb_lld_4k[NR_INFO];
 u16 __read_mostly tlb_lld_2m[NR_INFO];
 u16 __read_mostly tlb_lld_4m[NR_INFO];
 
+/*
+ * tlb_flushall_factor shows the balance point in replacing a cr3 write with
+ * multiple 'invlpg'. Each CPU type has its own value, which is worked out
+ * from a micro benchmark named mprotect.c that was published on lkml.
+ */
+u16 __read_mostly tlb_flushall_factor;
+
 void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	if (c->x86_vendor == X86_VENDOR_INTEL)
 		intel_cpu_detect_tlb(c);
 
 	printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
-		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
+		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n"	     \
+		"tlb_flushall_factor is 1/%d\n",
 		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
 		tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
-		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES]);
+		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
+		tlb_flushall_factor);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 86e6131..42480c6 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -610,6 +610,35 @@ void intel_tlb_lookup(const unsigned char desc)
 	}
 }
 
+void intel_tlb_flushall_factor_set(struct cpuinfo_x86 *c)
+{
+	switch (c->x86_model) {
+	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+		tlb_flushall_factor = 0;
+		break;
+	case 26: /* 45 nm nehalem, "Bloomfield" */
+	case 30: /* 45 nm nehalem, "Lynnfield" */
+	case 37: /* 32 nm nehalem, "Clarkdale" */
+	case 44: /* 32 nm nehalem, "Gulftown" */
+	case 46: /* 45 nm nehalem-ex, "Beckton" */
+		tlb_flushall_factor = 64;
+		break;
+	case 42: /* SandyBridge */
+	case 45: /* SandyBridge, "Romley-EP" */
+		tlb_flushall_factor = 32;
+		break;
+	case 28: /* Atom */
+	case 47: /* 32 nm Xeon E7 */
+	case 14: /* 65 nm core solo/duo, "Yonah" */
+	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
+	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
+	case 29: /* six-core 45 nm xeon "Dunnington" */
+
+	default:
+		tlb_flushall_factor = 0;
+	}
+}
+
 void intel_cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	int i, j, n;
@@ -630,6 +659,7 @@ void intel_cpu_detect_tlb(struct cpuinfo_x86 *c)
 		for (j = 1 ; j < 16 ; j++)
 			intel_tlb_lookup(desc[j]);
 	}
+	intel_tlb_flushall_factor_set(c);
 }
 
 static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 4f709e6..91896dc 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -314,8 +314,6 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_enable();
 }
 
-#define FLUSHALL_BAR	16
-
 static inline int has_large_page(struct mm_struct *mm,
 				 unsigned long start, unsigned long end)
 {
@@ -343,7 +341,8 @@ void flush_tlb_range(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm;
 
-	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB
+				|| !tlb_flushall_factor) {
 flush_all:
 		flush_tlb_mm(vma->vm_mm);
 		return;
@@ -364,7 +363,8 @@ flush_all:
 			act_entries = tlb_entries > mm->total_vm ?
 					mm->total_vm : tlb_entries;
 
-			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
+			if ((end - start)/PAGE_SIZE >
+					act_entries/tlb_flushall_factor)
 				local_flush_tlb();
 			else {
 				if (has_large_page(mm, start, end)) {
-- 
1.7.0.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v2 5/5] x86/tlb: remove comments for tlb_flush_range implement suggestion
  2012-05-04  6:50 [PATCH v2 0/5] tlb_flush_range optimization Alex Shi
                   ` (3 preceding siblings ...)
  2012-05-04  6:50 ` [PATCH v2 4/5] x86/tlb: add tlb flush all factor for specific CPUs Alex Shi
@ 2012-05-04  6:50 ` Alex Shi
  4 siblings, 0 replies; 8+ messages in thread
From: Alex Shi @ 2012-05-04  6:50 UTC (permalink / raw)
  To: tglx, mingo, hpa, rostedt, fweisbec, bp, npiggin
  Cc: jeremy, riel, luto, alex.shi, avi, len.brown, dhowells,
	fenghua.yu, yinghai, borislav.petkov, ak, cpw, steiner, akpm,
	penberg, yongjie.ren, linux-kernel

Thanks go to this comment, which inspired me to use invlpg to replace the
cr3 rewrite. Now it is time to remove it from the code to avoid confusing
readers.

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/tlbflush.h |    4 ----
 1 files changed, 0 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index ec30dfb..51f8b1c 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,10 +81,6 @@ static inline void __flush_tlb_one(unsigned long addr)
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
  */
 
 #ifndef CONFIG_SMP
-- 
1.7.0.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 3/5] x86/tlb: fall back to flush all when meet a THP large page
       [not found]   ` <CAJd=RBDi1cJ=HmjoLdMG1hMU5GZNDV5ysXqvCT9PP09rEi342w@mail.gmail.com>
@ 2012-05-04 12:22     ` Hillf Danton
       [not found]     ` <4FA3D2DF.9070000@intel.com>
  1 sibling, 0 replies; 8+ messages in thread
From: Hillf Danton @ 2012-05-04 12:22 UTC (permalink / raw)
  To: LKML

On Fri, May 4, 2012 at 8:11 PM, Hillf Danton <dhillf@gmail.com> wrote:
> Hi Alex,
>
> On Fri, May 4, 2012 at 2:50 PM, Alex Shi <alex.shi@intel.com> wrote:
>> We don't need to flush large pages in PAGE_SIZE steps; that just wastes
>> time. And actually, according to our micro benchmark, large pages don't need
>> the 'invlpg' optimization, so just flushing the whole TLB is enough for them.
>>
>> The following result is tested on a 2CPU * 4cores * 2HT NHM EP machine,
>> with THP 'always' setting.
>>
>> Multi-thread testing, the '-t' parameter is the thread number:
>>                       without this patch       with this patch
>> ./mprotect -t 1         14ns                       13ns
>> ./mprotect -t 2         13ns                       13ns
>> ./mprotect -t 4         12ns                       11ns
>> ./mprotect -t 8         14ns                       10ns
>> ./mprotect -t 16        28ns                       28ns
>> ./mprotect -t 32        54ns                       52ns
>> ./mprotect -t 128       200ns                      200ns
>>
>> Signed-off-by: Alex Shi <alex.shi@intel.com>
>> ---
>>  arch/x86/mm/tlb.c |   27 +++++++++++++++++++++++++++
>>  1 files changed, 27 insertions(+), 0 deletions(-)
>>
>> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
>> index c4e694d..4f709e6 100644
>> --- a/arch/x86/mm/tlb.c
>> +++ b/arch/x86/mm/tlb.c
>> @@ -316,12 +316,35 @@ void flush_tlb_mm(struct mm_struct *mm)
>>
>>  #define FLUSHALL_BAR   16
>>
>> +static inline int has_large_page(struct mm_struct *mm,
>> +                                unsigned long start, unsigned long end)
>> +{
>> +       pgd_t *pgd;
>> +       pud_t *pud;
>> +       pmd_t *pmd;
>> +       unsigned long addr;
>> +       for (addr = start; addr <= end; addr += HPAGE_SIZE) {
>> +               pgd = pgd_offset(mm, addr);
>> +               if (likely(!pgd_none(*pgd))) {
>> +                       pud = pud_offset(pgd, addr);
>> +                       if (likely(!pud_none(*pud))) {
>> +                               pmd = pmd_offset(pud, addr);
>> +                               if (likely(!pmd_none(*pmd)))
>> +                                       if (pmd_large(*pmd))
>> +                                               return 1;
>> +                       }
>> +               }
>> +       }
>> +       return 0;
>> +}
>> +
>>  void flush_tlb_range(struct vm_area_struct *vma,
>>                                   unsigned long start, unsigned long end)
>>  {
>>        struct mm_struct *mm;
>>
>>        if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
>> +flush_all:
>>                flush_tlb_mm(vma->vm_mm);
>>                return;
>>        }
>> @@ -344,6 +367,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
>>                        if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
>>                                local_flush_tlb();
>>                        else {
>> +                               if (has_large_page(mm, start, end)) {
>> +                                       preempt_enable();
>> +                                       goto flush_all;
>> +                               }
>>                                for (addr = start; addr <= end;
>>                                                addr += PAGE_SIZE)
>>                                        __flush_tlb_single(addr);
>> --
>>
> Perhaps huge pages could be handled alternatively, though I don't know the
> point of flushing the TLB manually.
>
> --- a/x86-mm-tlb.c      Fri May  4 19:53:40 2012
> +++ b/x86-mm-tlb.c      Fri May  4 20:01:36 2012
> @@ -2,7 +2,7 @@ void flush_tlb_range(struct vm_area_stru
>  {
>        struct mm_struct *mm;
>
> -       if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
> +       if (!cpu_has_invlpg) {
>                flush_tlb_mm(vma->vm_mm);
>                return;
>        }
> @@ -13,6 +13,7 @@ void flush_tlb_range(struct vm_area_stru
>                if (current->mm) {
>                        unsigned long addr, vmflag = vma->vm_flags;
>                        unsigned act_entries, tlb_entries = 0;
> +                       unsigned long pg_sz;
>
>                        if (vmflag & VM_EXEC)
>                                tlb_entries = tlb_lli_4k[ENTRIES];
> @@ -22,11 +23,15 @@ void flush_tlb_range(struct vm_area_stru
>                        act_entries = tlb_entries > mm->total_vm ?
>                                        mm->total_vm : tlb_entries;
>
> -                       if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
> +                       pg_sz = (transparent_hugepage_enabled(vma) ||
> +                                               is_vm_hugetlb_page(vma)) ?
> +                               HPAGE_SIZE : PAGE_SIZE;
> +
> +                       if ((end - start)/pg_sz > act_entries/FLUSHALL_BAR)
>                                local_flush_tlb();
>                        else {
>                                for (addr = start; addr <= end;
> -                                               addr += PAGE_SIZE)
> +                                               addr += pg_sz)
>                                        __flush_tlb_single(addr);
>
>                                if (cpumask_any_but(mm_cpumask(mm),
> --

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 3/5] x86/tlb: fall back to flush all when meet a THP large page
       [not found]     ` <4FA3D2DF.9070000@intel.com>
@ 2012-05-04 13:38       ` Hillf Danton
  0 siblings, 0 replies; 8+ messages in thread
From: Hillf Danton @ 2012-05-04 13:38 UTC (permalink / raw)
  To: Alex Shi
  Cc: tglx, mingo, hpa, rostedt, fweisbec, bp, npiggin, jeremy, riel,
	luto, avi, len.brown, dhowells, fenghua.yu, yinghai,
	borislav.petkov, ak, cpw, steiner, Andrew Morton, LKML

On Fri, May 4, 2012 at 9:00 PM, Alex Shi <alex.shi@intel.com> wrote:
> On 05/04/2012 08:11 PM, Hillf Danton wrote:
>
>> Hi Alex,
>>
>>                       if (vmflag & VM_EXEC)
>>                               tlb_entries = tlb_lli_4k[ENTRIES];
>> @@ -22,11 +23,15 @@ void flush_tlb_range(struct vm_area_stru
>>                       act_entries = tlb_entries > mm->total_vm ?
>>                                       mm->total_vm : tlb_entries;
>>
>> -                     if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
>> +                     pg_sz = (transparent_hugepage_enabled(vma) ||
>> +                                             is_vm_hugetlb_page(vma)) ?
>> +                             HPAGE_SIZE : PAGE_SIZE;
>> +
>> +                     if ((end - start)/pg_sz > act_entries/FLUSHALL_BAR)
>
>
> Your patch should be based on the second patch, which expels the HUGETLB pages.
>
The diff is only for showing the idea.

> But the key fact is that I did not find invlpg to help on HUGETLB.

Yep, I only assume that flushing huge-page TLB entries is not far from
flushing regular ones.

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2012-05-04 13:38 UTC | newest]

Thread overview: 8+ messages
-- links below jump to the message on this page --
2012-05-04  6:50 [PATCH v2 0/5] tlb_flush_range optimization Alex Shi
2012-05-04  6:50 ` [PATCH v2 1/5] x86/tlb_info: get last level TLB entry number of CPU Alex Shi
2012-05-04  6:50 ` [PATCH v2 2/5] x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range Alex Shi
2012-05-04  6:50 ` [PATCH v2 3/5] x86/tlb: fall back to flush all when meet a THP large page Alex Shi
     [not found]   ` <CAJd=RBDi1cJ=HmjoLdMG1hMU5GZNDV5ysXqvCT9PP09rEi342w@mail.gmail.com>
2012-05-04 12:22     ` Hillf Danton
     [not found]     ` <4FA3D2DF.9070000@intel.com>
2012-05-04 13:38       ` Hillf Danton
2012-05-04  6:50 ` [PATCH v2 4/5] x86/tlb: add tlb flush all factor for specific CPUs Alex Shi
2012-05-04  6:50 ` [PATCH v2 5/5] x86/tlb: remove comments for tlb_flush_range implement suggestion Alex Shi
