diff for duplicates of <1535660615.28258.37.camel@intel.com>

diff --git a/a/content_digest b/N1/content_digest
index bc0cbd5..18d6c67 100644
--- a/a/content_digest
+++ b/N1/content_digest
@@ -40,10 +40,7 @@
   " Mike Kravetz <mike.kravetz\@oracle.com>",
   " Nadav Amit <nadav.amit\@gmail.com>",
   " Oleg Nesterov <oleg\@redhat.com>",
-  " Pavel Machek <pavel\@ucw.cz>",
-  " Peter Zijlstra <peterz\@infradead.org>",
-  " Ravi V. Shankar <ravi.v.shankar\@intel.com>",
-  " Vedvyas Shanbhogue <vedvyas.shanbhogue\@intel.com>\0"
+  " Pave\0"
 ]
 [
   "\0000:1\0"
@@ -167,4 +164,4 @@
   "Thanks, I will fix it!"
 ]
 
-b7ecbbacc58b8731cc98357d98def28e30252495c2c465e5cc577352c0ac72e9
+d28ad2e0236c0e9995734819dbf14d865e12d54c668e4a7cbc3efb1abd3d17ce

diff --git a/a/1.txt b/N2/1.txt
index 4429d8f..9c12615 100644
--- a/a/1.txt
+++ b/N2/1.txt
@@ -2,16 +2,16 @@ On Thu, 2018-08-30 at 12:59 -0700, Randy Dunlap wrote:
 > On 08/30/2018 07:38 AM, Yu-cheng Yu wrote:
 > > 
 > > When Shadow Stack is enabled, the read-only and PAGE_DIRTY_HW PTE
-> > setting is reserved only for the Shadow Stack.  To track dirty of
+> > setting is reserved only for the Shadow Stack.A A To track dirty of
 > > non-Shadow Stack read-only PTEs, we use PAGE_DIRTY_SW.
 > > 
 > > Update ptep_set_wrprotect() and pmdp_set_wrprotect().
 > > 
 > > Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
 > > ---
-> >  arch/x86/include/asm/pgtable.h | 42
+> > A arch/x86/include/asm/pgtable.h | 42
 > > ++++++++++++++++++++++++++++++++++
-> >  1 file changed, 42 insertions(+)
+> > A 1 file changed, 42 insertions(+)
 > > 
 > > diff --git a/arch/x86/include/asm/pgtable.h
 > > b/arch/x86/include/asm/pgtable.h
@@ -20,93 +20,93 @@ On Thu, 2018-08-30 at 12:59 -0700, Randy Dunlap wrote:
 > > +++ b/arch/x86/include/asm/pgtable.h
 > > @@ -1203,7 +1203,28 @@ static inline pte_t
 > > ptep_get_and_clear_full(struct mm_struct *mm,
-> >  static inline void ptep_set_wrprotect(struct mm_struct *mm,
-> >  				      unsigned long addr, pte_t
+> > A static inline void ptep_set_wrprotect(struct mm_struct *mm,
+> > A 				A A A A A A unsigned long addr, pte_t
 > > *ptep)
-> >  {
+> > A {
 > > +	pte_t pte;
 > > +
-> >  	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
+> > A 	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 > > +	pte = *ptep;
 > > +
 > > +	/*
-> > +	 * Some processors can start a write, but ending up
+> > +	A * Some processors can start a write, but ending up
 > > seeing
-> 	                                      but end up seeing
+> 	A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A but end up seeing
 > 
 > > 
-> > +	 * a read-only PTE by the time they get to the Dirty bit.
-> > +	 * In this case, they will set the Dirty bit, leaving a
-> > +	 * read-only, Dirty PTE which looks like a Shadow Stack
+> > +	A * a read-only PTE by the time they get to the Dirty bit.
+> > +	A * In this case, they will set the Dirty bit, leaving a
+> > +	A * read-only, Dirty PTE which looks like a Shadow Stack
 > > PTE.
-> > +	 *
-> > +	 * However, this behavior has been improved and will not
+> > +	A *
+> > +	A * However, this behavior has been improved and will not
 > > occur
-> > +	 * on processors supporting Shadow Stacks.  Without this
-> > +	 * guarantee, a transition to a non-present PTE and flush
+> > +	A * on processors supporting Shadow Stacks.A A Without this
+> > +	A * guarantee, a transition to a non-present PTE and flush
 > > the
-> > +	 * TLB would be needed.
-> > +	 *
-> > +	 * When change a writable PTE to read-only and if the PTE
+> > +	A * TLB would be needed.
+> > +	A *
+> > +	A * When change a writable PTE to read-only and if the PTE
 > > has
-> 	        changing
+> 	A A A A A A A A changing
 > 
 > > 
-> > +	 * _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW
+> > +	A * _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW
 > > so
-> > +	 * that the PTE is not a valid Shadow Stack PTE.
-> > +	 */
+> > +	A * that the PTE is not a valid Shadow Stack PTE.
+> > +	A */
 > > +	pte = pte_move_flags(pte, _PAGE_DIRTY_HW,
 > > _PAGE_DIRTY_SW);
 > > +	set_pte_at(mm, addr, ptep, pte);
-> >  }
-> >  
-> >  #define flush_tlb_fix_spurious_fault(vma, address) do { } while
+> > A }
+> > A 
+> > A #define flush_tlb_fix_spurious_fault(vma, address) do { } while
 > > (0)
 > > @@ -1266,7 +1287,28 @@ static inline pud_t
 > > pudp_huge_get_and_clear(struct mm_struct *mm,
-> >  static inline void pmdp_set_wrprotect(struct mm_struct *mm,
-> >  				      unsigned long addr, pmd_t
+> > A static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+> > A 				A A A A A A unsigned long addr, pmd_t
 > > *pmdp)
-> >  {
+> > A {
 > > +	pmd_t pmd;
 > > +
-> >  	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
+> > A 	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
 > > +	pmd = *pmdp;
 > > +
 > > +	/*
-> > +	 * Some processors can start a write, but ending up
+> > +	A * Some processors can start a write, but ending up
 > > seeing
-> 	                                      but end up seeing
+> 	A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A but end up seeing
 > 
 > > 
-> > +	 * a read-only PTE by the time they get to the Dirty bit.
-> > +	 * In this case, they will set the Dirty bit, leaving a
-> > +	 * read-only, Dirty PTE which looks like a Shadow Stack
+> > +	A * a read-only PTE by the time they get to the Dirty bit.
+> > +	A * In this case, they will set the Dirty bit, leaving a
+> > +	A * read-only, Dirty PTE which looks like a Shadow Stack
 > > PTE.
-> > +	 *
-> > +	 * However, this behavior has been improved and will not
+> > +	A *
+> > +	A * However, this behavior has been improved and will not
 > > occur
-> > +	 * on processors supporting Shadow Stacks.  Without this
-> > +	 * guarantee, a transition to a non-present PTE and flush
+> > +	A * on processors supporting Shadow Stacks.A A Without this
+> > +	A * guarantee, a transition to a non-present PTE and flush
 > > the
-> > +	 * TLB would be needed.
-> > +	 *
-> > +	 * When change a writable PTE to read-only and if the PTE
+> > +	A * TLB would be needed.
+> > +	A *
+> > +	A * When change a writable PTE to read-only and if the PTE
 > > has
-> 	        changing
+> 	A A A A A A A A changing
 > 
 > > 
-> > +	 * _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW
+> > +	A * _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW
 > > so
-> > +	 * that the PTE is not a valid Shadow Stack PTE.
-> > +	 */
+> > +	A * that the PTE is not a valid Shadow Stack PTE.
+> > +	A */
 > > +	pmd = pmd_move_flags(pmd, _PAGE_DIRTY_HW,
 > > _PAGE_DIRTY_SW);
 > > +	set_pmd_at(mm, addr, pmdp, pmd);
-> >  }
-> >  
-> >  #define pud_write pud_write
+> > A }
+> > A 
+> > A #define pud_write pud_write
 > > 
 > 
 
diff --git a/a/content_digest b/N2/content_digest
index bc0cbd5..74a1a7e 100644
--- a/a/content_digest
+++ b/N2/content_digest
@@ -56,16 +56,16 @@
   "> On 08/30/2018 07:38 AM, Yu-cheng Yu wrote:\n",
   "> > \n",
   "> > When Shadow Stack is enabled, the read-only and PAGE_DIRTY_HW PTE\n",
-  "> > setting is reserved only for the Shadow Stack.\302\240\302\240To track dirty of\n",
+  "> > setting is reserved only for the Shadow Stack.A A To track dirty of\n",
   "> > non-Shadow Stack read-only PTEs, we use PAGE_DIRTY_SW.\n",
   "> > \n",
   "> > Update ptep_set_wrprotect() and pmdp_set_wrprotect().\n",
   "> > \n",
   "> > Signed-off-by: Yu-cheng Yu <yu-cheng.yu\@intel.com>\n",
   "> > ---\n",
-  "> > \302\240arch/x86/include/asm/pgtable.h | 42\n",
+  "> > A arch/x86/include/asm/pgtable.h | 42\n",
   "> > ++++++++++++++++++++++++++++++++++\n",
-  "> > \302\2401 file changed, 42 insertions(+)\n",
+  "> > A 1 file changed, 42 insertions(+)\n",
   "> > \n",
   "> > diff --git a/arch/x86/include/asm/pgtable.h\n",
   "> > b/arch/x86/include/asm/pgtable.h\n",
@@ -74,97 +74,97 @@
   "> > +++ b/arch/x86/include/asm/pgtable.h\n",
   "> > \@\@ -1203,7 +1203,28 \@\@ static inline pte_t\n",
   "> > ptep_get_and_clear_full(struct mm_struct *mm,\n",
-  "> > \302\240static inline void ptep_set_wrprotect(struct mm_struct *mm,\n",
-  "> > \302\240\t\t\t\t\302\240\302\240\302\240\302\240\302\240\302\240unsigned long addr, pte_t\n",
+  "> > A static inline void ptep_set_wrprotect(struct mm_struct *mm,\n",
+  "> > A \t\t\t\tA A A A A A unsigned long addr, pte_t\n",
   "> > *ptep)\n",
-  "> > \302\240{\n",
+  "> > A {\n",
   "> > +\tpte_t pte;\n",
   "> > +\n",
-  "> > \302\240\tclear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);\n",
+  "> > A \tclear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);\n",
   "> > +\tpte = *ptep;\n",
   "> > +\n",
   "> > +\t/*\n",
-  "> > +\t\302\240* Some processors can start a write, but ending up\n",
+  "> > +\tA * Some processors can start a write, but ending up\n",
   "> > seeing\n",
-  "> \t\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240but end up seeing\n",
+  "> \tA A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A but end up seeing\n",
   "> \n",
   "> > \n",
-  "> > +\t\302\240* a read-only PTE by the time they get to the Dirty bit.\n",
-  "> > +\t\302\240* In this case, they will set the Dirty bit, leaving a\n",
-  "> > +\t\302\240* read-only, Dirty PTE which looks like a Shadow Stack\n",
+  "> > +\tA * a read-only PTE by the time they get to the Dirty bit.\n",
+  "> > +\tA * In this case, they will set the Dirty bit, leaving a\n",
+  "> > +\tA * read-only, Dirty PTE which looks like a Shadow Stack\n",
   "> > PTE.\n",
-  "> > +\t\302\240*\n",
-  "> > +\t\302\240* However, this behavior has been improved and will not\n",
+  "> > +\tA *\n",
+  "> > +\tA * However, this behavior has been improved and will not\n",
   "> > occur\n",
-  "> > +\t\302\240* on processors supporting Shadow Stacks.\302\240\302\240Without this\n",
-  "> > +\t\302\240* guarantee, a transition to a non-present PTE and flush\n",
+  "> > +\tA * on processors supporting Shadow Stacks.A A Without this\n",
+  "> > +\tA * guarantee, a transition to a non-present PTE and flush\n",
   "> > the\n",
-  "> > +\t\302\240* TLB would be needed.\n",
-  "> > +\t\302\240*\n",
-  "> > +\t\302\240* When change a writable PTE to read-only and if the PTE\n",
+  "> > +\tA * TLB would be needed.\n",
+  "> > +\tA *\n",
+  "> > +\tA * When change a writable PTE to read-only and if the PTE\n",
   "> > has\n",
-  "> \t\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240changing\n",
+  "> \tA A A A A A A A changing\n",
   "> \n",
   "> > \n",
-  "> > +\t\302\240* _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW\n",
+  "> > +\tA * _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW\n",
   "> > so\n",
-  "> > +\t\302\240* that the PTE is not a valid Shadow Stack PTE.\n",
-  "> > +\t\302\240*/\n",
+  "> > +\tA * that the PTE is not a valid Shadow Stack PTE.\n",
+  "> > +\tA */\n",
   "> > +\tpte = pte_move_flags(pte, _PAGE_DIRTY_HW,\n",
   "> > _PAGE_DIRTY_SW);\n",
   "> > +\tset_pte_at(mm, addr, ptep, pte);\n",
-  "> > \302\240}\n",
-  "> > \302\240\n",
-  "> > \302\240#define flush_tlb_fix_spurious_fault(vma, address) do { } while\n",
+  "> > A }\n",
+  "> > A \n",
+  "> > A #define flush_tlb_fix_spurious_fault(vma, address) do { } while\n",
   "> > (0)\n",
   "> > \@\@ -1266,7 +1287,28 \@\@ static inline pud_t\n",
   "> > pudp_huge_get_and_clear(struct mm_struct *mm,\n",
-  "> > \302\240static inline void pmdp_set_wrprotect(struct mm_struct *mm,\n",
-  "> > \302\240\t\t\t\t\302\240\302\240\302\240\302\240\302\240\302\240unsigned long addr, pmd_t\n",
+  "> > A static inline void pmdp_set_wrprotect(struct mm_struct *mm,\n",
+  "> > A \t\t\t\tA A A A A A unsigned long addr, pmd_t\n",
   "> > *pmdp)\n",
-  "> > \302\240{\n",
+  "> > A {\n",
   "> > +\tpmd_t pmd;\n",
   "> > +\n",
-  "> > \302\240\tclear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);\n",
+  "> > A \tclear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);\n",
   "> > +\tpmd = *pmdp;\n",
   "> > +\n",
   "> > +\t/*\n",
-  "> > +\t\302\240* Some processors can start a write, but ending up\n",
+  "> > +\tA * Some processors can start a write, but ending up\n",
   "> > seeing\n",
-  "> \t\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240but end up seeing\n",
+  "> \tA A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A but end up seeing\n",
   "> \n",
   "> > \n",
-  "> > +\t\302\240* a read-only PTE by the time they get to the Dirty bit.\n",
-  "> > +\t\302\240* In this case, they will set the Dirty bit, leaving a\n",
-  "> > +\t\302\240* read-only, Dirty PTE which looks like a Shadow Stack\n",
+  "> > +\tA * a read-only PTE by the time they get to the Dirty bit.\n",
+  "> > +\tA * In this case, they will set the Dirty bit, leaving a\n",
+  "> > +\tA * read-only, Dirty PTE which looks like a Shadow Stack\n",
   "> > PTE.\n",
-  "> > +\t\302\240*\n",
-  "> > +\t\302\240* However, this behavior has been improved and will not\n",
+  "> > +\tA *\n",
+  "> > +\tA * However, this behavior has been improved and will not\n",
   "> > occur\n",
-  "> > +\t\302\240* on processors supporting Shadow Stacks.\302\240\302\240Without this\n",
-  "> > +\t\302\240* guarantee, a transition to a non-present PTE and flush\n",
+  "> > +\tA * on processors supporting Shadow Stacks.A A Without this\n",
+  "> > +\tA * guarantee, a transition to a non-present PTE and flush\n",
   "> > the\n",
-  "> > +\t\302\240* TLB would be needed.\n",
-  "> > +\t\302\240*\n",
-  "> > +\t\302\240* When change a writable PTE to read-only and if the PTE\n",
+  "> > +\tA * TLB would be needed.\n",
+  "> > +\tA *\n",
+  "> > +\tA * When change a writable PTE to read-only and if the PTE\n",
   "> > has\n",
-  "> \t\302\240\302\240\302\240\302\240\302\240\302\240\302\240\302\240changing\n",
+  "> \tA A A A A A A A changing\n",
   "> \n",
   "> > \n",
-  "> > +\t\302\240* _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW\n",
+  "> > +\tA * _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW\n",
   "> > so\n",
-  "> > +\t\302\240* that the PTE is not a valid Shadow Stack PTE.\n",
-  "> > +\t\302\240*/\n",
+  "> > +\tA * that the PTE is not a valid Shadow Stack PTE.\n",
+  "> > +\tA */\n",
   "> > +\tpmd = pmd_move_flags(pmd, _PAGE_DIRTY_HW,\n",
   "> > _PAGE_DIRTY_SW);\n",
   "> > +\tset_pmd_at(mm, addr, pmdp, pmd);\n",
-  "> > \302\240}\n",
-  "> > \302\240\n",
-  "> > \302\240#define pud_write pud_write\n",
+  "> > A }\n",
+  "> > A \n",
+  "> > A #define pud_write pud_write\n",
   "> > \n",
   "> \n",
   "\n",
   "Thanks, I will fix it!"
 ]
 
-b7ecbbacc58b8731cc98357d98def28e30252495c2c465e5cc577352c0ac72e9
+b99b84be63c5378e76ba84aeefb6f1e227280f92337460a062caf5635e550ca8
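
For readability, here is the patched ptep_set_wrprotect() reconstructed from the quoted hunks above, with the mail quoting and the mojibake ("A A" standing in for non-breaking spaces, \302\240 in the content digests) stripped, and with Randy Dunlap's two wording fixes ("end up seeing", "changing") applied. Note this is only a sketch of the patch under review: pte_move_flags(), _PAGE_DIRTY_HW and _PAGE_DIRTY_SW are introduced elsewhere in the shadow-stack series, not in mainline, so it reads only against that tree.

static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte = *ptep;

	/*
	 * Some processors can start a write, but end up seeing
	 * a read-only PTE by the time they get to the Dirty bit.
	 * In this case, they will set the Dirty bit, leaving a
	 * read-only, Dirty PTE which looks like a Shadow Stack PTE.
	 *
	 * However, this behavior has been improved and will not
	 * occur on processors supporting Shadow Stacks.  Without
	 * this guarantee, a transition to a non-present PTE and
	 * flush the TLB would be needed.
	 *
	 * When changing a writable PTE to read-only and if the PTE
	 * has _PAGE_DIRTY_HW set, we move that bit to _PAGE_DIRTY_SW
	 * so that the PTE is not a valid Shadow Stack PTE.
	 */
	pte = pte_move_flags(pte, _PAGE_DIRTY_HW, _PAGE_DIRTY_SW);
	set_pte_at(mm, addr, ptep, pte);
}

pmdp_set_wrprotect() in the same hunks is the pmd_t mirror image: clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp), then pmd_move_flags(pmd, _PAGE_DIRTY_HW, _PAGE_DIRTY_SW) and set_pmd_at(). Moving the dirty bit, rather than clearing it, preserves the dirty state for ordinary write-protected pages, which per the quoted changelog is tracked in PAGE_DIRTY_SW for non-Shadow Stack read-only PTEs.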
