* [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
@ 2019-03-30 5:41 ` Aneesh Kumar K.V
0 siblings, 0 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2019-03-30 5:41 UTC (permalink / raw)
To: dan.j.williams, akpm, Jan Kara
Cc: linux-nvdimm, linux-mm, linuxppc-dev, Aneesh Kumar K.V, stable
With some architectures like ppc64, set_pmd_at() cannot cope with
a situation where there is already some (different) valid entry present.
Use pmdp_set_access_flags() instead to modify the pfn, as it is built to
deal with modifying existing PMD entries.
This is similar to
commit cae85cb8add3 ("mm/memory.c: fix modifying of page protection by insert_pfn()")
We also make a similar update w.r.t. insert_pfn_pud, even though ppc64 doesn't support
pud pfn entries now.
CC: stable@vger.kernel.org
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
mm/huge_memory.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 404acdcd0455..f7dca413c4b2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -755,6 +755,20 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
+ if (!pmd_none(*pmd)) {
+ if (write) {
+ if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+ goto out_unlock;
+ }
+ entry = pmd_mkyoung(*pmd);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+ update_mmu_cache_pmd(vma, addr, pmd);
+ }
+ goto out_unlock;
+ }
+
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
@@ -770,6 +784,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
+out_unlock:
spin_unlock(ptl);
}
@@ -821,6 +836,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pud_lock(mm, pud);
+ if (!pud_none(*pud)) {
+ if (write) {
+ if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+ goto out_unlock;
+ }
+ entry = pud_mkyoung(*pud);
+ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+ if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+ update_mmu_cache_pud(vma, addr, pud);
+ }
+ goto out_unlock;
+ }
+
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
@@ -830,6 +859,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
}
set_pud_at(mm, addr, pud, entry);
update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
spin_unlock(ptl);
}
--
2.20.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
@ 2019-03-30 5:41 ` Aneesh Kumar K.V
0 siblings, 0 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2019-03-30 5:41 UTC (permalink / raw)
To: dan.j.williams, akpm, Jan Kara
Cc: linux-mm, linuxppc-dev, Aneesh Kumar K.V, stable, linux-nvdimm
With some architectures like ppc64, set_pmd_at() cannot cope with
a situation where there is already some (different) valid entry present.
Use pmdp_set_access_flags() instead to modify the pfn, as it is built to
deal with modifying existing PMD entries.
This is similar to
commit cae85cb8add3 ("mm/memory.c: fix modifying of page protection by insert_pfn()")
We also make a similar update w.r.t. insert_pfn_pud, even though ppc64 doesn't support
pud pfn entries now.
CC: stable@vger.kernel.org
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
mm/huge_memory.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 404acdcd0455..f7dca413c4b2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -755,6 +755,20 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
+ if (!pmd_none(*pmd)) {
+ if (write) {
+ if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+ goto out_unlock;
+ }
+ entry = pmd_mkyoung(*pmd);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+ update_mmu_cache_pmd(vma, addr, pmd);
+ }
+ goto out_unlock;
+ }
+
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
@@ -770,6 +784,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
+out_unlock:
spin_unlock(ptl);
}
@@ -821,6 +836,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pud_lock(mm, pud);
+ if (!pud_none(*pud)) {
+ if (write) {
+ if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+ goto out_unlock;
+ }
+ entry = pud_mkyoung(*pud);
+ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+ if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+ update_mmu_cache_pud(vma, addr, pud);
+ }
+ goto out_unlock;
+ }
+
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
@@ -830,6 +859,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
}
set_pud_at(mm, addr, pud, entry);
update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
spin_unlock(ptl);
}
--
2.20.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
2019-03-30 5:41 ` Aneesh Kumar K.V
@ 2019-04-01 8:14 ` Jan Kara
-1 siblings, 0 replies; 6+ messages in thread
From: Jan Kara @ 2019-04-01 8:14 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: dan.j.williams, akpm, Jan Kara, linux-nvdimm, linux-mm,
linuxppc-dev, stable
On Sat 30-03-19 11:11:21, Aneesh Kumar K.V wrote:
> With some architectures like ppc64, set_pmd_at() cannot cope with
> a situation where there is already some (different) valid entry present.
>
> Use pmdp_set_access_flags() instead to modify the pfn, as it is built to
> deal with modifying existing PMD entries.
>
> This is similar to
> commit cae85cb8add3 ("mm/memory.c: fix modifying of page protection by insert_pfn()")
>
> We also make a similar update w.r.t. insert_pfn_pud, even though ppc64 doesn't support
> pud pfn entries now.
>
> CC: stable@vger.kernel.org
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Thanks for fixing this! The patch looks good to me. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
> ---
> mm/huge_memory.c | 31 +++++++++++++++++++++++++++++++
> 1 file changed, 31 insertions(+)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 404acdcd0455..f7dca413c4b2 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -755,6 +755,20 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> spinlock_t *ptl;
>
> ptl = pmd_lock(mm, pmd);
> + if (!pmd_none(*pmd)) {
> + if (write) {
> + if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
> + WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
> + goto out_unlock;
> + }
> + entry = pmd_mkyoung(*pmd);
> + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
> + if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
> + update_mmu_cache_pmd(vma, addr, pmd);
> + }
> + goto out_unlock;
> + }
> +
> entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
> if (pfn_t_devmap(pfn))
> entry = pmd_mkdevmap(entry);
> @@ -770,6 +784,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
>
> set_pmd_at(mm, addr, pmd, entry);
> update_mmu_cache_pmd(vma, addr, pmd);
> +out_unlock:
> spin_unlock(ptl);
> }
>
> @@ -821,6 +836,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
> spinlock_t *ptl;
>
> ptl = pud_lock(mm, pud);
> + if (!pud_none(*pud)) {
> + if (write) {
> + if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
> + WARN_ON_ONCE(!is_huge_zero_pud(*pud));
> + goto out_unlock;
> + }
> + entry = pud_mkyoung(*pud);
> + entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
> + if (pudp_set_access_flags(vma, addr, pud, entry, 1))
> + update_mmu_cache_pud(vma, addr, pud);
> + }
> + goto out_unlock;
> + }
> +
> entry = pud_mkhuge(pfn_t_pud(pfn, prot));
> if (pfn_t_devmap(pfn))
> entry = pud_mkdevmap(entry);
> @@ -830,6 +859,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
> }
> set_pud_at(mm, addr, pud, entry);
> update_mmu_cache_pud(vma, addr, pud);
> +
> +out_unlock:
> spin_unlock(ptl);
> }
>
> --
> 2.20.1
>
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
@ 2019-04-01 8:14 ` Jan Kara
0 siblings, 0 replies; 6+ messages in thread
From: Jan Kara @ 2019-04-01 8:14 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: Jan Kara, linux-nvdimm, stable, linux-mm, dan.j.williams,
linuxppc-dev, akpm
On Sat 30-03-19 11:11:21, Aneesh Kumar K.V wrote:
> With some architectures like ppc64, set_pmd_at() cannot cope with
> a situation where there is already some (different) valid entry present.
>
> Use pmdp_set_access_flags() instead to modify the pfn, as it is built to
> deal with modifying existing PMD entries.
>
> This is similar to
> commit cae85cb8add3 ("mm/memory.c: fix modifying of page protection by insert_pfn()")
>
> We also make a similar update w.r.t. insert_pfn_pud, even though ppc64 doesn't support
> pud pfn entries now.
>
> CC: stable@vger.kernel.org
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Thanks for fixing this! The patch looks good to me. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
> ---
> mm/huge_memory.c | 31 +++++++++++++++++++++++++++++++
> 1 file changed, 31 insertions(+)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 404acdcd0455..f7dca413c4b2 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -755,6 +755,20 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> spinlock_t *ptl;
>
> ptl = pmd_lock(mm, pmd);
> + if (!pmd_none(*pmd)) {
> + if (write) {
> + if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
> + WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
> + goto out_unlock;
> + }
> + entry = pmd_mkyoung(*pmd);
> + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
> + if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
> + update_mmu_cache_pmd(vma, addr, pmd);
> + }
> + goto out_unlock;
> + }
> +
> entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
> if (pfn_t_devmap(pfn))
> entry = pmd_mkdevmap(entry);
> @@ -770,6 +784,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
>
> set_pmd_at(mm, addr, pmd, entry);
> update_mmu_cache_pmd(vma, addr, pmd);
> +out_unlock:
> spin_unlock(ptl);
> }
>
> @@ -821,6 +836,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
> spinlock_t *ptl;
>
> ptl = pud_lock(mm, pud);
> + if (!pud_none(*pud)) {
> + if (write) {
> + if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
> + WARN_ON_ONCE(!is_huge_zero_pud(*pud));
> + goto out_unlock;
> + }
> + entry = pud_mkyoung(*pud);
> + entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
> + if (pudp_set_access_flags(vma, addr, pud, entry, 1))
> + update_mmu_cache_pud(vma, addr, pud);
> + }
> + goto out_unlock;
> + }
> +
> entry = pud_mkhuge(pfn_t_pud(pfn, prot));
> if (pfn_t_devmap(pfn))
> entry = pud_mkdevmap(entry);
> @@ -830,6 +859,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
> }
> set_pud_at(mm, addr, pud, entry);
> update_mmu_cache_pud(vma, addr, pud);
> +
> +out_unlock:
> spin_unlock(ptl);
> }
>
> --
> 2.20.1
>
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
2019-03-30 5:41 ` Aneesh Kumar K.V
@ 2019-04-01 10:49 ` Sasha Levin
-1 siblings, 0 replies; 6+ messages in thread
From: Sasha Levin @ 2019-04-01 10:49 UTC (permalink / raw)
To: Sasha Levin, Aneesh Kumar K.V,
dan.j.williams-ral2JQCrhuEAvxtiuMwx3w,
akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
Cc: linux-mm-Bw31MaZKKs3YtjvyW6yDsg, stable-u79uwXL29TY76Z2rM5mHXA,
linux-nvdimm-hn68Rpc1hR1g9hUCZPvPmw
Hi,
[This is an automated email]
This commit has been processed because it contains a -stable tag.
The stable tag indicates that it's relevant for the following trees: all
The bot has tested the following trees: v5.0.5, v4.19.32, v4.14.109, v4.9.166, v4.4.177, v3.18.137.
v5.0.5: Build OK!
v4.19.32: Build OK!
v4.14.109: Build OK!
v4.9.166: Failed to apply! Possible dependencies:
82b0f8c39a38 ("mm: join struct fault_env and vm_fault")
953c66c2b22a ("mm: THP page cache support for ppc64")
a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages")
b5bc66b71310 ("mm: update mmu_gather range correctly")
fd60775aea80 ("mm, thp: avoid unlikely branches for split_huge_pmd")
v4.4.177: Failed to apply! Possible dependencies:
01871e59af5c ("mm, dax: fix livelock, allow dax pmd mappings to become writeable")
01c8f1c44b83 ("mm, dax, gpu: convert vm_insert_mixed to pfn_t")
0e749e54244e ("dax: increase granularity of dax_clear_blocks() operations")
34c0fd540e79 ("mm, dax, pmem: introduce pfn_t")
52db400fcd50 ("pmem, dax: clean up clear_pmem()")
6077776b5908 ("bpf: split HAVE_BPF_JIT into cBPF and eBPF variant")
a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages")
b2e0d1625e19 ("dax: fix lifetime of in-kernel dax mappings with dax_map_atomic()")
b329f95d70f3 ("ARM: 8479/2: add implementation for arm-smccc")
e37e43a497d5 ("x86/mm/64: Enable vmapped stacks (CONFIG_HAVE_ARCH_VMAP_STACK=y)")
f25748e3c34e ("mm, dax: convert vmf_insert_pfn_pmd() to pfn_t")
v3.18.137: Failed to apply! Possible dependencies:
047fc8a1f9a6 ("libnvdimm, nfit, nd_blk: driver for BLK-mode access persistent memory")
2a3746984c98 ("x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()")
34c0fd540e79 ("mm, dax, pmem: introduce pfn_t")
4c1eaa2344fb ("drivers/block/pmem: Fix 32-bit build warning in pmem_alloc()")
5cad465d7fa6 ("mm: add vmf_insert_pfn_pmd()")
61031952f4c8 ("arch, x86: pmem api for ensuring durability of persistent memory updates")
62232e45f4a2 ("libnvdimm: control (ioctl) messages for nvdimm_bus and nvdimm devices")
777783e0abae ("staging: android: binder: move to the "real" part of the kernel")
957e3facd147 ("gcov: enable GCOV_PROFILE_ALL from ARCH Kconfigs")
9e853f2313e5 ("drivers/block/pmem: Add a driver for persistent memory")
9f53f9fa4ad1 ("libnvdimm, pmem: add libnvdimm support to the pmem driver")
b94d5230d06e ("libnvdimm, nfit: initial libnvdimm infrastructure and NFIT support")
cb389b9c0e00 ("dax: drop size parameter to ->direct_access()")
dd22f551ac0a ("block: Change direct_access calling convention")
e2e05394e4a3 ("pmem, dax: have direct_access use __pmem annotation")
ec776ef6bbe1 ("x86/mm: Add support for the non-standard protected e820 type")
f0dc089ce217 ("libnvdimm: enable iostat")
f25748e3c34e ("mm, dax: convert vmf_insert_pfn_pmd() to pfn_t")
How should we proceed with this patch?
--
Thanks,
Sasha
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
@ 2019-04-01 10:49 ` Sasha Levin
0 siblings, 0 replies; 6+ messages in thread
From: Sasha Levin @ 2019-04-01 10:49 UTC (permalink / raw)
To: Sasha Levin, Aneesh Kumar K.V, dan.j.williams, akpm
Cc: linux-nvdimm, linux-mm, stable
Hi,
[This is an automated email]
This commit has been processed because it contains a -stable tag.
The stable tag indicates that it's relevant for the following trees: all
The bot has tested the following trees: v5.0.5, v4.19.32, v4.14.109, v4.9.166, v4.4.177, v3.18.137.
v5.0.5: Build OK!
v4.19.32: Build OK!
v4.14.109: Build OK!
v4.9.166: Failed to apply! Possible dependencies:
82b0f8c39a38 ("mm: join struct fault_env and vm_fault")
953c66c2b22a ("mm: THP page cache support for ppc64")
a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages")
b5bc66b71310 ("mm: update mmu_gather range correctly")
fd60775aea80 ("mm, thp: avoid unlikely branches for split_huge_pmd")
v4.4.177: Failed to apply! Possible dependencies:
01871e59af5c ("mm, dax: fix livelock, allow dax pmd mappings to become writeable")
01c8f1c44b83 ("mm, dax, gpu: convert vm_insert_mixed to pfn_t")
0e749e54244e ("dax: increase granularity of dax_clear_blocks() operations")
34c0fd540e79 ("mm, dax, pmem: introduce pfn_t")
52db400fcd50 ("pmem, dax: clean up clear_pmem()")
6077776b5908 ("bpf: split HAVE_BPF_JIT into cBPF and eBPF variant")
a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages")
b2e0d1625e19 ("dax: fix lifetime of in-kernel dax mappings with dax_map_atomic()")
b329f95d70f3 ("ARM: 8479/2: add implementation for arm-smccc")
e37e43a497d5 ("x86/mm/64: Enable vmapped stacks (CONFIG_HAVE_ARCH_VMAP_STACK=y)")
f25748e3c34e ("mm, dax: convert vmf_insert_pfn_pmd() to pfn_t")
v3.18.137: Failed to apply! Possible dependencies:
047fc8a1f9a6 ("libnvdimm, nfit, nd_blk: driver for BLK-mode access persistent memory")
2a3746984c98 ("x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()")
34c0fd540e79 ("mm, dax, pmem: introduce pfn_t")
4c1eaa2344fb ("drivers/block/pmem: Fix 32-bit build warning in pmem_alloc()")
5cad465d7fa6 ("mm: add vmf_insert_pfn_pmd()")
61031952f4c8 ("arch, x86: pmem api for ensuring durability of persistent memory updates")
62232e45f4a2 ("libnvdimm: control (ioctl) messages for nvdimm_bus and nvdimm devices")
777783e0abae ("staging: android: binder: move to the "real" part of the kernel")
957e3facd147 ("gcov: enable GCOV_PROFILE_ALL from ARCH Kconfigs")
9e853f2313e5 ("drivers/block/pmem: Add a driver for persistent memory")
9f53f9fa4ad1 ("libnvdimm, pmem: add libnvdimm support to the pmem driver")
b94d5230d06e ("libnvdimm, nfit: initial libnvdimm infrastructure and NFIT support")
cb389b9c0e00 ("dax: drop size parameter to ->direct_access()")
dd22f551ac0a ("block: Change direct_access calling convention")
e2e05394e4a3 ("pmem, dax: have direct_access use __pmem annotation")
ec776ef6bbe1 ("x86/mm: Add support for the non-standard protected e820 type")
f0dc089ce217 ("libnvdimm: enable iostat")
f25748e3c34e ("mm, dax: convert vmf_insert_pfn_pmd() to pfn_t")
How should we proceed with this patch?
--
Thanks,
Sasha
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2019-04-01 10:49 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-03-30 5:41 [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd() Aneesh Kumar K.V
2019-03-30 5:41 ` Aneesh Kumar K.V
2019-04-01 8:14 ` Jan Kara
2019-04-01 8:14 ` Jan Kara
[not found] ` <20190330054121.27831-1-aneesh.kumar-tEXmvtCZX7AybS5Ee8rs3A@public.gmane.org>
2019-04-01 10:49 ` Sasha Levin
2019-04-01 10:49 ` Sasha Levin
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.