From: Stephen Rothwell <sfr@canb.auug.org.au>
To: Dan Williams <dan.j.williams@intel.com>,
	Al Viro <viro@ZenIV.linux.org.uk>
Cc: Linux-Next Mailing List <linux-next@vger.kernel.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
Subject: linux-next: manual merge of the nvdimm tree with the vfs tree
Date: Mon, 3 Jul 2017 16:41:43 +1000
Message-ID: <20170703164143.6376f166@canb.auug.org.au>

Hi Dan,

Today's linux-next merge of the nvdimm tree got conflicts in:

  include/linux/uio.h
  lib/iov_iter.c

between commit:

  aa28de275a24 ("iov_iter/hardening: move object size checks to inlined part")

from the vfs tree and commit:

  0aed55af8834 ("x86, uaccess: introduce copy_from_iter_flushcache for pmem / cache-bypass operations")

from the nvdimm tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
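
For quick orientation, here is a minimal sketch of what each side does to
the same region of include/linux/uio.h (simplified from the hunks below,
not the exact upstream code): the vfs tree turns the copy helpers into
inline, size-checked wrappers around renamed _copy_* functions, while the
nvdimm tree adds a new copy_from_iter_flushcache() declaration with a
nocache fallback.

  /* vfs tree (aa28de275a24): the object-size check moves into an inline wrapper */
  size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

  static __always_inline __must_check
  size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
  {
  	if (unlikely(!check_copy_size(addr, bytes, false)))
  		return bytes;
  	else
  		return _copy_from_iter_nocache(addr, bytes, i);
  }

  /* nvdimm tree (0aed55af8834): new cache-bypassing copy helper for pmem */
  #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
  size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
  #else
  static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes,
  					       struct iov_iter *i)
  {
  	return copy_from_iter_nocache(addr, bytes, i);
  }
  #endif

Both changes land in the same span of declarations, hence the conflict; the
resolution below keeps the nvdimm block alongside the new inline wrappers.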

-- 
Cheers,
Stephen Rothwell

diff --cc include/linux/uio.h
index 243e2362fe1a,55cd54a0e941..000000000000
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@@ -92,58 -91,26 +92,74 @@@ size_t copy_page_to_iter(struct page *p
  			 struct iov_iter *i);
  size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i);
 -size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 -size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 -bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
 -size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 +
 +size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 +size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 +bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
 +size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 +bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
 +
 +static __always_inline __must_check
 +size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 +{
 +	if (unlikely(!check_copy_size(addr, bytes, true)))
 +		return bytes;
 +	else
 +		return _copy_to_iter(addr, bytes, i);
 +}
 +
 +static __always_inline __must_check
 +size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 +{
 +	if (unlikely(!check_copy_size(addr, bytes, false)))
 +		return bytes;
 +	else
 +		return _copy_from_iter(addr, bytes, i);
 +}
 +
 +static __always_inline __must_check
 +bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 +{
 +	if (unlikely(!check_copy_size(addr, bytes, false)))
 +		return false;
 +	else
 +		return _copy_from_iter_full(addr, bytes, i);
 +}
 +
 +static __always_inline __must_check
 +size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 +{
 +	if (unlikely(!check_copy_size(addr, bytes, false)))
 +		return bytes;
 +	else
 +		return _copy_from_iter_nocache(addr, bytes, i);
 +}
 +
 +static __always_inline __must_check
 +bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 +{
 +	if (unlikely(!check_copy_size(addr, bytes, false)))
 +		return false;
 +	else
 +		return _copy_from_iter_full_nocache(addr, bytes, i);
 +}
 +
+ #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+ /*
+  * Note, users like pmem that depend on the stricter semantics of
+  * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
+  * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
+  * destination is flushed from the cache on return.
+  */
+ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
+ #else
+ static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes,
+ 				       struct iov_iter *i)
+ {
+ 	return copy_from_iter_nocache(addr, bytes, i);
+ }
+ #endif
 -bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
++
  size_t iov_iter_zero(size_t bytes, struct iov_iter *);
  unsigned long iov_iter_alignment(const struct iov_iter *i);
  unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
diff --cc lib/iov_iter.c
index d48f0976b8b4,c9a69064462f..000000000000
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@@ -639,9 -613,31 +639,31 @@@ size_t _copy_from_iter_nocache(void *ad
  
  	return bytes;
  }
 -EXPORT_SYMBOL(copy_from_iter_nocache);
 +EXPORT_SYMBOL(_copy_from_iter_nocache);
  
+ #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+ {
+ 	char *to = addr;
+ 	if (unlikely(i->type & ITER_PIPE)) {
+ 		WARN_ON(1);
+ 		return 0;
+ 	}
+ 	iterate_and_advance(i, bytes, v,
+ 		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
+ 					 v.iov_base, v.iov_len),
+ 		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+ 				 v.bv_offset, v.bv_len),
+ 		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
+ 			v.iov_len)
+ 	)
+ 
+ 	return bytes;
+ }
+ EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
+ #endif
+ 
 -bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 +bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
  {
  	char *to = addr;
  	if (unlikely(i->type & ITER_PIPE)) {

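One practical point from the comment carried into uio.h above: callers that
need the stricter flush-on-return semantics must check for
CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE themselves, because the fallback silently
degrades to copy_from_iter_nocache(). A rough sketch of that calling
pattern, assuming a hypothetical pmem-style consumer (the function name and
the warning text are illustrative, not taken from the pmem driver):

  #include <linux/kernel.h>
  #include <linux/uio.h>

  /* Hypothetical consumer: shows the IS_ENABLED() check the comment asks for. */
  static size_t pmem_copy_from_iter(void *pmem_addr, size_t bytes,
  				  struct iov_iter *i)
  {
  	/*
  	 * Without arch support, copy_from_iter_flushcache() is just
  	 * copy_from_iter_nocache(); data may still sit in the CPU cache
  	 * and the caller must arrange its own write-back or flush.
  	 */
  	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
  		pr_warn_once("no flushcache support; caller must flush explicitly\n");

  	return copy_from_iter_flushcache(pmem_addr, bytes, i);
  }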