linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* linux-next: manual merge of the folio tree with the ext3 tree
@ 2021-07-21  5:02 Stephen Rothwell
  0 siblings, 0 replies; 2+ messages in thread
From: Stephen Rothwell @ 2021-07-21  5:02 UTC (permalink / raw)
  To: Matthew Wilcox, Jan Kara
  Cc: Linux Kernel Mailing List, Linux Next Mailing List

[-- Attachment #1: Type: text/plain, Size: 4860 bytes --]

Hi all,

Today's linux-next merge of the folio tree got a conflict in:

  mm/filemap.c

between commit:

  730633f0b7f9 ("mm: Protect operations adding pages to page cache with invalidate_lock")

from the ext3 tree and commit:

  e3700f8b6abe ("mm/filemap: Add __folio_lock_async()")

from the folio tree.

I fixed it up (I think - see below) and can carry the fix as necessary.
This is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc mm/filemap.c
index 0fad08331cf4,104b27c372bf..000000000000
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@@ -999,54 -997,16 +999,54 @@@ struct folio *filemap_alloc_folio(gfp_
  		do {
  			cpuset_mems_cookie = read_mems_allowed_begin();
  			n = cpuset_mem_spread_node();
- 			page = __alloc_pages_node(n, gfp, 0);
- 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
+ 			folio = __folio_alloc_node(gfp, order, n);
+ 		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
  
- 		return page;
+ 		return folio;
  	}
- 	return alloc_pages(gfp, 0);
+ 	return folio_alloc(gfp, order);
  }
- EXPORT_SYMBOL(__page_cache_alloc);
+ EXPORT_SYMBOL(filemap_alloc_folio);
  #endif
  
 +/*
 + * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 + *
 + * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 + *
 + * @mapping1: the first mapping to lock
 + * @mapping2: the second mapping to lock
 + */
 +void filemap_invalidate_lock_two(struct address_space *mapping1,
 +				 struct address_space *mapping2)
 +{
 +	if (mapping1 > mapping2)
 +		swap(mapping1, mapping2);
 +	if (mapping1)
 +		down_write(&mapping1->invalidate_lock);
 +	if (mapping2 && mapping1 != mapping2)
 +		down_write_nested(&mapping2->invalidate_lock, 1);
 +}
 +EXPORT_SYMBOL(filemap_invalidate_lock_two);
 +
 +/*
 + * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 + *
 + * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
 + *
 + * @mapping1: the first mapping to unlock
 + * @mapping2: the second mapping to unlock
 + */
 +void filemap_invalidate_unlock_two(struct address_space *mapping1,
 +				   struct address_space *mapping2)
 +{
 +	if (mapping1)
 +		up_write(&mapping1->invalidate_lock);
 +	if (mapping2 && mapping1 != mapping2)
 +		up_write(&mapping2->invalidate_lock);
 +}
 +EXPORT_SYMBOL(filemap_invalidate_unlock_two);
 +
  /*
   * In order to wait for pages to become available there must be
   * waitqueues associated with pages. By using a hash table of
@@@ -2406,49 -2362,42 +2402,50 @@@ static int filemap_update_page(struct k
  		struct address_space *mapping, struct iov_iter *iter,
  		struct page *page)
  {
+ 	struct folio *folio = page_folio(page);
  	int error;
  
 +	if (iocb->ki_flags & IOCB_NOWAIT) {
 +		if (!filemap_invalidate_trylock_shared(mapping))
 +			return -EAGAIN;
 +	} else {
 +		filemap_invalidate_lock_shared(mapping);
 +	}
 +
- 	if (!trylock_page(page)) {
+ 	if (!folio_trylock(folio)) {
 +		error = -EAGAIN;
  		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
 -			return -EAGAIN;
 +			goto unlock_mapping;
  		if (!(iocb->ki_flags & IOCB_WAITQ)) {
 +			filemap_invalidate_unlock_shared(mapping);
- 			put_and_wait_on_page_locked(page, TASK_KILLABLE);
+ 			put_and_wait_on_page_locked(&folio->page, TASK_KILLABLE);
  			return AOP_TRUNCATED_PAGE;
  		}
- 		error = __lock_page_async(page, iocb->ki_waitq);
+ 		error = __folio_lock_async(folio, iocb->ki_waitq);
  		if (error)
 -			return error;
 +			goto unlock_mapping;
  	}
  
 +	error = AOP_TRUNCATED_PAGE;
- 	if (!page->mapping)
+ 	if (!folio->mapping)
 -		goto truncated;
 +		goto unlock;
  
  	error = 0;
- 	if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page))
+ 	if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, &folio->page))
  		goto unlock;
  
  	error = -EAGAIN;
  	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
  		goto unlock;
  
- 	error = filemap_read_page(iocb->ki_filp, mapping, page);
+ 	error = filemap_read_page(iocb->ki_filp, mapping, &folio->page);
 -	if (error == AOP_TRUNCATED_PAGE)
 -		folio_put(folio);
 -	return error;
 -truncated:
 -	folio_unlock(folio);
 -	folio_put(folio);
 -	return AOP_TRUNCATED_PAGE;
 +	goto unlock_mapping;
  unlock:
- 	unlock_page(page);
+ 	folio_unlock(folio);
 +unlock_mapping:
 +	filemap_invalidate_unlock_shared(mapping);
 +	if (error == AOP_TRUNCATED_PAGE)
- 		put_page(page);
++		folio_put(folio);
  	return error;
  }
  

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 2+ messages in thread

* linux-next: manual merge of the folio tree with the ext3 tree
@ 2021-07-21  4:42 Stephen Rothwell
  0 siblings, 0 replies; 2+ messages in thread
From: Stephen Rothwell @ 2021-07-21  4:42 UTC (permalink / raw)
  To: Matthew Wilcox, Jan Kara
  Cc: Linux Kernel Mailing List, Linux Next Mailing List

[-- Attachment #1: Type: text/plain, Size: 3928 bytes --]

Hi all,

Today's linux-next merge of the folio tree got a conflict in:

  mm/rmap.c

between commits:

  9608703e488c ("mm: Fix comments mentioning i_mutex")
  730633f0b7f9 ("mm: Protect operations adding pages to page cache with invalidate_lock")

from the ext3 tree and commit:

  a8f645b17cd4 ("mm/memcg: Add folio_lruvec_lock() and similar functions")

from the folio tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc mm/rmap.c
index 2d29a57d29e8,b3aae8eeaeaf..000000000000
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@@ -20,29 -20,28 +20,29 @@@
  /*
   * Lock ordering in mm:
   *
 - * inode->i_mutex	(while writing or truncating, not reading or faulting)
 + * inode->i_rwsem	(while writing or truncating, not reading or faulting)
   *   mm->mmap_lock
 - *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
 - *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 - *         mapping->i_mmap_rwsem
 - *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 - *           anon_vma->rwsem
 - *             mm->page_table_lock or pte_lock
 - *               swap_lock (in swap_duplicate, swap_info_get)
 - *                 mmlist_lock (in mmput, drain_mmlist and others)
 - *                 mapping->private_lock (in __set_page_dirty_buffers)
 - *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
 - *                     i_pages lock (widely used)
 - *                       lruvec->lru_lock (in folio_lruvec_lock_irq)
 - *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 - *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 - *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 - *                   i_pages lock (widely used, in set_page_dirty,
 - *                             in arch-dependent flush_dcache_mmap_lock,
 - *                             within bdi.wb->list_lock in __sync_single_inode)
 + *     mapping->invalidate_lock (in filemap_fault)
 + *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
 + *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 + *           mapping->i_mmap_rwsem
 + *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 + *             anon_vma->rwsem
 + *               mm->page_table_lock or pte_lock
 + *                 swap_lock (in swap_duplicate, swap_info_get)
 + *                   mmlist_lock (in mmput, drain_mmlist and others)
 + *                   mapping->private_lock (in __set_page_dirty_buffers)
 + *                     lock_page_memcg move_lock (in __set_page_dirty_buffers)
 + *                       i_pages lock (widely used)
-  *                         lruvec->lru_lock (in lock_page_lruvec_irq)
++ *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 + *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 + *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 + *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 + *                     i_pages lock (widely used, in set_page_dirty,
 + *                               in arch-dependent flush_dcache_mmap_lock,
 + *                               within bdi.wb->list_lock in __sync_single_inode)
   *
 - * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 + * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
   *   ->tasklist_lock
   *     pte map lock
   *

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2021-07-21  5:02 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-21  5:02 linux-next: manual merge of the folio tree with the ext3 tree Stephen Rothwell
  -- strict thread matches above, loose matches on Subject: below --
2021-07-21  4:42 Stephen Rothwell

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).