linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/8] staging: erofs: fix some issues and clean up codes
@ 2018-08-12 14:01 Chao Yu
  2018-08-12 14:01 ` [PATCH 1/8] staging: erofs: introduce erofs_grab_bio Chao Yu
                   ` (7 more replies)
  0 siblings, 8 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Chao Yu

From: Chao Yu <yuchao0@huawei.com>

This patchset mainly adds error handling code for erofs xattr subsystem.
In addition, some code cleanups are also included in this patchset.

P.S. Some other patches are still previewing in the linux-erofs mailing
list, which will be posted in the 2nd part later.

Gao Xiang (8):
  staging: erofs: introduce erofs_grab_bio
  staging: erofs: separate erofs_get_meta_page
  staging: erofs: add error handling for xattr submodule
  staging: erofs: cleanup z_erofs_vle_work_{lookup, register}
  staging: erofs: rearrange vle clustertype definitions
  staging: erofs: fix vle_decompressed_index_clusterofs
  staging: erofs: fix integer overflow on 32-bit platform
  staging: erofs: fix compression mapping beyond EOF

 drivers/staging/erofs/Kconfig     |   9 ++
 drivers/staging/erofs/data.c      |  62 +++++++++----
 drivers/staging/erofs/erofs_fs.h  |  11 +++
 drivers/staging/erofs/internal.h  |  64 ++++++++-----
 drivers/staging/erofs/unzip_vle.c | 149 +++++++++++++++---------------
 drivers/staging/erofs/xattr.c     | 125 +++++++++++++++++--------
 6 files changed, 266 insertions(+), 154 deletions(-)

-- 
2.18.0


^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH 1/8] staging: erofs: introduce erofs_grab_bio
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  2018-08-12 14:01 ` [PATCH 2/8] staging: erofs: separate erofs_get_meta_page Chao Yu
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

This patch renames prepare_bio to erofs_grab_bio, and
adds a nofail option in order to retry in the bio allocator
under memory pressure.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/data.c      | 12 +++++++++--
 drivers/staging/erofs/internal.h  | 36 +++++++++++++++----------------
 drivers/staging/erofs/unzip_vle.c |  4 ++--
 3 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index ac263a180253..e0c046df6665 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -60,7 +60,8 @@ struct page *erofs_get_meta_page(struct super_block *sb,
 		struct bio *bio;
 		int err;
 
-		bio = prepare_bio(sb, blkaddr, 1, read_endio);
+		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
+
 		err = bio_add_page(bio, page, PAGE_SIZE, 0);
 		BUG_ON(err != PAGE_SIZE);
 
@@ -278,7 +279,14 @@ static inline struct bio *erofs_read_raw_page(
 		if (nblocks > BIO_MAX_PAGES)
 			nblocks = BIO_MAX_PAGES;
 
-		bio = prepare_bio(inode->i_sb, blknr, nblocks, read_endio);
+		bio = erofs_grab_bio(inode->i_sb,
+			blknr, nblocks, read_endio, false);
+
+		if (IS_ERR(bio)) {
+			err = PTR_ERR(bio);
+			bio = NULL;
+			goto err_out;
+		}
 	}
 
 	err = bio_add_page(bio, page, PAGE_SIZE, 0);
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 367b39fe46e5..1353b3ff8401 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -420,26 +420,26 @@ struct erofs_map_blocks {
 #define EROFS_GET_BLOCKS_RAW    0x0001
 
 /* data.c */
-static inline struct bio *prepare_bio(
-	struct super_block *sb,
-	erofs_blk_t blkaddr, unsigned nr_pages,
-	bio_end_io_t endio)
+static inline struct bio *
+erofs_grab_bio(struct super_block *sb,
+	       erofs_blk_t blkaddr, unsigned int nr_pages,
+	       bio_end_io_t endio, bool nofail)
 {
-	gfp_t gfp = GFP_NOIO;
-	struct bio *bio = bio_alloc(gfp, nr_pages);
-
-	if (unlikely(bio == NULL) &&
-		(current->flags & PF_MEMALLOC)) {
-		do {
-			nr_pages /= 2;
-			if (unlikely(!nr_pages)) {
-				bio = bio_alloc(gfp | __GFP_NOFAIL, 1);
-				BUG_ON(bio == NULL);
-				break;
+	const gfp_t gfp = GFP_NOIO;
+	struct bio *bio;
+
+	do {
+		if (nr_pages == 1) {
+			bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
+			if (unlikely(bio == NULL)) {
+				DBG_BUGON(nofail);
+				return ERR_PTR(-ENOMEM);
 			}
-			bio = bio_alloc(gfp, nr_pages);
-		} while (bio == NULL);
-	}
+			break;
+		}
+		bio = bio_alloc(gfp, nr_pages);
+		nr_pages /= 2;
+	} while (unlikely(bio == NULL));
 
 	bio->bi_end_io = endio;
 	bio_set_dev(bio, sb->s_bdev);
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8721f0a41d15..375c1711bb6b 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1213,8 +1213,8 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 		}
 
 		if (bio == NULL) {
-			bio = prepare_bio(sb, first_index + i,
-				BIO_MAX_PAGES, z_erofs_vle_read_endio);
+			bio = erofs_grab_bio(sb, first_index + i,
+				BIO_MAX_PAGES, z_erofs_vle_read_endio, true);
 			bio->bi_private = tagptr_cast_ptr(bi_private);
 
 			++nr_bios;
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 2/8] staging: erofs: separate erofs_get_meta_page
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
  2018-08-12 14:01 ` [PATCH 1/8] staging: erofs: introduce erofs_grab_bio Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  2018-08-13 11:04   ` Dan Carpenter
  2018-08-12 14:01 ` [PATCH 3/8] staging: erofs: add error handling for xattr submodule Chao Yu
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

This patch separates 'erofs_get_meta_page' into 'erofs_get_meta_page'
and 'erofs_get_meta_page_nofail'. The second one ensures that it
should not fail under memory pressure and should make best efforts
if IO errors occur.

It also adds auxiliary variables in order to fulfill 80 character limit.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/Kconfig     |  9 ++++++
 drivers/staging/erofs/data.c      | 52 ++++++++++++++++++++-----------
 drivers/staging/erofs/internal.h  | 30 +++++++++++++++---
 drivers/staging/erofs/unzip_vle.c | 12 ++++---
 drivers/staging/erofs/xattr.c     | 23 ++++++++------
 5 files changed, 89 insertions(+), 37 deletions(-)

diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index 96f614934df1..7f209b32228f 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -78,6 +78,15 @@ config EROFS_FAULT_INJECTION
 	  Test EROFS to inject faults such as ENOMEM, EIO, and so on.
 	  If unsure, say N.
 
+config EROFS_FS_IO_MAX_RETRIES
+	int "EROFS IO Maximum Retries"
+	depends on EROFS_FS
+	default "5"
+	help
+	  Maximum retry count of IO Errors.
+
+	  If unsure, leave the default value (5 retries, 6 IOs at most).
+
 config EROFS_FS_ZIP
 	bool "EROFS Data Compresssion Support"
 	depends on EROFS_FS
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index e0c046df6665..0570af5c84a2 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -39,31 +39,44 @@ static inline void read_endio(struct bio *bio)
 }
 
 /* prio -- true is used for dir */
-struct page *erofs_get_meta_page(struct super_block *sb,
-	erofs_blk_t blkaddr, bool prio)
+struct page *__erofs_get_meta_page(struct super_block *sb,
+	erofs_blk_t blkaddr, bool prio, bool nofail)
 {
-	struct inode *bd_inode = sb->s_bdev->bd_inode;
-	struct address_space *mapping = bd_inode->i_mapping;
+	struct inode *const bd_inode = sb->s_bdev->bd_inode;
+	struct address_space *const mapping = bd_inode->i_mapping;
+	/* prefer retrying in the allocator to blindly looping below */
+	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
+		(nofail ? __GFP_NOFAIL : 0);
+	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
 	struct page *page;
 
 repeat:
-	page = find_or_create_page(mapping, blkaddr,
-	/*
-	 * Prefer looping in the allocator rather than here,
-	 * at least that code knows what it's doing.
-	 */
-		mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
-
-	BUG_ON(!page || !PageLocked(page));
+	page = find_or_create_page(mapping, blkaddr, gfp);
+	if (unlikely(page == NULL)) {
+		DBG_BUGON(nofail);
+		return ERR_PTR(-ENOMEM);
+	}
+	DBG_BUGON(!PageLocked(page));
 
 	if (!PageUptodate(page)) {
 		struct bio *bio;
 		int err;
 
-		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
+		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
+		if (unlikely(bio == NULL)) {
+			DBG_BUGON(nofail);
+			err = -ENOMEM;
+err_out:
+			unlock_page(page);
+			put_page(page);
+			return ERR_PTR(err);
+		}
 
 		err = bio_add_page(bio, page, PAGE_SIZE, 0);
-		BUG_ON(err != PAGE_SIZE);
+		if (unlikely(err != PAGE_SIZE)) {
+			err = -EFAULT;
+			goto err_out;
+		}
 
 		__submit_bio(bio, REQ_OP_READ,
 			REQ_META | (prio ? REQ_PRIO : 0));
@@ -72,6 +85,7 @@ struct page *erofs_get_meta_page(struct super_block *sb,
 
 		/* the page has been truncated by others? */
 		if (unlikely(page->mapping != mapping)) {
+unlock_repeat:
 			unlock_page(page);
 			put_page(page);
 			goto repeat;
@@ -79,10 +93,12 @@ struct page *erofs_get_meta_page(struct super_block *sb,
 
 		/* more likely a read error */
 		if (unlikely(!PageUptodate(page))) {
-			unlock_page(page);
-			put_page(page);
-
-			page = ERR_PTR(-EIO);
+			if (io_retries) {
+				--io_retries;
+				goto unlock_repeat;
+			}
+			err = -EIO;
+			goto err_out;
 		}
 	}
 	return page;
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 1353b3ff8401..a756abeff7e9 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -453,8 +453,27 @@ static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags)
 	submit_bio(bio);
 }
 
-extern struct page *erofs_get_meta_page(struct super_block *sb,
-	erofs_blk_t blkaddr, bool prio);
+#ifndef CONFIG_EROFS_FS_IO_MAX_RETRIES
+#define EROFS_IO_MAX_RETRIES_NOFAIL	0
+#else
+#define EROFS_IO_MAX_RETRIES_NOFAIL	CONFIG_EROFS_FS_IO_MAX_RETRIES
+#endif
+
+extern struct page *__erofs_get_meta_page(struct super_block *sb,
+	erofs_blk_t blkaddr, bool prio, bool nofail);
+
+static inline struct page *erofs_get_meta_page(struct super_block *sb,
+	erofs_blk_t blkaddr, bool prio)
+{
+	return __erofs_get_meta_page(sb, blkaddr, prio, false);
+}
+
+static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
+	erofs_blk_t blkaddr, bool prio)
+{
+	return __erofs_get_meta_page(sb, blkaddr, prio, true);
+}
+
 extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
 extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
 	struct page **, int);
@@ -465,10 +484,11 @@ struct erofs_map_blocks_iter {
 };
 
 
-static inline struct page *erofs_get_inline_page(struct inode *inode,
-	erofs_blk_t blkaddr)
+static inline struct page *
+erofs_get_inline_page_nofail(struct inode *inode,
+			     erofs_blk_t blkaddr)
 {
-	return erofs_get_meta_page(inode->i_sb,
+	return erofs_get_meta_page_nofail(inode->i_sb,
 		blkaddr, S_ISDIR(inode->i_mode));
 }
 
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 375c1711bb6b..b2e05e2b4116 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1488,7 +1488,8 @@ static erofs_off_t vle_get_logical_extent_head(
 	erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
 	struct z_erofs_vle_decompressed_index *di;
 	unsigned long long ofs;
-	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
+	struct super_block *const sb = inode->i_sb;
+	const unsigned int clusterbits = EROFS_SB(sb)->clusterbits;
 	const unsigned int clustersize = 1 << clusterbits;
 
 	if (page->index != blkaddr) {
@@ -1496,8 +1497,8 @@ static erofs_off_t vle_get_logical_extent_head(
 		unlock_page(page);
 		put_page(page);
 
-		*page_iter = page = erofs_get_meta_page(inode->i_sb,
-			blkaddr, false);
+		page = erofs_get_meta_page_nofail(sb, blkaddr, false);
+		*page_iter = page;
 		*kaddr_iter = kmap_atomic(page);
 	}
 
@@ -1538,7 +1539,8 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	struct page *mpage = *mpage_ret;
 	void *kaddr;
 	bool initial;
-	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
+	struct super_block *const sb = inode->i_sb;
+	const unsigned int clusterbits = EROFS_SB(sb)->clusterbits;
 	const unsigned int clustersize = 1 << clusterbits;
 	int err = 0;
 
@@ -1569,7 +1571,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 		if (mpage != NULL)
 			put_page(mpage);
 
-		mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
+		mpage = erofs_get_meta_page_nofail(sb, e_blkaddr, false);
 		*mpage_ret = mpage;
 	} else {
 		lock_page(mpage);
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 0e9cfeccdf99..2593c856b079 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -38,6 +38,7 @@ static void init_inode_xattrs(struct inode *inode)
 	struct xattr_iter it;
 	unsigned i;
 	struct erofs_xattr_ibody_header *ih;
+	struct super_block *sb;
 	struct erofs_sb_info *sbi;
 	struct erofs_vnode *vi;
 	bool atomic_map;
@@ -48,11 +49,12 @@ static void init_inode_xattrs(struct inode *inode)
 	vi = EROFS_V(inode);
 	BUG_ON(!vi->xattr_isize);
 
-	sbi = EROFS_I_SB(inode);
+	sb = inode->i_sb;
+	sbi = EROFS_SB(sb);
 	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
 	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 
-	it.page = erofs_get_inline_page(inode, it.blkaddr);
+	it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
 	BUG_ON(IS_ERR(it.page));
 
 	/* read in shared xattr array (non-atomic, see kmalloc below) */
@@ -75,7 +77,7 @@ static void init_inode_xattrs(struct inode *inode)
 			BUG_ON(it.ofs != EROFS_BLKSIZ);
 			xattr_iter_end(&it, atomic_map);
 
-			it.page = erofs_get_meta_page(inode->i_sb,
+			it.page = erofs_get_meta_page_nofail(sb,
 				++it.blkaddr, S_ISDIR(inode->i_mode));
 			BUG_ON(IS_ERR(it.page));
 
@@ -105,7 +107,8 @@ static void xattr_iter_fixup(struct xattr_iter *it)
 		xattr_iter_end(it, true);
 
 		it->blkaddr += erofs_blknr(it->ofs);
-		it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+		it->page = erofs_get_meta_page_nofail(it->sb,
+			it->blkaddr, false);
 		BUG_ON(IS_ERR(it->page));
 
 		it->kaddr = kmap_atomic(it->page);
@@ -131,7 +134,7 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
 	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
 	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
 
-	it->page = erofs_get_inline_page(inode, it->blkaddr);
+	it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
 	BUG_ON(IS_ERR(it->page));
 	it->kaddr = kmap_atomic(it->page);
 
@@ -300,7 +303,8 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
 static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 {
 	struct erofs_vnode *const vi = EROFS_V(inode);
-	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
+	struct super_block *const sb = inode->i_sb;
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	unsigned i;
 	int ret = -ENOATTR;
 
@@ -314,7 +318,7 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 			if (i)
 				xattr_iter_end(&it->it, true);
 
-			it->it.page = erofs_get_meta_page(inode->i_sb,
+			it->it.page = erofs_get_meta_page_nofail(sb,
 				blkaddr, false);
 			BUG_ON(IS_ERR(it->it.page));
 			it->it.kaddr = kmap_atomic(it->it.page);
@@ -524,7 +528,8 @@ static int shared_listxattr(struct listxattr_iter *it)
 {
 	struct inode *const inode = d_inode(it->dentry);
 	struct erofs_vnode *const vi = EROFS_V(inode);
-	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+	struct super_block *const sb = inode->i_sb;
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	unsigned i;
 	int ret = 0;
 
@@ -537,7 +542,7 @@ static int shared_listxattr(struct listxattr_iter *it)
 			if (i)
 				xattr_iter_end(&it->it, true);
 
-			it->it.page = erofs_get_meta_page(inode->i_sb,
+			it->it.page = erofs_get_meta_page_nofail(sb,
 				blkaddr, false);
 			BUG_ON(IS_ERR(it->it.page));
 			it->it.kaddr = kmap_atomic(it->it.page);
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
  2018-08-12 14:01 ` [PATCH 1/8] staging: erofs: introduce erofs_grab_bio Chao Yu
  2018-08-12 14:01 ` [PATCH 2/8] staging: erofs: separate erofs_get_meta_page Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  2018-08-13  2:00   ` Chao Yu
  2018-08-13 11:47   ` Dan Carpenter
  2018-08-12 14:01 ` [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register} Chao Yu
                   ` (4 subsequent siblings)
  7 siblings, 2 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

This patch adds the missing error handling code for the
xattr submodule, which improves stability in rare failure cases.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/internal.h |   6 +-
 drivers/staging/erofs/xattr.c    | 120 ++++++++++++++++++++-----------
 2 files changed, 83 insertions(+), 43 deletions(-)

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index a756abeff7e9..8951e01216e3 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
 
 
 static inline struct page *
-erofs_get_inline_page_nofail(struct inode *inode,
-			     erofs_blk_t blkaddr)
+erofs_get_inline_page(struct inode *inode,
+		      erofs_blk_t blkaddr)
 {
-	return erofs_get_meta_page_nofail(inode->i_sb,
+	return erofs_get_meta_page(inode->i_sb,
 		blkaddr, S_ISDIR(inode->i_mode));
 }
 
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 2593c856b079..1b5815fc70db 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -24,16 +24,25 @@ struct xattr_iter {
 
 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
 {
-	/* only init_inode_xattrs use non-atomic once */
+	/* the only user of kunmap() is 'init_inode_xattrs' */
 	if (unlikely(!atomic))
 		kunmap(it->page);
 	else
 		kunmap_atomic(it->kaddr);
+
 	unlock_page(it->page);
 	put_page(it->page);
 }
 
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+	if (unlikely(it->page == NULL))
+		return;
+
+	xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
 {
 	struct xattr_iter it;
 	unsigned i;
@@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
 	bool atomic_map;
 
 	if (likely(inode_has_inited_xattr(inode)))
-		return;
+		return 0;
 
 	vi = EROFS_V(inode);
 	BUG_ON(!vi->xattr_isize);
@@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
 	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
 	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 
-	it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
-	BUG_ON(IS_ERR(it.page));
+	it.page = erofs_get_inline_page(inode, it.blkaddr);
+	if (IS_ERR(it.page))
+		return PTR_ERR(it.page);
 
 	/* read in shared xattr array (non-atomic, see kmalloc below) */
 	it.kaddr = kmap(it.page);
@@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
 	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
 
 	vi->xattr_shared_count = ih->h_shared_count;
-	vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
-		vi->xattr_shared_count, sizeof(unsigned),
-		GFP_KERNEL | __GFP_NOFAIL);
+	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+						sizeof(unsigned), GFP_KERNEL);
+	if (unlikely(vi->xattr_shared_xattrs == NULL)) {
+		xattr_iter_end(&it, atomic_map);
+		return -ENOMEM;
+	}
 
 	/* let's skip ibody header */
 	it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
 			BUG_ON(it.ofs != EROFS_BLKSIZ);
 			xattr_iter_end(&it, atomic_map);
 
-			it.page = erofs_get_meta_page_nofail(sb,
+			it.page = erofs_get_meta_page(sb,
 				++it.blkaddr, S_ISDIR(inode->i_mode));
-			BUG_ON(IS_ERR(it.page));
+			if (IS_ERR(it.page))
+				return PTR_ERR(it.page);
 
 			it.kaddr = kmap_atomic(it.page);
 			atomic_map = true;
@@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
 	xattr_iter_end(&it, atomic_map);
 
 	inode_set_inited_xattr(inode);
+	return 0;
 }
 
 struct xattr_iter_handlers {
@@ -101,19 +116,24 @@ struct xattr_iter_handlers {
 	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
 };
 
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
 {
-	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
-		xattr_iter_end(it, true);
+	if (likely(it->ofs < EROFS_BLKSIZ))
+		return 0;
 
-		it->blkaddr += erofs_blknr(it->ofs);
-		it->page = erofs_get_meta_page_nofail(it->sb,
-			it->blkaddr, false);
-		BUG_ON(IS_ERR(it->page));
+	xattr_iter_end(it, true);
 
-		it->kaddr = kmap_atomic(it->page);
-		it->ofs = erofs_blkoff(it->ofs);
+	it->blkaddr += erofs_blknr(it->ofs);
+
+	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+	if (IS_ERR(it->page)) {
+		it->page = NULL;
+		return PTR_ERR(it->page);
 	}
+
+	it->kaddr = kmap_atomic(it->page);
+	it->ofs = erofs_blkoff(it->ofs);
+	return 0;
 }
 
 static int inline_xattr_iter_begin(struct xattr_iter *it,
@@ -134,22 +154,25 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
 	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
 	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
 
-	it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
-	BUG_ON(IS_ERR(it->page));
-	it->kaddr = kmap_atomic(it->page);
+	it->page = erofs_get_inline_page(inode, it->blkaddr);
+	if (IS_ERR(it->page))
+		return PTR_ERR(it->page);
 
+	it->kaddr = kmap_atomic(it->page);
 	return vi->xattr_isize - xattr_header_sz;
 }
 
 static int xattr_foreach(struct xattr_iter *it,
-	struct xattr_iter_handlers *op, unsigned *tlimit)
+	const struct xattr_iter_handlers *op, unsigned *tlimit)
 {
 	struct erofs_xattr_entry entry;
 	unsigned value_sz, processed, slice;
 	int err;
 
 	/* 0. fixup blkaddr, ofs, ipage */
-	xattr_iter_fixup(it);
+	err = xattr_iter_fixup(it);
+	if (unlikely(err))
+		return err;
 
 	/*
 	 * 1. read xattr entry to the memory,
@@ -181,7 +204,9 @@ static int xattr_foreach(struct xattr_iter *it,
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
 
-			xattr_iter_fixup(it);
+			err = xattr_iter_fixup(it);
+			if (unlikely(err))
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -213,7 +238,10 @@ static int xattr_foreach(struct xattr_iter *it,
 	while (processed < value_sz) {
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
-			xattr_iter_fixup(it);
+
+			err = xattr_iter_fixup(it);
+			if (unlikely(err))
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -273,7 +301,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
 	memcpy(it->buffer + processed, buf, len);
 }
 
-static struct xattr_iter_handlers find_xattr_handlers = {
+static const struct xattr_iter_handlers find_xattr_handlers = {
 	.entry = xattr_entrymatch,
 	.name = xattr_namematch,
 	.alloc_buffer = xattr_checkbuffer,
@@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
 		if (ret >= 0)
 			break;
+
+		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -318,9 +349,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 			if (i)
 				xattr_iter_end(&it->it, true);
 
-			it->it.page = erofs_get_meta_page_nofail(sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -328,9 +360,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
 		if (ret >= 0)
 			break;
+
+		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -355,7 +390,9 @@ int erofs_getxattr(struct inode *inode, int index,
 	if (unlikely(name == NULL))
 		return -EINVAL;
 
-	init_inode_xattrs(inode);
+	ret = init_inode_xattrs(inode);
+	if (unlikely(ret < 0))
+		return ret;
 
 	it.index = index;
 
@@ -498,7 +535,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
 	return 1;
 }
 
-static struct xattr_iter_handlers list_xattr_handlers = {
+static const struct xattr_iter_handlers list_xattr_handlers = {
 	.entry = xattr_entrylist,
 	.name = xattr_namelist,
 	.alloc_buffer = xattr_skipvalue,
@@ -520,7 +557,7 @@ static int inline_listxattr(struct listxattr_iter *it)
 		if (ret < 0)
 			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 	return ret < 0 ? ret : it->buffer_ofs;
 }
 
@@ -542,9 +579,10 @@ static int shared_listxattr(struct listxattr_iter *it)
 			if (i)
 				xattr_iter_end(&it->it, true);
 
-			it->it.page = erofs_get_meta_page_nofail(sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -554,7 +592,7 @@ static int shared_listxattr(struct listxattr_iter *it)
 			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_ofs;
 }
@@ -565,7 +603,9 @@ ssize_t erofs_listxattr(struct dentry *dentry,
 	int ret;
 	struct listxattr_iter it;
 
-	init_inode_xattrs(d_inode(dentry));
+	ret = init_inode_xattrs(d_inode(dentry));
+	if (unlikely(ret < 0))
+		return ret;
 
 	it.dentry = dentry;
 	it.buffer = buffer;
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
                   ` (2 preceding siblings ...)
  2018-08-12 14:01 ` [PATCH 3/8] staging: erofs: add error handling for xattr submodule Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  2018-08-13 12:00   ` Dan Carpenter
  2018-08-12 14:01 ` [PATCH 5/8] staging: erofs: rearrange vle clustertype definitions Chao Yu
                   ` (3 subsequent siblings)
  7 siblings, 1 reply; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

This patch introduces 'struct z_erofs_vle_work_finder' to clean up
arguments of z_erofs_vle_work_lookup and z_erofs_vle_work_register.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/unzip_vle.c | 89 ++++++++++++++++---------------
 1 file changed, 47 insertions(+), 42 deletions(-)

diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index b2e05e2b4116..5032b3b05de1 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -271,36 +271,42 @@ static inline bool try_to_claim_workgroup(
 	return true;	/* lucky, I am the followee :) */
 }
 
+struct z_erofs_vle_work_finder {
+	struct super_block *sb;
+	pgoff_t idx;
+	unsigned pageofs;
+
+	struct z_erofs_vle_workgroup **grp_ret;
+	enum z_erofs_vle_work_role *role;
+	z_erofs_vle_owned_workgrp_t *owned_head;
+	bool *hosted;
+};
+
 static struct z_erofs_vle_work *
-z_erofs_vle_work_lookup(struct super_block *sb,
-			pgoff_t idx, unsigned pageofs,
-			struct z_erofs_vle_workgroup **grp_ret,
-			enum z_erofs_vle_work_role *role,
-			z_erofs_vle_owned_workgrp_t *owned_head,
-			bool *hosted)
+z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
 {
 	bool tag, primary;
 	struct erofs_workgroup *egrp;
 	struct z_erofs_vle_workgroup *grp;
 	struct z_erofs_vle_work *work;
 
-	egrp = erofs_find_workgroup(sb, idx, &tag);
+	egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
 	if (egrp == NULL) {
-		*grp_ret = NULL;
+		*f->grp_ret = NULL;
 		return NULL;
 	}
 
-	*grp_ret = grp = container_of(egrp,
-		struct z_erofs_vle_workgroup, obj);
+	grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
+	*f->grp_ret = grp;
 
 #ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
-	work = z_erofs_vle_grab_work(grp, pageofs);
+	work = z_erofs_vle_grab_work(grp, f->pageofs);
 	primary = true;
 #else
 	BUG();
 #endif
 
-	DBG_BUGON(work->pageofs != pageofs);
+	DBG_BUGON(work->pageofs != f->pageofs);
 
 	/*
 	 * lock must be taken first to avoid grp->next == NIL between
@@ -340,29 +346,24 @@ z_erofs_vle_work_lookup(struct super_block *sb,
 	 */
 	mutex_lock(&work->lock);
 
-	*hosted = false;
+	*f->hosted = false;
 	if (!primary)
-		*role = Z_EROFS_VLE_WORK_SECONDARY;
+		*f->role = Z_EROFS_VLE_WORK_SECONDARY;
 	/* claim the workgroup if possible */
-	else if (try_to_claim_workgroup(grp, owned_head, hosted))
-		*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+	else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
+		*f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
 	else
-		*role = Z_EROFS_VLE_WORK_PRIMARY;
+		*f->role = Z_EROFS_VLE_WORK_PRIMARY;
 
 	return work;
 }
 
 static struct z_erofs_vle_work *
-z_erofs_vle_work_register(struct super_block *sb,
-			  struct z_erofs_vle_workgroup **grp_ret,
-			  struct erofs_map_blocks *map,
-			  pgoff_t index, unsigned pageofs,
-			  enum z_erofs_vle_work_role *role,
-			  z_erofs_vle_owned_workgrp_t *owned_head,
-			  bool *hosted)
+z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
+			  struct erofs_map_blocks *map)
 {
-	bool newgrp = false;
-	struct z_erofs_vle_workgroup *grp = *grp_ret;
+	bool gnew = false;
+	struct z_erofs_vle_workgroup *grp = *f->grp_ret;
 	struct z_erofs_vle_work *work;
 
 #ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
@@ -376,7 +377,7 @@ z_erofs_vle_work_register(struct super_block *sb,
 	if (unlikely(grp == NULL))
 		return ERR_PTR(-ENOMEM);
 
-	grp->obj.index = index;
+	grp->obj.index = f->idx;
 	grp->llen = map->m_llen;
 
 	z_erofs_vle_set_workgrp_fmt(grp,
@@ -386,13 +387,13 @@ z_erofs_vle_work_register(struct super_block *sb,
 	atomic_set(&grp->obj.refcount, 1);
 
 	/* new workgrps have been claimed as type 1 */
-	WRITE_ONCE(grp->next, *owned_head);
+	WRITE_ONCE(grp->next, *f->owned_head);
 	/* primary and followed work for all new workgrps */
-	*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+	*f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
 	/* it should be submitted by ourselves */
-	*hosted = true;
+	*f->hosted = true;
 
-	newgrp = true;
+	gnew = true;
 #ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
 skip:
 	/* currently unimplemented */
@@ -400,12 +401,12 @@ z_erofs_vle_work_register(struct super_block *sb,
 #else
 	work = z_erofs_vle_grab_primary_work(grp);
 #endif
-	work->pageofs = pageofs;
+	work->pageofs = f->pageofs;
 
 	mutex_init(&work->lock);
 
-	if (newgrp) {
-		int err = erofs_register_workgroup(sb, &grp->obj, 0);
+	if (gnew) {
+		int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
 
 		if (err) {
 			kmem_cache_free(z_erofs_workgroup_cachep, grp);
@@ -413,7 +414,7 @@ z_erofs_vle_work_register(struct super_block *sb,
 		}
 	}
 
-	*owned_head = *grp_ret = grp;
+	*f->owned_head = *f->grp_ret = grp;
 
 	mutex_lock(&work->lock);
 	return work;
@@ -440,9 +441,16 @@ static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
 				       z_erofs_vle_owned_workgrp_t *owned_head)
 {
 	const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
-	const erofs_blk_t index = erofs_blknr(map->m_pa);
-	const unsigned pageofs = map->m_la & ~PAGE_MASK;
 	struct z_erofs_vle_workgroup *grp;
+	const struct z_erofs_vle_work_finder finder = {
+		.sb = sb,
+		.idx = erofs_blknr(map->m_pa),
+		.pageofs = map->m_la & ~PAGE_MASK,
+		.grp_ret = &grp,
+		.role = &builder->role,
+		.owned_head = owned_head,
+		.hosted = &builder->hosted
+	};
 	struct z_erofs_vle_work *work;
 
 	DBG_BUGON(builder->work != NULL);
@@ -454,16 +462,13 @@ static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
 	DBG_BUGON(erofs_blkoff(map->m_pa));
 
 repeat:
-	work = z_erofs_vle_work_lookup(sb, index,
-		pageofs, &grp, &builder->role, owned_head, &builder->hosted);
+	work = z_erofs_vle_work_lookup(&finder);
 	if (work != NULL) {
 		__update_workgrp_llen(grp, map->m_llen);
 		goto got_it;
 	}
 
-	work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
-		&builder->role, owned_head, &builder->hosted);
-
+	work = z_erofs_vle_work_register(&finder, map);
 	if (unlikely(work == ERR_PTR(-EAGAIN)))
 		goto repeat;
 
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 5/8] staging: erofs: rearrange vle clustertype definitions
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
                   ` (3 preceding siblings ...)
  2018-08-12 14:01 ` [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register} Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  2018-08-12 14:01 ` [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs Chao Yu
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

This patch moves the vle clustertype definitions to erofs_fs.h
since they are part of the on-disk format.

It also adds a compile-time check for Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/erofs_fs.h  | 11 +++++++++++
 drivers/staging/erofs/unzip_vle.c |  8 --------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
index 2f8e2bf70941..d4bffa2852b3 100644
--- a/drivers/staging/erofs/erofs_fs.h
+++ b/drivers/staging/erofs/erofs_fs.h
@@ -202,6 +202,14 @@ struct erofs_extent_header {
  *        di_u.delta[1] = distance to its corresponding tail cluster
  *                (di_advise could be 0, 1 or 2)
  */
+enum {
+	Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
+	Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
+	Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
+	Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
+	Z_EROFS_VLE_CLUSTER_TYPE_MAX
+};
+
 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS        2
 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT         0
 
@@ -260,6 +268,9 @@ static inline void erofs_check_ondisk_layout_definitions(void)
 	BUILD_BUG_ON(sizeof(struct erofs_extent_header) != 16);
 	BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
 	BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
+
+	BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
+		     Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
 }
 
 #endif
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 5032b3b05de1..f597f079d579 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1416,14 +1416,6 @@ const struct address_space_operations z_erofs_vle_normalaccess_aops = {
 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
 	Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
 
-enum {
-	Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
-	Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
-	Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
-	Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
-	Z_EROFS_VLE_CLUSTER_TYPE_MAX
-};
-
 #define vle_cluster_type(di)	\
 	__vle_cluster_type((di)->di_advise)
 
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
                   ` (4 preceding siblings ...)
  2018-08-12 14:01 ` [PATCH 5/8] staging: erofs: rearrange vle clustertype definitions Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  2018-08-13 12:03   ` Dan Carpenter
  2018-08-12 14:01 ` [PATCH 7/8] staging: erofs: fix integer overflow on 32-bit platform Chao Yu
  2018-08-12 14:01 ` [PATCH 8/8] staging: erofs: fix compression mapping beyond EOF Chao Yu
  7 siblings, 1 reply; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

This patch adds error handling code, and fixes a missing
endian conversion in vle_decompressed_index_clusterofs.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/unzip_vle.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index f597f079d579..ae99b6811d4a 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1419,24 +1419,24 @@ const struct address_space_operations z_erofs_vle_normalaccess_aops = {
 #define vle_cluster_type(di)	\
 	__vle_cluster_type((di)->di_advise)
 
-static inline unsigned
-vle_compressed_index_clusterofs(unsigned clustersize,
-	struct z_erofs_vle_decompressed_index *di)
+static inline int
+vle_decompressed_index_clusterofs(unsigned int *clusterofs,
+				  unsigned int clustersize,
+				  struct z_erofs_vle_decompressed_index *di)
 {
-	debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
-		__func__, di, di->di_advise, vle_cluster_type(di),
-		di->di_clusterofs, di->di_u.blkaddr);
-
 	switch (vle_cluster_type(di)) {
 	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+		*clusterofs = clustersize;
 		break;
 	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
 	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
-		return di->di_clusterofs;
+		*clusterofs = le16_to_cpu(di->di_clusterofs);
+		break;
 	default:
-		BUG_ON(1);
+		DBG_BUGON(1);
+		return -EIO;
 	}
-	return clustersize;
+	return 0;
 }
 
 static inline erofs_blk_t
@@ -1581,7 +1581,11 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
 		e_blkaddr, vle_extent_blkoff(inode, lcn));
 
-	logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
+	err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
+						clustersize, di);
+	if (unlikely(err))
+		goto unmap_out;
+
 	if (!initial) {
 		/* [walking mode] 'map' has been already initialized */
 		map->m_llen += logical_cluster_ofs;
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 7/8] staging: erofs: fix integer overflow on 32-bit platform
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
                   ` (5 preceding siblings ...)
  2018-08-12 14:01 ` [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  2018-08-12 14:01 ` [PATCH 8/8] staging: erofs: fix compression mapping beyond EOF Chao Yu
  7 siblings, 0 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

This patch fixes an integer overflow in the multiplication
of the 32-bit `lcn' in z_erofs_map_blocks_iter.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/unzip_vle.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index ae99b6811d4a..6c5b0a312592 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1513,7 +1513,7 @@ static erofs_off_t vle_get_logical_extent_head(
 		*flags ^= EROFS_MAP_ZIPPED;
 	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
 		/* clustersize should be a power of two */
-		ofs = ((unsigned long long)lcn << clusterbits) +
+		ofs = ((u64)lcn << clusterbits) +
 			(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
 		*pcn = le32_to_cpu(di->di_u.blkaddr);
 		break;
@@ -1595,7 +1595,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	/* by default, compressed */
 	map->m_flags |= EROFS_MAP_ZIPPED;
 
-	end = (u64)(lcn + 1) * clustersize;
+	end = ((u64)lcn + 1) * clustersize;
 
 	cluster_type = vle_cluster_type(di);
 
@@ -1611,7 +1611,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 		}
 
 		if (ofs_rem > logical_cluster_ofs) {
-			ofs = lcn * clustersize | logical_cluster_ofs;
+			ofs = (u64)lcn * clustersize | logical_cluster_ofs;
 			pcn = le32_to_cpu(di->di_u.blkaddr);
 			break;
 		}
@@ -1623,7 +1623,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 			err = -EIO;
 			goto unmap_out;
 		}
-		end = (lcn-- * clustersize) | logical_cluster_ofs;
+		end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
 		/* fallthrough */
 	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
 		/* get the correspoinding first chunk */
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 8/8] staging: erofs: fix compression mapping beyond EOF
  2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
                   ` (6 preceding siblings ...)
  2018-08-12 14:01 ` [PATCH 7/8] staging: erofs: fix integer overflow on 32-bit platform Chao Yu
@ 2018-08-12 14:01 ` Chao Yu
  7 siblings, 0 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-12 14:01 UTC (permalink / raw)
  To: gregkh, devel; +Cc: linux-erofs, linux-kernel, Gao Xiang, Chao Yu

From: Gao Xiang <gaoxiang25@huawei.com>

The logical address `m_la(in)' of an EOF LTP mapping should
start at `inode->i_size' rather than `inode->i_size - 1';
fix it.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 drivers/staging/erofs/unzip_vle.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 6c5b0a312592..2f47498eb1fe 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1548,7 +1548,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	if (unlikely(map->m_la >= inode->i_size)) {
 		BUG_ON(!initial);
 		map->m_llen = map->m_la + 1 - inode->i_size;
-		map->m_la = inode->i_size - 1;
+		map->m_la = inode->i_size;
 		map->m_flags = 0;
 		goto out;
 	}
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-12 14:01 ` [PATCH 3/8] staging: erofs: add error handling for xattr submodule Chao Yu
@ 2018-08-13  2:00   ` Chao Yu
  2018-08-13  2:36     ` Gao Xiang
  2018-08-13 11:47   ` Dan Carpenter
  1 sibling, 1 reply; 30+ messages in thread
From: Chao Yu @ 2018-08-13  2:00 UTC (permalink / raw)
  To: Gao Xiang; +Cc: Chao Yu, devel, linux-erofs, linux-kernel

On 2018/8/12 22:01, Chao Yu wrote:
> From: Gao Xiang <gaoxiang25@huawei.com>
> 
> This patch enhances the missing error handling code for
> xattr submodule, which improves the stability for the rare cases.
> 
> Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
> Reviewed-by: Chao Yu <yuchao0@huawei.com>
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  drivers/staging/erofs/internal.h |   6 +-
>  drivers/staging/erofs/xattr.c    | 120 ++++++++++++++++++++-----------
>  2 files changed, 83 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
> index a756abeff7e9..8951e01216e3 100644
> --- a/drivers/staging/erofs/internal.h
> +++ b/drivers/staging/erofs/internal.h
> @@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
>  
>  
>  static inline struct page *
> -erofs_get_inline_page_nofail(struct inode *inode,
> -			     erofs_blk_t blkaddr)
> +erofs_get_inline_page(struct inode *inode,
> +		      erofs_blk_t blkaddr)
>  {
> -	return erofs_get_meta_page_nofail(inode->i_sb,
> +	return erofs_get_meta_page(inode->i_sb,
>  		blkaddr, S_ISDIR(inode->i_mode));
>  }
>  
> diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
> index 2593c856b079..1b5815fc70db 100644
> --- a/drivers/staging/erofs/xattr.c
> +++ b/drivers/staging/erofs/xattr.c
> @@ -24,16 +24,25 @@ struct xattr_iter {
>  
>  static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
>  {
> -	/* only init_inode_xattrs use non-atomic once */
> +	/* the only user of kunmap() is 'init_inode_xattrs' */
>  	if (unlikely(!atomic))
>  		kunmap(it->page);
>  	else
>  		kunmap_atomic(it->kaddr);
> +
>  	unlock_page(it->page);
>  	put_page(it->page);
>  }
>  
> -static void init_inode_xattrs(struct inode *inode)
> +static inline void xattr_iter_end_final(struct xattr_iter *it)
> +{
> +	if (unlikely(it->page == NULL))
> +		return;
> +
> +	xattr_iter_end(it, true);
> +}
> +
> +static int init_inode_xattrs(struct inode *inode)
>  {
>  	struct xattr_iter it;
>  	unsigned i;
> @@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
>  	bool atomic_map;
>  
>  	if (likely(inode_has_inited_xattr(inode)))
> -		return;
> +		return 0;
>  
>  	vi = EROFS_V(inode);
>  	BUG_ON(!vi->xattr_isize);
> @@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
>  	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
>  	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
>  
> -	it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
> -	BUG_ON(IS_ERR(it.page));
> +	it.page = erofs_get_inline_page(inode, it.blkaddr);
> +	if (IS_ERR(it.page))
> +		return PTR_ERR(it.page);
>  
>  	/* read in shared xattr array (non-atomic, see kmalloc below) */
>  	it.kaddr = kmap(it.page);
> @@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
>  	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
>  
>  	vi->xattr_shared_count = ih->h_shared_count;
> -	vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
> -		vi->xattr_shared_count, sizeof(unsigned),
> -		GFP_KERNEL | __GFP_NOFAIL);
> +	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
> +						sizeof(unsigned), GFP_KERNEL);
> +	if (unlikely(vi->xattr_shared_xattrs == NULL)) {
> +		xattr_iter_end(&it, atomic_map);
> +		return -ENOMEM;
> +	}
>  
>  	/* let's skip ibody header */
>  	it.ofs += sizeof(struct erofs_xattr_ibody_header);
> @@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
>  			BUG_ON(it.ofs != EROFS_BLKSIZ);
>  			xattr_iter_end(&it, atomic_map);
>  
> -			it.page = erofs_get_meta_page_nofail(sb,
> +			it.page = erofs_get_meta_page(sb,
>  				++it.blkaddr, S_ISDIR(inode->i_mode));
> -			BUG_ON(IS_ERR(it.page));
> +			if (IS_ERR(it.page))
> +				return PTR_ERR(it.page);
>  
>  			it.kaddr = kmap_atomic(it.page);
>  			atomic_map = true;
> @@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
>  	xattr_iter_end(&it, atomic_map);
>  
>  	inode_set_inited_xattr(inode);
> +	return 0;
>  }
>  
>  struct xattr_iter_handlers {
> @@ -101,19 +116,24 @@ struct xattr_iter_handlers {
>  	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
>  };
>  
> -static void xattr_iter_fixup(struct xattr_iter *it)
> +static inline int xattr_iter_fixup(struct xattr_iter *it)
>  {
> -	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
> -		xattr_iter_end(it, true);
> +	if (likely(it->ofs < EROFS_BLKSIZ))
> +		return 0;
>  
> -		it->blkaddr += erofs_blknr(it->ofs);
> -		it->page = erofs_get_meta_page_nofail(it->sb,
> -			it->blkaddr, false);
> -		BUG_ON(IS_ERR(it->page));
> +	xattr_iter_end(it, true);
>  
> -		it->kaddr = kmap_atomic(it->page);
> -		it->ofs = erofs_blkoff(it->ofs);
> +	it->blkaddr += erofs_blknr(it->ofs);
> +
> +	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
> +	if (IS_ERR(it->page)) {
> +		it->page = NULL;
> +		return PTR_ERR(it->page);

As LKP reported, we need to fix this as below:

err = PTR_ERR(it->page);
it->page = NULL;
return err;

Thanks,

>  	}
> +
> +	it->kaddr = kmap_atomic(it->page);
> +	it->ofs = erofs_blkoff(it->ofs);
> +	return 0;
>  }
>  
>  static int inline_xattr_iter_begin(struct xattr_iter *it,
> @@ -134,22 +154,25 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
>  	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
>  	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
>  
> -	it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
> -	BUG_ON(IS_ERR(it->page));
> -	it->kaddr = kmap_atomic(it->page);
> +	it->page = erofs_get_inline_page(inode, it->blkaddr);
> +	if (IS_ERR(it->page))
> +		return PTR_ERR(it->page);
>  
> +	it->kaddr = kmap_atomic(it->page);
>  	return vi->xattr_isize - xattr_header_sz;
>  }
>  
>  static int xattr_foreach(struct xattr_iter *it,
> -	struct xattr_iter_handlers *op, unsigned *tlimit)
> +	const struct xattr_iter_handlers *op, unsigned *tlimit)
>  {
>  	struct erofs_xattr_entry entry;
>  	unsigned value_sz, processed, slice;
>  	int err;
>  
>  	/* 0. fixup blkaddr, ofs, ipage */
> -	xattr_iter_fixup(it);
> +	err = xattr_iter_fixup(it);
> +	if (unlikely(err))
> +		return err;
>  
>  	/*
>  	 * 1. read xattr entry to the memory,
> @@ -181,7 +204,9 @@ static int xattr_foreach(struct xattr_iter *it,
>  		if (it->ofs >= EROFS_BLKSIZ) {
>  			BUG_ON(it->ofs > EROFS_BLKSIZ);
>  
> -			xattr_iter_fixup(it);
> +			err = xattr_iter_fixup(it);
> +			if (unlikely(err))
> +				goto out;
>  			it->ofs = 0;
>  		}
>  
> @@ -213,7 +238,10 @@ static int xattr_foreach(struct xattr_iter *it,
>  	while (processed < value_sz) {
>  		if (it->ofs >= EROFS_BLKSIZ) {
>  			BUG_ON(it->ofs > EROFS_BLKSIZ);
> -			xattr_iter_fixup(it);
> +
> +			err = xattr_iter_fixup(it);
> +			if (unlikely(err))
> +				goto out;
>  			it->ofs = 0;
>  		}
>  
> @@ -273,7 +301,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
>  	memcpy(it->buffer + processed, buf, len);
>  }
>  
> -static struct xattr_iter_handlers find_xattr_handlers = {
> +static const struct xattr_iter_handlers find_xattr_handlers = {
>  	.entry = xattr_entrymatch,
>  	.name = xattr_namematch,
>  	.alloc_buffer = xattr_checkbuffer,
> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
>  		if (ret >= 0)
>  			break;
> +
> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
> +			break;
>  	}
> -	xattr_iter_end(&it->it, true);
> +	xattr_iter_end_final(&it->it);
>  
>  	return ret < 0 ? ret : it->buffer_size;
>  }
> @@ -318,9 +349,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
>  			if (i)
>  				xattr_iter_end(&it->it, true);
>  
> -			it->it.page = erofs_get_meta_page_nofail(sb,
> -				blkaddr, false);
> -			BUG_ON(IS_ERR(it->it.page));
> +			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
> +			if (IS_ERR(it->it.page))
> +				return PTR_ERR(it->it.page);
> +
>  			it->it.kaddr = kmap_atomic(it->it.page);
>  			it->it.blkaddr = blkaddr;
>  		}
> @@ -328,9 +360,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
>  		if (ret >= 0)
>  			break;
> +
> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
> +			break;
>  	}
>  	if (vi->xattr_shared_count)
> -		xattr_iter_end(&it->it, true);
> +		xattr_iter_end_final(&it->it);
>  
>  	return ret < 0 ? ret : it->buffer_size;
>  }
> @@ -355,7 +390,9 @@ int erofs_getxattr(struct inode *inode, int index,
>  	if (unlikely(name == NULL))
>  		return -EINVAL;
>  
> -	init_inode_xattrs(inode);
> +	ret = init_inode_xattrs(inode);
> +	if (unlikely(ret < 0))
> +		return ret;
>  
>  	it.index = index;
>  
> @@ -498,7 +535,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
>  	return 1;
>  }
>  
> -static struct xattr_iter_handlers list_xattr_handlers = {
> +static const struct xattr_iter_handlers list_xattr_handlers = {
>  	.entry = xattr_entrylist,
>  	.name = xattr_namelist,
>  	.alloc_buffer = xattr_skipvalue,
> @@ -520,7 +557,7 @@ static int inline_listxattr(struct listxattr_iter *it)
>  		if (ret < 0)
>  			break;
>  	}
> -	xattr_iter_end(&it->it, true);
> +	xattr_iter_end_final(&it->it);
>  	return ret < 0 ? ret : it->buffer_ofs;
>  }
>  
> @@ -542,9 +579,10 @@ static int shared_listxattr(struct listxattr_iter *it)
>  			if (i)
>  				xattr_iter_end(&it->it, true);
>  
> -			it->it.page = erofs_get_meta_page_nofail(sb,
> -				blkaddr, false);
> -			BUG_ON(IS_ERR(it->it.page));
> +			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
> +			if (IS_ERR(it->it.page))
> +				return PTR_ERR(it->it.page);
> +
>  			it->it.kaddr = kmap_atomic(it->it.page);
>  			it->it.blkaddr = blkaddr;
>  		}
> @@ -554,7 +592,7 @@ static int shared_listxattr(struct listxattr_iter *it)
>  			break;
>  	}
>  	if (vi->xattr_shared_count)
> -		xattr_iter_end(&it->it, true);
> +		xattr_iter_end_final(&it->it);
>  
>  	return ret < 0 ? ret : it->buffer_ofs;
>  }
> @@ -565,7 +603,9 @@ ssize_t erofs_listxattr(struct dentry *dentry,
>  	int ret;
>  	struct listxattr_iter it;
>  
> -	init_inode_xattrs(d_inode(dentry));
> +	ret = init_inode_xattrs(d_inode(dentry));
> +	if (unlikely(ret < 0))
> +		return ret;
>  
>  	it.dentry = dentry;
>  	it.buffer = buffer;
> 


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13  2:00   ` Chao Yu
@ 2018-08-13  2:36     ` Gao Xiang
  2018-08-13  2:56       ` [PATCH v2 " Gao Xiang
  2018-08-13  8:15       ` [PATCH " Chao Yu
  0 siblings, 2 replies; 30+ messages in thread
From: Gao Xiang @ 2018-08-13  2:36 UTC (permalink / raw)
  To: Chao Yu; +Cc: Chao Yu, devel, linux-erofs, linux-kernel

Hi Chao,

On 2018/8/13 10:00, Chao Yu wrote:
> On 2018/8/12 22:01, Chao Yu wrote:
>> From: Gao Xiang <gaoxiang25@huawei.com>
>>
>> This patch enhances the missing error handling code for
>> xattr submodule, which improves the stability for the rare cases.
>>
>> Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
>> Reviewed-by: Chao Yu <yuchao0@huawei.com>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>>  drivers/staging/erofs/internal.h |   6 +-
>>  drivers/staging/erofs/xattr.c    | 120 ++++++++++++++++++++-----------
>>  2 files changed, 83 insertions(+), 43 deletions(-)
>>
>> diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
>> index a756abeff7e9..8951e01216e3 100644
>> --- a/drivers/staging/erofs/internal.h
>> +++ b/drivers/staging/erofs/internal.h
>> @@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
>>  
>>  
>>  static inline struct page *
>> -erofs_get_inline_page_nofail(struct inode *inode,
>> -			     erofs_blk_t blkaddr)
>> +erofs_get_inline_page(struct inode *inode,
>> +		      erofs_blk_t blkaddr)
>>  {
>> -	return erofs_get_meta_page_nofail(inode->i_sb,
>> +	return erofs_get_meta_page(inode->i_sb,
>>  		blkaddr, S_ISDIR(inode->i_mode));
>>  }
>>  
>> diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
>> index 2593c856b079..1b5815fc70db 100644
>> --- a/drivers/staging/erofs/xattr.c
>> +++ b/drivers/staging/erofs/xattr.c
>> @@ -24,16 +24,25 @@ struct xattr_iter {
>>  
>>  static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
>>  {
>> -	/* only init_inode_xattrs use non-atomic once */
>> +	/* the only user of kunmap() is 'init_inode_xattrs' */
>>  	if (unlikely(!atomic))
>>  		kunmap(it->page);
>>  	else
>>  		kunmap_atomic(it->kaddr);
>> +
>>  	unlock_page(it->page);
>>  	put_page(it->page);
>>  }
>>  
>> -static void init_inode_xattrs(struct inode *inode)
>> +static inline void xattr_iter_end_final(struct xattr_iter *it)
>> +{
>> +	if (unlikely(it->page == NULL))
>> +		return;
>> +
>> +	xattr_iter_end(it, true);
>> +}
>> +
>> +static int init_inode_xattrs(struct inode *inode)
>>  {
>>  	struct xattr_iter it;
>>  	unsigned i;
>> @@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
>>  	bool atomic_map;
>>  
>>  	if (likely(inode_has_inited_xattr(inode)))
>> -		return;
>> +		return 0;
>>  
>>  	vi = EROFS_V(inode);
>>  	BUG_ON(!vi->xattr_isize);
>> @@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
>>  	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
>>  	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
>>  
>> -	it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
>> -	BUG_ON(IS_ERR(it.page));
>> +	it.page = erofs_get_inline_page(inode, it.blkaddr);
>> +	if (IS_ERR(it.page))
>> +		return PTR_ERR(it.page);
>>  
>>  	/* read in shared xattr array (non-atomic, see kmalloc below) */
>>  	it.kaddr = kmap(it.page);
>> @@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
>>  	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
>>  
>>  	vi->xattr_shared_count = ih->h_shared_count;
>> -	vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
>> -		vi->xattr_shared_count, sizeof(unsigned),
>> -		GFP_KERNEL | __GFP_NOFAIL);
>> +	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
>> +						sizeof(unsigned), GFP_KERNEL);
>> +	if (unlikely(vi->xattr_shared_xattrs == NULL)) {
>> +		xattr_iter_end(&it, atomic_map);
>> +		return -ENOMEM;
>> +	}
>>  
>>  	/* let's skip ibody header */
>>  	it.ofs += sizeof(struct erofs_xattr_ibody_header);
>> @@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
>>  			BUG_ON(it.ofs != EROFS_BLKSIZ);
>>  			xattr_iter_end(&it, atomic_map);
>>  
>> -			it.page = erofs_get_meta_page_nofail(sb,
>> +			it.page = erofs_get_meta_page(sb,
>>  				++it.blkaddr, S_ISDIR(inode->i_mode));
>> -			BUG_ON(IS_ERR(it.page));
>> +			if (IS_ERR(it.page))
>> +				return PTR_ERR(it.page);
>>  
>>  			it.kaddr = kmap_atomic(it.page);
>>  			atomic_map = true;
>> @@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
>>  	xattr_iter_end(&it, atomic_map);
>>  
>>  	inode_set_inited_xattr(inode);
>> +	return 0;
>>  }
>>  
>>  struct xattr_iter_handlers {
>> @@ -101,19 +116,24 @@ struct xattr_iter_handlers {
>>  	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
>>  };
>>  
>> -static void xattr_iter_fixup(struct xattr_iter *it)
>> +static inline int xattr_iter_fixup(struct xattr_iter *it)
>>  {
>> -	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
>> -		xattr_iter_end(it, true);
>> +	if (likely(it->ofs < EROFS_BLKSIZ))
>> +		return 0;
>>  
>> -		it->blkaddr += erofs_blknr(it->ofs);
>> -		it->page = erofs_get_meta_page_nofail(it->sb,
>> -			it->blkaddr, false);
>> -		BUG_ON(IS_ERR(it->page));
>> +	xattr_iter_end(it, true);
>>  
>> -		it->kaddr = kmap_atomic(it->page);
>> -		it->ofs = erofs_blkoff(it->ofs);
>> +	it->blkaddr += erofs_blknr(it->ofs);
>> +
>> +	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
>> +	if (IS_ERR(it->page)) {
>> +		it->page = NULL;
>> +		return PTR_ERR(it->page);
> As LKP reported, we need to fix this as below:
> 
> err = PTR_ERR(it->page);
> it->page = NULL;
> return err;
> 
> Thanks,
>

I have seen this at
https://lists.01.org/pipermail/kbuild-all/2018-August/051187.html

I will fix it soon. Thanks for the report.

Thanks,
Gao Xiang

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH v2 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13  2:36     ` Gao Xiang
@ 2018-08-13  2:56       ` Gao Xiang
  2018-08-13  8:15       ` [PATCH " Chao Yu
  1 sibling, 0 replies; 30+ messages in thread
From: Gao Xiang @ 2018-08-13  2:56 UTC (permalink / raw)
  To: Greg Kroah-Hartman, devel
  Cc: LKML, linux-erofs, Chao Yu, Miao Xie, weidu.du, Gao Xiang

This patch enhances the missing error handling code for
xattr submodule, which improves the stability for the rare cases.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
change log v2:
 - fix `PTR_ERR applied after initialization to constant' reported by
     kbuild test robot <lkp@intel.com>
   Link: https://lists.01.org/pipermail/kbuild-all/2018-August/051187.html

 drivers/staging/erofs/internal.h |   6 +-
 drivers/staging/erofs/xattr.c    | 122 ++++++++++++++++++++++++++-------------
 2 files changed, 85 insertions(+), 43 deletions(-)

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index a756abe..8951e01 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
 
 
 static inline struct page *
-erofs_get_inline_page_nofail(struct inode *inode,
-			     erofs_blk_t blkaddr)
+erofs_get_inline_page(struct inode *inode,
+		      erofs_blk_t blkaddr)
 {
-	return erofs_get_meta_page_nofail(inode->i_sb,
+	return erofs_get_meta_page(inode->i_sb,
 		blkaddr, S_ISDIR(inode->i_mode));
 }
 
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 2593c85..2a846cf 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -24,16 +24,25 @@ struct xattr_iter {
 
 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
 {
-	/* only init_inode_xattrs use non-atomic once */
+	/* the only user of kunmap() is 'init_inode_xattrs' */
 	if (unlikely(!atomic))
 		kunmap(it->page);
 	else
 		kunmap_atomic(it->kaddr);
+
 	unlock_page(it->page);
 	put_page(it->page);
 }
 
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+	if (unlikely(it->page == NULL))
+		return;
+
+	xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
 {
 	struct xattr_iter it;
 	unsigned i;
@@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
 	bool atomic_map;
 
 	if (likely(inode_has_inited_xattr(inode)))
-		return;
+		return 0;
 
 	vi = EROFS_V(inode);
 	BUG_ON(!vi->xattr_isize);
@@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
 	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
 	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 
-	it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
-	BUG_ON(IS_ERR(it.page));
+	it.page = erofs_get_inline_page(inode, it.blkaddr);
+	if (IS_ERR(it.page))
+		return PTR_ERR(it.page);
 
 	/* read in shared xattr array (non-atomic, see kmalloc below) */
 	it.kaddr = kmap(it.page);
@@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
 	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
 
 	vi->xattr_shared_count = ih->h_shared_count;
-	vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
-		vi->xattr_shared_count, sizeof(unsigned),
-		GFP_KERNEL | __GFP_NOFAIL);
+	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+						sizeof(unsigned), GFP_KERNEL);
+	if (unlikely(vi->xattr_shared_xattrs == NULL)) {
+		xattr_iter_end(&it, atomic_map);
+		return -ENOMEM;
+	}
 
 	/* let's skip ibody header */
 	it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
 			BUG_ON(it.ofs != EROFS_BLKSIZ);
 			xattr_iter_end(&it, atomic_map);
 
-			it.page = erofs_get_meta_page_nofail(sb,
+			it.page = erofs_get_meta_page(sb,
 				++it.blkaddr, S_ISDIR(inode->i_mode));
-			BUG_ON(IS_ERR(it.page));
+			if (IS_ERR(it.page))
+				return PTR_ERR(it.page);
 
 			it.kaddr = kmap_atomic(it.page);
 			atomic_map = true;
@@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
 	xattr_iter_end(&it, atomic_map);
 
 	inode_set_inited_xattr(inode);
+	return 0;
 }
 
 struct xattr_iter_handlers {
@@ -101,19 +116,26 @@ struct xattr_iter_handlers {
 	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
 };
 
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
 {
-	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
-		xattr_iter_end(it, true);
+	if (likely(it->ofs < EROFS_BLKSIZ))
+		return 0;
+
+	xattr_iter_end(it, true);
 
-		it->blkaddr += erofs_blknr(it->ofs);
-		it->page = erofs_get_meta_page_nofail(it->sb,
-			it->blkaddr, false);
-		BUG_ON(IS_ERR(it->page));
+	it->blkaddr += erofs_blknr(it->ofs);
 
-		it->kaddr = kmap_atomic(it->page);
-		it->ofs = erofs_blkoff(it->ofs);
+	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+	if (IS_ERR(it->page)) {
+		int err = PTR_ERR(it->page);
+
+		it->page = NULL;
+		return err;
 	}
+
+	it->kaddr = kmap_atomic(it->page);
+	it->ofs = erofs_blkoff(it->ofs);
+	return 0;
 }
 
 static int inline_xattr_iter_begin(struct xattr_iter *it,
@@ -134,22 +156,25 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
 	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
 	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
 
-	it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
-	BUG_ON(IS_ERR(it->page));
-	it->kaddr = kmap_atomic(it->page);
+	it->page = erofs_get_inline_page(inode, it->blkaddr);
+	if (IS_ERR(it->page))
+		return PTR_ERR(it->page);
 
+	it->kaddr = kmap_atomic(it->page);
 	return vi->xattr_isize - xattr_header_sz;
 }
 
 static int xattr_foreach(struct xattr_iter *it,
-	struct xattr_iter_handlers *op, unsigned *tlimit)
+	const struct xattr_iter_handlers *op, unsigned *tlimit)
 {
 	struct erofs_xattr_entry entry;
 	unsigned value_sz, processed, slice;
 	int err;
 
 	/* 0. fixup blkaddr, ofs, ipage */
-	xattr_iter_fixup(it);
+	err = xattr_iter_fixup(it);
+	if (unlikely(err))
+		return err;
 
 	/*
 	 * 1. read xattr entry to the memory,
@@ -181,7 +206,9 @@ static int xattr_foreach(struct xattr_iter *it,
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
 
-			xattr_iter_fixup(it);
+			err = xattr_iter_fixup(it);
+			if (unlikely(err))
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -213,7 +240,10 @@ static int xattr_foreach(struct xattr_iter *it,
 	while (processed < value_sz) {
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
-			xattr_iter_fixup(it);
+
+			err = xattr_iter_fixup(it);
+			if (unlikely(err))
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -273,7 +303,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
 	memcpy(it->buffer + processed, buf, len);
 }
 
-static struct xattr_iter_handlers find_xattr_handlers = {
+static const struct xattr_iter_handlers find_xattr_handlers = {
 	.entry = xattr_entrymatch,
 	.name = xattr_namematch,
 	.alloc_buffer = xattr_checkbuffer,
@@ -294,8 +324,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
 		if (ret >= 0)
 			break;
+
+		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -318,9 +351,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 			if (i)
 				xattr_iter_end(&it->it, true);
 
-			it->it.page = erofs_get_meta_page_nofail(sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -328,9 +362,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
 		if (ret >= 0)
 			break;
+
+		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -355,7 +392,9 @@ int erofs_getxattr(struct inode *inode, int index,
 	if (unlikely(name == NULL))
 		return -EINVAL;
 
-	init_inode_xattrs(inode);
+	ret = init_inode_xattrs(inode);
+	if (unlikely(ret < 0))
+		return ret;
 
 	it.index = index;
 
@@ -498,7 +537,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
 	return 1;
 }
 
-static struct xattr_iter_handlers list_xattr_handlers = {
+static const struct xattr_iter_handlers list_xattr_handlers = {
 	.entry = xattr_entrylist,
 	.name = xattr_namelist,
 	.alloc_buffer = xattr_skipvalue,
@@ -520,7 +559,7 @@ static int inline_listxattr(struct listxattr_iter *it)
 		if (ret < 0)
 			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 	return ret < 0 ? ret : it->buffer_ofs;
 }
 
@@ -542,9 +581,10 @@ static int shared_listxattr(struct listxattr_iter *it)
 			if (i)
 				xattr_iter_end(&it->it, true);
 
-			it->it.page = erofs_get_meta_page_nofail(sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -554,7 +594,7 @@ static int shared_listxattr(struct listxattr_iter *it)
 			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_ofs;
 }
@@ -565,7 +605,9 @@ ssize_t erofs_listxattr(struct dentry *dentry,
 	int ret;
 	struct listxattr_iter it;
 
-	init_inode_xattrs(d_inode(dentry));
+	ret = init_inode_xattrs(d_inode(dentry));
+	if (unlikely(ret < 0))
+		return ret;
 
 	it.dentry = dentry;
 	it.buffer = buffer;
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13  2:36     ` Gao Xiang
  2018-08-13  2:56       ` [PATCH v2 " Gao Xiang
@ 2018-08-13  8:15       ` Chao Yu
  1 sibling, 0 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-13  8:15 UTC (permalink / raw)
  To: Gao Xiang; +Cc: Chao Yu, devel, linux-erofs, linux-kernel

Hi Xiang,

On 2018/8/13 10:36, Gao Xiang wrote:
> Hi Chao,
> 
> On 2018/8/13 10:00, Chao Yu wrote:
>> On 2018/8/12 22:01, Chao Yu wrote:
>>> From: Gao Xiang <gaoxiang25@huawei.com>
>>>
>>> This patch enhances the missing error handling code for
>>> xattr submodule, which improves the stability for the rare cases.
>>>
>>> Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
>>> Reviewed-by: Chao Yu <yuchao0@huawei.com>
>>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>>> ---
>>>  drivers/staging/erofs/internal.h |   6 +-
>>>  drivers/staging/erofs/xattr.c    | 120 ++++++++++++++++++++-----------
>>>  2 files changed, 83 insertions(+), 43 deletions(-)
>>>
>>> diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
>>> index a756abeff7e9..8951e01216e3 100644
>>> --- a/drivers/staging/erofs/internal.h
>>> +++ b/drivers/staging/erofs/internal.h
>>> @@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
>>>  
>>>  
>>>  static inline struct page *
>>> -erofs_get_inline_page_nofail(struct inode *inode,
>>> -			     erofs_blk_t blkaddr)
>>> +erofs_get_inline_page(struct inode *inode,
>>> +		      erofs_blk_t blkaddr)
>>>  {
>>> -	return erofs_get_meta_page_nofail(inode->i_sb,
>>> +	return erofs_get_meta_page(inode->i_sb,
>>>  		blkaddr, S_ISDIR(inode->i_mode));
>>>  }
>>>  
>>> diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
>>> index 2593c856b079..1b5815fc70db 100644
>>> --- a/drivers/staging/erofs/xattr.c
>>> +++ b/drivers/staging/erofs/xattr.c
>>> @@ -24,16 +24,25 @@ struct xattr_iter {
>>>  
>>>  static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
>>>  {
>>> -	/* only init_inode_xattrs use non-atomic once */
>>> +	/* the only user of kunmap() is 'init_inode_xattrs' */
>>>  	if (unlikely(!atomic))
>>>  		kunmap(it->page);
>>>  	else
>>>  		kunmap_atomic(it->kaddr);
>>> +
>>>  	unlock_page(it->page);
>>>  	put_page(it->page);
>>>  }
>>>  
>>> -static void init_inode_xattrs(struct inode *inode)
>>> +static inline void xattr_iter_end_final(struct xattr_iter *it)
>>> +{
>>> +	if (unlikely(it->page == NULL))
>>> +		return;
>>> +
>>> +	xattr_iter_end(it, true);
>>> +}
>>> +
>>> +static int init_inode_xattrs(struct inode *inode)
>>>  {
>>>  	struct xattr_iter it;
>>>  	unsigned i;
>>> @@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
>>>  	bool atomic_map;
>>>  
>>>  	if (likely(inode_has_inited_xattr(inode)))
>>> -		return;
>>> +		return 0;
>>>  
>>>  	vi = EROFS_V(inode);
>>>  	BUG_ON(!vi->xattr_isize);
>>> @@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
>>>  	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
>>>  	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
>>>  
>>> -	it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
>>> -	BUG_ON(IS_ERR(it.page));
>>> +	it.page = erofs_get_inline_page(inode, it.blkaddr);
>>> +	if (IS_ERR(it.page))
>>> +		return PTR_ERR(it.page);
>>>  
>>>  	/* read in shared xattr array (non-atomic, see kmalloc below) */
>>>  	it.kaddr = kmap(it.page);
>>> @@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
>>>  	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
>>>  
>>>  	vi->xattr_shared_count = ih->h_shared_count;
>>> -	vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
>>> -		vi->xattr_shared_count, sizeof(unsigned),
>>> -		GFP_KERNEL | __GFP_NOFAIL);
>>> +	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
>>> +						sizeof(unsigned), GFP_KERNEL);
>>> +	if (unlikely(vi->xattr_shared_xattrs == NULL)) {
>>> +		xattr_iter_end(&it, atomic_map);
>>> +		return -ENOMEM;
>>> +	}
>>>  
>>>  	/* let's skip ibody header */
>>>  	it.ofs += sizeof(struct erofs_xattr_ibody_header);
>>> @@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
>>>  			BUG_ON(it.ofs != EROFS_BLKSIZ);
>>>  			xattr_iter_end(&it, atomic_map);
>>>  
>>> -			it.page = erofs_get_meta_page_nofail(sb,
>>> +			it.page = erofs_get_meta_page(sb,
>>>  				++it.blkaddr, S_ISDIR(inode->i_mode));
>>> -			BUG_ON(IS_ERR(it.page));
>>> +			if (IS_ERR(it.page))
>>> +				return PTR_ERR(it.page);
>>>  
>>>  			it.kaddr = kmap_atomic(it.page);
>>>  			atomic_map = true;
>>> @@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
>>>  	xattr_iter_end(&it, atomic_map);
>>>  
>>>  	inode_set_inited_xattr(inode);
>>> +	return 0;
>>>  }
>>>  
>>>  struct xattr_iter_handlers {
>>> @@ -101,19 +116,24 @@ struct xattr_iter_handlers {
>>>  	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
>>>  };
>>>  
>>> -static void xattr_iter_fixup(struct xattr_iter *it)
>>> +static inline int xattr_iter_fixup(struct xattr_iter *it)
>>>  {
>>> -	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
>>> -		xattr_iter_end(it, true);
>>> +	if (likely(it->ofs < EROFS_BLKSIZ))
>>> +		return 0;
>>>  
>>> -		it->blkaddr += erofs_blknr(it->ofs);
>>> -		it->page = erofs_get_meta_page_nofail(it->sb,
>>> -			it->blkaddr, false);
>>> -		BUG_ON(IS_ERR(it->page));
>>> +	xattr_iter_end(it, true);
>>>  
>>> -		it->kaddr = kmap_atomic(it->page);
>>> -		it->ofs = erofs_blkoff(it->ofs);
>>> +	it->blkaddr += erofs_blknr(it->ofs);
>>> +
>>> +	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
>>> +	if (IS_ERR(it->page)) {
>>> +		it->page = NULL;
>>> +		return PTR_ERR(it->page);
>> As LKP reported, we need to fix this as below:
>>
>> err = PTR_ERR(it->page);
>> it->page = NULL;
>> return err;
>>
>> Thanks,
>>
> 
> I have seen this at
> https://lists.01.org/pipermail/kbuild-all/2018-August/051187.html
> 
> I will fix it soon. Thanks for the report.

I checked v2 patch, it looks good to me now. I will send v2 patchset later.

Thanks,

> 
> Thanks,
> Gao Xiang
> 
> .
> 


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 2/8] staging: erofs: separate erofs_get_meta_page
  2018-08-12 14:01 ` [PATCH 2/8] staging: erofs: separate erofs_get_meta_page Chao Yu
@ 2018-08-13 11:04   ` Dan Carpenter
  2018-08-13 11:23     ` Gao Xiang
  2018-08-13 12:34     ` Chao Yu
  0 siblings, 2 replies; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 11:04 UTC (permalink / raw)
  To: Chao Yu; +Cc: gregkh, devel, Gao Xiang, Chao Yu, linux-erofs, linux-kernel

On Sun, Aug 12, 2018 at 10:01:44PM +0800, Chao Yu wrote:
> --- a/drivers/staging/erofs/data.c
> +++ b/drivers/staging/erofs/data.c
> @@ -39,31 +39,44 @@ static inline void read_endio(struct bio *bio)
>  }
>  
>  /* prio -- true is used for dir */
> -struct page *erofs_get_meta_page(struct super_block *sb,
> -	erofs_blk_t blkaddr, bool prio)
> +struct page *__erofs_get_meta_page(struct super_block *sb,
> +	erofs_blk_t blkaddr, bool prio, bool nofail)
>  {
> -	struct inode *bd_inode = sb->s_bdev->bd_inode;
> -	struct address_space *mapping = bd_inode->i_mapping;
> +	struct inode *const bd_inode = sb->s_bdev->bd_inode;
> +	struct address_space *const mapping = bd_inode->i_mapping;
> +	/* prefer retrying in the allocator to blindly looping below */
> +	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
> +		(nofail ? __GFP_NOFAIL : 0);
> +	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
>  	struct page *page;
>  
>  repeat:
> -	page = find_or_create_page(mapping, blkaddr,
> -	/*
> -	 * Prefer looping in the allocator rather than here,
> -	 * at least that code knows what it's doing.
> -	 */
> -		mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
> -
> -	BUG_ON(!page || !PageLocked(page));
> +	page = find_or_create_page(mapping, blkaddr, gfp);
> +	if (unlikely(page == NULL)) {
> +		DBG_BUGON(nofail);
> +		return ERR_PTR(-ENOMEM);
> +	}
> +	DBG_BUGON(!PageLocked(page));
>  
>  	if (!PageUptodate(page)) {
>  		struct bio *bio;
>  		int err;
>  
> -		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
> +		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
> +		if (unlikely(bio == NULL)) {
> +			DBG_BUGON(nofail);
> +			err = -ENOMEM;
> +err_out:
> +			unlock_page(page);
> +			put_page(page);
> +			return ERR_PTR(err);


Put this err_out stuff at the bottom of the function so that we don't
have to do backward hops to get to it.

> +		}
>  
>  		err = bio_add_page(bio, page, PAGE_SIZE, 0);
> -		BUG_ON(err != PAGE_SIZE);
> +		if (unlikely(err != PAGE_SIZE)) {
> +			err = -EFAULT;
> +			goto err_out;
                        ^^^^^^^^^^^^
Like this.  Generally avoid backwards hops if you can.

> +		}
>  
>  		__submit_bio(bio, REQ_OP_READ,
>  			REQ_META | (prio ? REQ_PRIO : 0));
> @@ -72,6 +85,7 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>  
>  		/* the page has been truncated by others? */
>  		if (unlikely(page->mapping != mapping)) {
> +unlock_repeat:

The question mark in the "truncated by others?" is a little concerning.
It's slightly weird that we don't check "io_retries" on this path.

>  			unlock_page(page);
>  			put_page(page);
>  			goto repeat;
> @@ -79,10 +93,12 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>  
>  		/* more likely a read error */
>  		if (unlikely(!PageUptodate(page))) {
> -			unlock_page(page);
> -			put_page(page);
> -
> -			page = ERR_PTR(-EIO);
> +			if (io_retries) {
> +				--io_retries;
> +				goto unlock_repeat;
> +			}
> +			err = -EIO;
> +			goto err_out;

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 2/8] staging: erofs: separate erofs_get_meta_page
  2018-08-13 11:04   ` Dan Carpenter
@ 2018-08-13 11:23     ` Gao Xiang
  2018-08-13 12:34     ` Chao Yu
  1 sibling, 0 replies; 30+ messages in thread
From: Gao Xiang @ 2018-08-13 11:23 UTC (permalink / raw)
  To: Dan Carpenter, Chao Yu; +Cc: gregkh, devel, Chao Yu, linux-erofs, linux-kernel

Hi Dan,

On 2018/8/13 19:04, Dan Carpenter wrote:
> On Sun, Aug 12, 2018 at 10:01:44PM +0800, Chao Yu wrote:
>> --- a/drivers/staging/erofs/data.c
>> +++ b/drivers/staging/erofs/data.c
>> @@ -39,31 +39,44 @@ static inline void read_endio(struct bio *bio)
>>  }
>>  
>>  /* prio -- true is used for dir */
>> -struct page *erofs_get_meta_page(struct super_block *sb,
>> -	erofs_blk_t blkaddr, bool prio)
>> +struct page *__erofs_get_meta_page(struct super_block *sb,
>> +	erofs_blk_t blkaddr, bool prio, bool nofail)
>>  {
>> -	struct inode *bd_inode = sb->s_bdev->bd_inode;
>> -	struct address_space *mapping = bd_inode->i_mapping;
>> +	struct inode *const bd_inode = sb->s_bdev->bd_inode;
>> +	struct address_space *const mapping = bd_inode->i_mapping;
>> +	/* prefer retrying in the allocator to blindly looping below */
>> +	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
>> +		(nofail ? __GFP_NOFAIL : 0);
>> +	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
>>  	struct page *page;
>>  
>>  repeat:
>> -	page = find_or_create_page(mapping, blkaddr,
>> -	/*
>> -	 * Prefer looping in the allocator rather than here,
>> -	 * at least that code knows what it's doing.
>> -	 */
>> -		mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
>> -
>> -	BUG_ON(!page || !PageLocked(page));
>> +	page = find_or_create_page(mapping, blkaddr, gfp);
>> +	if (unlikely(page == NULL)) {
>> +		DBG_BUGON(nofail);
>> +		return ERR_PTR(-ENOMEM);
>> +	}
>> +	DBG_BUGON(!PageLocked(page));
>>  
>>  	if (!PageUptodate(page)) {
>>  		struct bio *bio;
>>  		int err;
>>  
>> -		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
>> +		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
>> +		if (unlikely(bio == NULL)) {
>> +			DBG_BUGON(nofail);
>> +			err = -ENOMEM;
>> +err_out:
>> +			unlock_page(page);
>> +			put_page(page);
>> +			return ERR_PTR(err);
> 
> Put this err_out stuff at the bottom of the function so that we don't
> have to do backward hops to get to it.
> 
>> +		}
>>  
>>  		err = bio_add_page(bio, page, PAGE_SIZE, 0);
>> -		BUG_ON(err != PAGE_SIZE);
>> +		if (unlikely(err != PAGE_SIZE)) {
>> +			err = -EFAULT;
>> +			goto err_out;
>                         ^^^^^^^^^^^^
> Like this.  Generally avoid backwards hops if you can.
> 
>> +		}
>>  
>>  		__submit_bio(bio, REQ_OP_READ,
>>  			REQ_META | (prio ? REQ_PRIO : 0));
>> @@ -72,6 +85,7 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>>  
>>  		/* the page has been truncated by others? */
>>  		if (unlikely(page->mapping != mapping)) {
>> +unlock_repeat:
> The question mark in the "truncated by others?" is a little concerning.
> It's slightly weird that we don't check "io_retries" on this path.
>
Thanks for your reply.

erofs uses the shared block device's bd_inode->mapping for its metadata, which is visible
to code outside of erofs as well, so I think it needs to guard against the page
potentially being truncated by others; similar comments can also be found at

- pagecache_get_page
1557                 /* Has the page been truncated? */
1558                 if (unlikely(page->mapping != mapping)) {
1559                         unlock_page(page);
1560                         put_page(page);
1561                         goto repeat;
1562                 }

- filemap_fault
2535         /* Did it get truncated? */
2536         if (unlikely(page->mapping != mapping)) {
2537                 unlock_page(page);
2538                 put_page(page);
2539                 goto retry_find;
2540         }

I think it is somewhat necessary — do you have any suggestions about this? Thanks in advance.

"It's slightly weird that we don't check "io_retries" on this path"
I think you are right, let me rethink about it and send the next patch...

Thanks,
Gao Xiang


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-12 14:01 ` [PATCH 3/8] staging: erofs: add error handling for xattr submodule Chao Yu
  2018-08-13  2:00   ` Chao Yu
@ 2018-08-13 11:47   ` Dan Carpenter
  2018-08-13 12:17     ` Gao Xiang
  1 sibling, 1 reply; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 11:47 UTC (permalink / raw)
  To: Chao Yu; +Cc: gregkh, devel, Gao Xiang, Chao Yu, linux-erofs, linux-kernel

> -static void xattr_iter_fixup(struct xattr_iter *it)
> +static inline int xattr_iter_fixup(struct xattr_iter *it)
>  {
> -	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
> -		xattr_iter_end(it, true);
> +	if (likely(it->ofs < EROFS_BLKSIZ))
> +		return 0;
>  
> -		it->blkaddr += erofs_blknr(it->ofs);
> -		it->page = erofs_get_meta_page_nofail(it->sb,
> -			it->blkaddr, false);
> -		BUG_ON(IS_ERR(it->page));
> +	xattr_iter_end(it, true);
>  
> -		it->kaddr = kmap_atomic(it->page);
> -		it->ofs = erofs_blkoff(it->ofs);
> +	it->blkaddr += erofs_blknr(it->ofs);
> +
> +	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
> +	if (IS_ERR(it->page)) {
> +		it->page = NULL;
> +		return PTR_ERR(it->page);

This is returning PTR_ERR(NULL) which is success.  There is a smatch
test for this and maybe other static checkers as well so it would have
been caught very quickly.

>  	}
> +
> +	it->kaddr = kmap_atomic(it->page);
> +	it->ofs = erofs_blkoff(it->ofs);
> +	return 0;
>  }
>  
>  static int inline_xattr_iter_begin(struct xattr_iter *it,
> @@ -134,22 +154,25 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
>  	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
>  	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
>  
> -	it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
> -	BUG_ON(IS_ERR(it->page));
> -	it->kaddr = kmap_atomic(it->page);
> +	it->page = erofs_get_inline_page(inode, it->blkaddr);
> +	if (IS_ERR(it->page))
> +		return PTR_ERR(it->page);
>  
> +	it->kaddr = kmap_atomic(it->page);
>  	return vi->xattr_isize - xattr_header_sz;
>  }
>  
>  static int xattr_foreach(struct xattr_iter *it,
> -	struct xattr_iter_handlers *op, unsigned *tlimit)
> +	const struct xattr_iter_handlers *op, unsigned *tlimit)
>  {
>  	struct erofs_xattr_entry entry;
>  	unsigned value_sz, processed, slice;
>  	int err;
>  
>  	/* 0. fixup blkaddr, ofs, ipage */
> -	xattr_iter_fixup(it);
> +	err = xattr_iter_fixup(it);
> +	if (unlikely(err))
> +		return err;
>  
>  	/*
>  	 * 1. read xattr entry to the memory,
> @@ -181,7 +204,9 @@ static int xattr_foreach(struct xattr_iter *it,
>  		if (it->ofs >= EROFS_BLKSIZ) {
>  			BUG_ON(it->ofs > EROFS_BLKSIZ);
>  
> -			xattr_iter_fixup(it);
> +			err = xattr_iter_fixup(it);
> +			if (unlikely(err))
> +				goto out;
>  			it->ofs = 0;
>  		}
>  
> @@ -213,7 +238,10 @@ static int xattr_foreach(struct xattr_iter *it,
>  	while (processed < value_sz) {
>  		if (it->ofs >= EROFS_BLKSIZ) {
>  			BUG_ON(it->ofs > EROFS_BLKSIZ);
> -			xattr_iter_fixup(it);
> +
> +			err = xattr_iter_fixup(it);
> +			if (unlikely(err))
> +				goto out;
>  			it->ofs = 0;
>  		}
>  
> @@ -273,7 +301,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
>  	memcpy(it->buffer + processed, buf, len);
>  }
>  
> -static struct xattr_iter_handlers find_xattr_handlers = {
> +static const struct xattr_iter_handlers find_xattr_handlers = {
>  	.entry = xattr_entrymatch,
>  	.name = xattr_namematch,
>  	.alloc_buffer = xattr_checkbuffer,
> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
>  		if (ret >= 0)
>  			break;
> +
> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */

I have held off commenting on all the likely/unlikely annotations we
are adding because I don't know what the fast paths are in this code.
However, this is clearly an error path here, not on a fast path.

Generally the rule on likely/unlikely is that they hurt readability so
we should only add them if it makes a difference in benchmarking.

> +			break;
>  	}
> -	xattr_iter_end(&it->it, true);
> +	xattr_iter_end_final(&it->it);
>  
>  	return ret < 0 ? ret : it->buffer_size;
>  }
> @@ -318,9 +349,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
>  			if (i)
>  				xattr_iter_end(&it->it, true);
>  
> -			it->it.page = erofs_get_meta_page_nofail(sb,
> -				blkaddr, false);
> -			BUG_ON(IS_ERR(it->it.page));
> +			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
> +			if (IS_ERR(it->it.page))
> +				return PTR_ERR(it->it.page);
> +
>  			it->it.kaddr = kmap_atomic(it->it.page);
>  			it->it.blkaddr = blkaddr;
>  		}
> @@ -328,9 +360,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
>  		if (ret >= 0)
>  			break;
> +
> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
> +			break;
>  	}
>  	if (vi->xattr_shared_count)
> -		xattr_iter_end(&it->it, true);
> +		xattr_iter_end_final(&it->it);
>  
>  	return ret < 0 ? ret : it->buffer_size;
>  }
> @@ -355,7 +390,9 @@ int erofs_getxattr(struct inode *inode, int index,
>  	if (unlikely(name == NULL))
>  		return -EINVAL;
>  
> -	init_inode_xattrs(inode);
> +	ret = init_inode_xattrs(inode);
> +	if (unlikely(ret < 0))
> +		return ret;
>  
>  	it.index = index;
>  
> @@ -498,7 +535,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
>  	return 1;

Not related to your patch but what does "return 1;" mean here?  It's
weird to me that xattr_skipvalue() doesn't use value_sz at all.  I can't
decide if the function always succeeds or always fails.

The xattr_skipvalue/alloc_buffer function is called like this:

	err = op->alloc_buffer(it, value_sz);
	if (err) {
		it->ofs += value_sz;
		goto out;
	}

It looks like if we hit an error, we increase it->ofs.  All the other
error paths in this function take a negative error and increase it->ofs
so this looks like an error path.  The goto out look like this:

	/* we assume that ofs is aligned with 4 bytes */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err;

So we return 1 here and the callers all treat it as success...  There
needs to be some documentation for what positive returns mean.

regards,
dan carpenter

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}
  2018-08-12 14:01 ` [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register} Chao Yu
@ 2018-08-13 12:00   ` Dan Carpenter
  2018-08-13 12:37     ` Gao Xiang
  0 siblings, 1 reply; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 12:00 UTC (permalink / raw)
  To: Chao Yu; +Cc: gregkh, devel, Gao Xiang, Chao Yu, linux-erofs, linux-kernel

On Sun, Aug 12, 2018 at 10:01:46PM +0800, Chao Yu wrote:
> From: Gao Xiang <gaoxiang25@huawei.com>
> 
> This patch introduces 'struct z_erofs_vle_work_finder' to clean up
> arguments of z_erofs_vle_work_lookup and z_erofs_vle_work_register.
> 
> Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
> Reviewed-by: Chao Yu <yuchao0@huawei.com>
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  drivers/staging/erofs/unzip_vle.c | 89 ++++++++++++++++---------------
>  1 file changed, 47 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
> index b2e05e2b4116..5032b3b05de1 100644
> --- a/drivers/staging/erofs/unzip_vle.c
> +++ b/drivers/staging/erofs/unzip_vle.c
> @@ -271,36 +271,42 @@ static inline bool try_to_claim_workgroup(
>  	return true;	/* lucky, I am the followee :) */
>  }
>  
> +struct z_erofs_vle_work_finder {
> +	struct super_block *sb;
> +	pgoff_t idx;
> +	unsigned pageofs;
> +
> +	struct z_erofs_vle_workgroup **grp_ret;
> +	enum z_erofs_vle_work_role *role;
> +	z_erofs_vle_owned_workgrp_t *owned_head;
> +	bool *hosted;
> +};
> +
>  static struct z_erofs_vle_work *
> -z_erofs_vle_work_lookup(struct super_block *sb,
> -			pgoff_t idx, unsigned pageofs,
> -			struct z_erofs_vle_workgroup **grp_ret,
> -			enum z_erofs_vle_work_role *role,
> -			z_erofs_vle_owned_workgrp_t *owned_head,
> -			bool *hosted)
> +z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
>  {
>  	bool tag, primary;
>  	struct erofs_workgroup *egrp;
>  	struct z_erofs_vle_workgroup *grp;
>  	struct z_erofs_vle_work *work;
>  
> -	egrp = erofs_find_workgroup(sb, idx, &tag);
> +	egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
>  	if (egrp == NULL) {
> -		*grp_ret = NULL;
> +		*f->grp_ret = NULL;

All these pointers to pointer seem a bit messy.  Just do this:

	struct z_erofs_vle_workgroup *grp;

Then replace "grp" in z_erofs_vle_work_iter_begin() with finder.grp;

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs
  2018-08-12 14:01 ` [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs Chao Yu
@ 2018-08-13 12:03   ` Dan Carpenter
  2018-08-13 13:01     ` Gao Xiang
  0 siblings, 1 reply; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 12:03 UTC (permalink / raw)
  To: Chao Yu; +Cc: gregkh, devel, Gao Xiang, Chao Yu, linux-erofs, linux-kernel

> -static inline unsigned
> -vle_compressed_index_clusterofs(unsigned clustersize,
> -	struct z_erofs_vle_decompressed_index *di)
> +static inline int
> +vle_decompressed_index_clusterofs(unsigned int *clusterofs,

Not related to your patch, but don't make functions inline.  Leave it to
the compiler to decide.

regards,
dan carpenter



^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13 11:47   ` Dan Carpenter
@ 2018-08-13 12:17     ` Gao Xiang
  2018-08-13 12:25       ` Dan Carpenter
                         ` (2 more replies)
  0 siblings, 3 replies; 30+ messages in thread
From: Gao Xiang @ 2018-08-13 12:17 UTC (permalink / raw)
  To: Dan Carpenter, Chao Yu; +Cc: gregkh, devel, Chao Yu, linux-erofs, linux-kernel

Hi Dan,

On 2018/8/13 19:47, Dan Carpenter wrote:
>> -static void xattr_iter_fixup(struct xattr_iter *it)
>> +static inline int xattr_iter_fixup(struct xattr_iter *it)
>>  {
>> -	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
>> -		xattr_iter_end(it, true);
>> +	if (likely(it->ofs < EROFS_BLKSIZ))
>> +		return 0;
>>  
>> -		it->blkaddr += erofs_blknr(it->ofs);
>> -		it->page = erofs_get_meta_page_nofail(it->sb,
>> -			it->blkaddr, false);
>> -		BUG_ON(IS_ERR(it->page));
>> +	xattr_iter_end(it, true);
>>  
>> -		it->kaddr = kmap_atomic(it->page);
>> -		it->ofs = erofs_blkoff(it->ofs);
>> +	it->blkaddr += erofs_blknr(it->ofs);
>> +
>> +	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
>> +	if (IS_ERR(it->page)) {
>> +		it->page = NULL;
>> +		return PTR_ERR(it->page);
> 
> This is returning PTR_ERR(NULL) which is success.  There is a smatch
> test for this and maybe other static checkers as well so it would have
> been caught very quickly.
> 

I am sorry about this. It has already fixed in patch v2.

>>  	}
>> +
>> +	it->kaddr = kmap_atomic(it->page);
>> +	it->ofs = erofs_blkoff(it->ofs);
>> +	return 0;
>>  }
>>  
>>  static int inline_xattr_iter_begin(struct xattr_iter *it,
>> @@ -134,22 +154,25 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
>>  	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
>>  	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
>>  
>> -	it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
>> -	BUG_ON(IS_ERR(it->page));
>> -	it->kaddr = kmap_atomic(it->page);
>> +	it->page = erofs_get_inline_page(inode, it->blkaddr);
>> +	if (IS_ERR(it->page))
>> +		return PTR_ERR(it->page);
>>  
>> +	it->kaddr = kmap_atomic(it->page);
>>  	return vi->xattr_isize - xattr_header_sz;
>>  }
>>  
>>  static int xattr_foreach(struct xattr_iter *it,
>> -	struct xattr_iter_handlers *op, unsigned *tlimit)
>> +	const struct xattr_iter_handlers *op, unsigned *tlimit)
>>  {
>>  	struct erofs_xattr_entry entry;
>>  	unsigned value_sz, processed, slice;
>>  	int err;
>>  
>>  	/* 0. fixup blkaddr, ofs, ipage */
>> -	xattr_iter_fixup(it);
>> +	err = xattr_iter_fixup(it);
>> +	if (unlikely(err))
>> +		return err;
>>  
>>  	/*
>>  	 * 1. read xattr entry to the memory,
>> @@ -181,7 +204,9 @@ static int xattr_foreach(struct xattr_iter *it,
>>  		if (it->ofs >= EROFS_BLKSIZ) {
>>  			BUG_ON(it->ofs > EROFS_BLKSIZ);
>>  
>> -			xattr_iter_fixup(it);
>> +			err = xattr_iter_fixup(it);
>> +			if (unlikely(err))
>> +				goto out;
>>  			it->ofs = 0;
>>  		}
>>  
>> @@ -213,7 +238,10 @@ static int xattr_foreach(struct xattr_iter *it,
>>  	while (processed < value_sz) {
>>  		if (it->ofs >= EROFS_BLKSIZ) {
>>  			BUG_ON(it->ofs > EROFS_BLKSIZ);
>> -			xattr_iter_fixup(it);
>> +
>> +			err = xattr_iter_fixup(it);
>> +			if (unlikely(err))
>> +				goto out;
>>  			it->ofs = 0;
>>  		}
>>  
>> @@ -273,7 +301,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
>>  	memcpy(it->buffer + processed, buf, len);
>>  }
>>  
>> -static struct xattr_iter_handlers find_xattr_handlers = {
>> +static const struct xattr_iter_handlers find_xattr_handlers = {
>>  	.entry = xattr_entrymatch,
>>  	.name = xattr_namematch,
>>  	.alloc_buffer = xattr_checkbuffer,
>> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
>>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
>>  		if (ret >= 0)
>>  			break;
>> +
>> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
> 
> I have held off commenting on all the likely/unlikely annotations we
> are adding because I don't know what the fast paths are in this code.
> However, this is clearly an error path here, not on a fast path.
> 
> Generally the rule on likely/unlikely is that they hurt readability so
> we should only add them if it makes a difference in benchmarking.
> 

In my opinion, return values other than 0 and ENOATTR (ENODATA) rarely happen,
so this should be on the slow path...

>> +			break;
>>  	}
>> -	xattr_iter_end(&it->it, true);
>> +	xattr_iter_end_final(&it->it);
>>  
>>  	return ret < 0 ? ret : it->buffer_size;
>>  }
>> @@ -318,9 +349,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
>>  			if (i)
>>  				xattr_iter_end(&it->it, true);
>>  
>> -			it->it.page = erofs_get_meta_page_nofail(sb,
>> -				blkaddr, false);
>> -			BUG_ON(IS_ERR(it->it.page));
>> +			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
>> +			if (IS_ERR(it->it.page))
>> +				return PTR_ERR(it->it.page);
>> +
>>  			it->it.kaddr = kmap_atomic(it->it.page);
>>  			it->it.blkaddr = blkaddr;
>>  		}
>> @@ -328,9 +360,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
>>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
>>  		if (ret >= 0)
>>  			break;
>> +
>> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
>> +			break;
>>  	}
>>  	if (vi->xattr_shared_count)
>> -		xattr_iter_end(&it->it, true);
>> +		xattr_iter_end_final(&it->it);
>>  
>>  	return ret < 0 ? ret : it->buffer_size;
>>  }
>> @@ -355,7 +390,9 @@ int erofs_getxattr(struct inode *inode, int index,
>>  	if (unlikely(name == NULL))
>>  		return -EINVAL;
>>  
>> -	init_inode_xattrs(inode);
>> +	ret = init_inode_xattrs(inode);
>> +	if (unlikely(ret < 0))
>> +		return ret;
>>  
>>  	it.index = index;
>>  
>> @@ -498,7 +535,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
>>  	return 1;
> 
> Not related to your patch but what does "return 1;" mean here?  It's
> weird to me that xattr_skipvalue() doesn't use value_sz at all.  I can't
> decide if the function always succeeds or always fails.
> 

There are two implementations of .alloc_buffer, the one is
xattr_skipvalue (used for listxattr, always succeeds in order to skip
processing its xattr values), and another is xattr_checkbuffer (used for getxattr).

> The xattr_skipvalue/alloc_buffer function is called like this:
> 
> 	err = op->alloc_buffer(it, value_sz);
> 	if (err) {
> 		it->ofs += value_sz;
> 		goto out;
> 	}
> 
> It looks like if we hit an error, we increase it->ofs.  All the other
> error paths in this function take a negative error and increase it->ofs
> so this looks like an error path.  The goto out look like this:

Let me try to explain the design idea... If we are about to process the xattr value,

for getxattr, we need to check that the xattr buffer is large enough to contain the upcoming xattr value;
return 0 to process (copy) its value into the xattr buffer, and return < 0 if some error (e.g. the buffer size is not enough) occurred.

for listxattr, we need to skip this round to handle the next xattr item rather than continue processing
its upcoming xattr value, so 1 (> 0) is returned.

P.S. Whether `foreach' succeeds or not, it should point to the next xattr item rather than an arbitrary position,
so that we can handle the next xattr properly (if the current xattr is not the needed xattr).

> 
> 	/* we assume that ofs is aligned with 4 bytes */
> 	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
> 	return err;
> 
> So we return 1 here and the callers all treat it as success...  There
> needs to be some documentation for what positive returns mean.
OK, let me add more comments to show that.

Thanks,
Gao Xiang

> 
> regards,
> dan carpenter
> 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13 12:17     ` Gao Xiang
@ 2018-08-13 12:25       ` Dan Carpenter
  2018-08-13 13:40         ` Gao Xiang
  2018-08-13 12:40       ` Dan Carpenter
  2018-08-13 12:46       ` Chao Yu
  2 siblings, 1 reply; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 12:25 UTC (permalink / raw)
  To: Gao Xiang; +Cc: Chao Yu, gregkh, devel, Chao Yu, linux-erofs, linux-kernel

On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
> > 	/* we assume that ofs is aligned with 4 bytes */
> > 	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
> > 	return err;
> > 

This might be cleaner if we wrote:

	return (err < 0) ? err : 0;

The callers all treat zero and one the same so there isn't any reason
to propagate the 1 back.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 2/8] staging: erofs: separate erofs_get_meta_page
  2018-08-13 11:04   ` Dan Carpenter
  2018-08-13 11:23     ` Gao Xiang
@ 2018-08-13 12:34     ` Chao Yu
  1 sibling, 0 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-13 12:34 UTC (permalink / raw)
  To: Dan Carpenter
  Cc: gregkh, devel, Gao Xiang, Chao Yu, linux-erofs, linux-kernel

On 2018/8/13 19:04, Dan Carpenter wrote:
> On Sun, Aug 12, 2018 at 10:01:44PM +0800, Chao Yu wrote:
>> --- a/drivers/staging/erofs/data.c
>> +++ b/drivers/staging/erofs/data.c
>> @@ -39,31 +39,44 @@ static inline void read_endio(struct bio *bio)
>>  }
>>  
>>  /* prio -- true is used for dir */
>> -struct page *erofs_get_meta_page(struct super_block *sb,
>> -	erofs_blk_t blkaddr, bool prio)
>> +struct page *__erofs_get_meta_page(struct super_block *sb,
>> +	erofs_blk_t blkaddr, bool prio, bool nofail)
>>  {
>> -	struct inode *bd_inode = sb->s_bdev->bd_inode;
>> -	struct address_space *mapping = bd_inode->i_mapping;
>> +	struct inode *const bd_inode = sb->s_bdev->bd_inode;
>> +	struct address_space *const mapping = bd_inode->i_mapping;
>> +	/* prefer retrying in the allocator to blindly looping below */
>> +	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
>> +		(nofail ? __GFP_NOFAIL : 0);
>> +	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
>>  	struct page *page;
>>  
>>  repeat:
>> -	page = find_or_create_page(mapping, blkaddr,
>> -	/*
>> -	 * Prefer looping in the allocator rather than here,
>> -	 * at least that code knows what it's doing.
>> -	 */
>> -		mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
>> -
>> -	BUG_ON(!page || !PageLocked(page));
>> +	page = find_or_create_page(mapping, blkaddr, gfp);
>> +	if (unlikely(page == NULL)) {
>> +		DBG_BUGON(nofail);
>> +		return ERR_PTR(-ENOMEM);
>> +	}
>> +	DBG_BUGON(!PageLocked(page));
>>  
>>  	if (!PageUptodate(page)) {
>>  		struct bio *bio;
>>  		int err;
>>  
>> -		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
>> +		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
>> +		if (unlikely(bio == NULL)) {
>> +			DBG_BUGON(nofail);
>> +			err = -ENOMEM;
>> +err_out:
>> +			unlock_page(page);
>> +			put_page(page);
>> +			return ERR_PTR(err);
> 
> 
> Put this err_out stuff at the bottom of the function so that we don't
> have to do backward hops to get to it.

Agreed, we can move error path at the bottom of this function.

> 
>> +		}
>>  
>>  		err = bio_add_page(bio, page, PAGE_SIZE, 0);
>> -		BUG_ON(err != PAGE_SIZE);
>> +		if (unlikely(err != PAGE_SIZE)) {
>> +			err = -EFAULT;
>> +			goto err_out;
>                         ^^^^^^^^^^^^
> Like this.  Generally avoid backwards hops if you can.
> 
>> +		}
>>  
>>  		__submit_bio(bio, REQ_OP_READ,
>>  			REQ_META | (prio ? REQ_PRIO : 0));
>> @@ -72,6 +85,7 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>>  
>>  		/* the page has been truncated by others? */
>>  		if (unlikely(page->mapping != mapping)) {
>> +unlock_repeat:
> 
> The question mark in the "truncated by others?" is a little concerning.

Yup, we can remove the question mark.

> It's slightly weird that we don't check "io_retries" on this path.

We don't need to cover this path since io_retries is only used to count
retry attempts when IO actually occurs.

Thanks,

> 
>>  			unlock_page(page);
>>  			put_page(page);
>>  			goto repeat;
>> @@ -79,10 +93,12 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>>  
>>  		/* more likely a read error */
>>  		if (unlikely(!PageUptodate(page))) {
>> -			unlock_page(page);
>> -			put_page(page);
>> -
>> -			page = ERR_PTR(-EIO);
>> +			if (io_retries) {
>> +				--io_retries;
>> +				goto unlock_repeat;
>> +			}
>> +			err = -EIO;
>> +			goto err_out;
> 
> regards,
> dan carpenter
> 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}
  2018-08-13 12:00   ` Dan Carpenter
@ 2018-08-13 12:37     ` Gao Xiang
  2018-08-13 13:05       ` Dan Carpenter
  0 siblings, 1 reply; 30+ messages in thread
From: Gao Xiang @ 2018-08-13 12:37 UTC (permalink / raw)
  To: Dan Carpenter, Chao Yu; +Cc: gregkh, devel, Chao Yu, linux-erofs, linux-kernel

Hi Dan,

On 2018/8/13 20:00, Dan Carpenter wrote:
> On Sun, Aug 12, 2018 at 10:01:46PM +0800, Chao Yu wrote:
>> From: Gao Xiang <gaoxiang25@huawei.com>
>>
>> This patch introduces 'struct z_erofs_vle_work_finder' to clean up
>> arguments of z_erofs_vle_work_lookup and z_erofs_vle_work_register.
>>
>> Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
>> Reviewed-by: Chao Yu <yuchao0@huawei.com>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>>  drivers/staging/erofs/unzip_vle.c | 89 ++++++++++++++++---------------
>>  1 file changed, 47 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
>> index b2e05e2b4116..5032b3b05de1 100644
>> --- a/drivers/staging/erofs/unzip_vle.c
>> +++ b/drivers/staging/erofs/unzip_vle.c
>> @@ -271,36 +271,42 @@ static inline bool try_to_claim_workgroup(
>>  	return true;	/* lucky, I am the followee :) */
>>  }
>>  
>> +struct z_erofs_vle_work_finder {
>> +	struct super_block *sb;
>> +	pgoff_t idx;
>> +	unsigned pageofs;
>> +
>> +	struct z_erofs_vle_workgroup **grp_ret;
>> +	enum z_erofs_vle_work_role *role;
>> +	z_erofs_vle_owned_workgrp_t *owned_head;
>> +	bool *hosted;
>> +};
>> +
>>  static struct z_erofs_vle_work *
>> -z_erofs_vle_work_lookup(struct super_block *sb,
>> -			pgoff_t idx, unsigned pageofs,
>> -			struct z_erofs_vle_workgroup **grp_ret,
>> -			enum z_erofs_vle_work_role *role,
>> -			z_erofs_vle_owned_workgrp_t *owned_head,
>> -			bool *hosted)
>> +z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
>>  {
>>  	bool tag, primary;
>>  	struct erofs_workgroup *egrp;
>>  	struct z_erofs_vle_workgroup *grp;
>>  	struct z_erofs_vle_work *work;
>>  
>> -	egrp = erofs_find_workgroup(sb, idx, &tag);
>> +	egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
>>  	if (egrp == NULL) {
>> -		*grp_ret = NULL;
>> +		*f->grp_ret = NULL;
> 
> All these pointers to pointer seem a bit messy.  Just do this:
> 
> 	struct z_erofs_vle_workgroup *grp;
> 
> Then replace "grp" in z_erofs_vle_work_iter_begin() with finder.grp;
> 

I wrote this because I am not sure of all compiler behaviors.

Notice that the struct `struct z_erofs_vle_work_finder' has been all marked as const.

If I use `struct z_erofs_vle_workgroup *grp;' and drop the `const' decorator,
compilers could do some re-read operations because its value could potentially be changed by its caller at the same time.

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13 12:17     ` Gao Xiang
  2018-08-13 12:25       ` Dan Carpenter
@ 2018-08-13 12:40       ` Dan Carpenter
  2018-08-13 12:46         ` Gao Xiang
  2018-08-13 12:46       ` Chao Yu
  2 siblings, 1 reply; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 12:40 UTC (permalink / raw)
  To: Gao Xiang; +Cc: Chao Yu, devel, gregkh, Chao Yu, linux-erofs, linux-kernel

On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
> >> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
> >>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
> >>  		if (ret >= 0)
> >>  			break;
> >> +
> >> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
> > 
> > I have held off commenting on all the likely/unlikely annotations we
> > are adding because I don't know what the fast paths are in this code.
> > However, this is clearly an error path here, not on a fast path.
> > 
> > Generally the rule on likely/unlikely is that they hurt readability so
> > we should only add them if it makes a difference in benchmarking.
> > 
> 
> In my opinion, return values other than 0 and ENOATTR(ENODATA) rarely happens,
> it should be in the slow path...
> 

What I'm trying to say is please stop adding so many likely/unlikely
annotations.  You should only add them if you have the benchmark data to
show that it really is required.


regards,
dan carpenter


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13 12:40       ` Dan Carpenter
@ 2018-08-13 12:46         ` Gao Xiang
  0 siblings, 0 replies; 30+ messages in thread
From: Gao Xiang @ 2018-08-13 12:46 UTC (permalink / raw)
  To: Dan Carpenter; +Cc: Chao Yu, devel, gregkh, Chao Yu, linux-erofs, linux-kernel

Hi Dan,

On 2018/8/13 20:40, Dan Carpenter wrote:
> On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
>>>> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
>>>>  		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
>>>>  		if (ret >= 0)
>>>>  			break;
>>>> +
>>>> +		if (unlikely(ret != -ENOATTR))	/* -ENOMEM, -EIO, etc. */
>>> I have held off commenting on all the likely/unlikely annotations we
>>> are adding because I don't know what the fast paths are in this code.
>>> However, this is clearly an error path here, not on a fast path.
>>>
>>> Generally the rule on likely/unlikely is that they hurt readability so
>>> we should only add them if it makes a difference in benchmarking.
>>>
>> In my opinion, return values other than 0 and ENOATTR(ENODATA) rarely happens,
>> it should be in the slow path...
>>
> What I'm trying to say is please stop adding so many likely/unlikely
> annotations.  You should only add them if you have the benchmark data to
> show the it really is required.
> 
> 
OK, I'll follow your suggestion.

I could say it is my personal code tendency(style).
If you don't like it/think them useless, I will remove them all. 

Thanks for your suggestion.

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13 12:17     ` Gao Xiang
  2018-08-13 12:25       ` Dan Carpenter
  2018-08-13 12:40       ` Dan Carpenter
@ 2018-08-13 12:46       ` Chao Yu
  2 siblings, 0 replies; 30+ messages in thread
From: Chao Yu @ 2018-08-13 12:46 UTC (permalink / raw)
  To: Gao Xiang, Dan Carpenter
  Cc: gregkh, devel, Chao Yu, linux-erofs, linux-kernel

On 2018/8/13 20:17, Gao Xiang wrote:
>> Generally the rule on likely/unlikely is that they hurt readability so
>> we should only add them if it makes a difference in benchmarking.
>>
> 
> In my opinion, return values other than 0 and ENOATTR(ENODATA) rarely happens,
> it should be in the slow path...

Hi Dan, thanks for the comments.

IMO, we should check and clean up all likely/unlikely in erofs, to make sure
they are used in the right place.

Thanks,

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs
  2018-08-13 12:03   ` Dan Carpenter
@ 2018-08-13 13:01     ` Gao Xiang
  0 siblings, 0 replies; 30+ messages in thread
From: Gao Xiang @ 2018-08-13 13:01 UTC (permalink / raw)
  To: Dan Carpenter, Chao Yu; +Cc: gregkh, devel, Chao Yu, linux-erofs, linux-kernel

Hi Dan,

On 2018/8/13 20:03, Dan Carpenter wrote:
>> -static inline unsigned
>> -vle_compressed_index_clusterofs(unsigned clustersize,
>> -	struct z_erofs_vle_decompressed_index *di)
>> +static inline int
>> +vle_decompressed_index_clusterofs(unsigned int *clusterofs,
> 
> Not related to your patch, but don't make functions inline.  Leave it to
> the compiler to decide.

OK, thanks for your suggestion.

Thanks,
Gao Xiang

> 
> regards,
> dan carpenter
> 
> 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}
  2018-08-13 12:37     ` Gao Xiang
@ 2018-08-13 13:05       ` Dan Carpenter
  2018-08-13 13:19         ` Gao Xiang
  0 siblings, 1 reply; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 13:05 UTC (permalink / raw)
  To: Gao Xiang; +Cc: Chao Yu, devel, gregkh, Chao Yu, linux-erofs, linux-kernel

Yeah.  You'd have to remove the const.

Anyway, on looking at it more, I guess this patch is fine for now.  We
will probably end up changing z_erofs_vle_work_lookup() and
z_erofs_vle_work_register() some more in the future.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}
  2018-08-13 13:05       ` Dan Carpenter
@ 2018-08-13 13:19         ` Gao Xiang
  0 siblings, 0 replies; 30+ messages in thread
From: Gao Xiang @ 2018-08-13 13:19 UTC (permalink / raw)
  To: Dan Carpenter; +Cc: Chao Yu, devel, gregkh, Chao Yu, linux-erofs, linux-kernel

Hi Dan,

On 2018/8/13 21:05, Dan Carpenter wrote:
> Yeah.  You'd have to remove the const.
> 
> Anyway, on looking at it more, I guess this patch is fine for now.  We
> will probably end up changing z_erofs_vle_work_lookup() and
> z_erofs_vle_work_register() some more in the future.
> 

Thanks for review, I personally tend to leave this patch unmodified for now :( .........

The struct is written to be either read-only, or assigned and read at once, and lets compilers notice that
(all modifications are local so compilers can handle them safely)...

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13 12:25       ` Dan Carpenter
@ 2018-08-13 13:40         ` Gao Xiang
  2018-08-13 13:50           ` Dan Carpenter
  0 siblings, 1 reply; 30+ messages in thread
From: Gao Xiang @ 2018-08-13 13:40 UTC (permalink / raw)
  To: Dan Carpenter; +Cc: Chao Yu, gregkh, devel, Chao Yu, linux-erofs, linux-kernel

Hi Dan,

On 2018/8/13 20:25, Dan Carpenter wrote:
> On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
>>> 	/* we assume that ofs is aligned with 4 bytes */
>>> 	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
>>> 	return err;
>>>
> This might be cleaner if we wrote:
> 
> 	return (err < 0) ? error : 0;
> 
> The callers all treate zero and one the same so there isn't any reason
> to propogate the 1 back.
> 

Thanks, I will recheck all callers and fix as you suggested.
But it is better to fix them in an independent patch. :) I will send a new patch later.

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule
  2018-08-13 13:40         ` Gao Xiang
@ 2018-08-13 13:50           ` Dan Carpenter
  0 siblings, 0 replies; 30+ messages in thread
From: Dan Carpenter @ 2018-08-13 13:50 UTC (permalink / raw)
  To: Gao Xiang; +Cc: devel, Chao Yu, gregkh, Chao Yu, linux-kernel, linux-erofs

> But it is better to fix them in an independent patch. :)

Yah.  Of course.  This was completely unrelated.

regards,
dan carpenter

^ permalink raw reply	[flat|nested] 30+ messages in thread

end of thread, other threads:[~2018-08-13 13:51 UTC | newest]

Thread overview: 30+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-12 14:01 [PATCH 0/8] staging: erofs: fix some issues and clean up codes Chao Yu
2018-08-12 14:01 ` [PATCH 1/8] staging: erofs: introduce erofs_grab_bio Chao Yu
2018-08-12 14:01 ` [PATCH 2/8] staging: erofs: separate erofs_get_meta_page Chao Yu
2018-08-13 11:04   ` Dan Carpenter
2018-08-13 11:23     ` Gao Xiang
2018-08-13 12:34     ` Chao Yu
2018-08-12 14:01 ` [PATCH 3/8] staging: erofs: add error handling for xattr submodule Chao Yu
2018-08-13  2:00   ` Chao Yu
2018-08-13  2:36     ` Gao Xiang
2018-08-13  2:56       ` [PATCH v2 " Gao Xiang
2018-08-13  8:15       ` [PATCH " Chao Yu
2018-08-13 11:47   ` Dan Carpenter
2018-08-13 12:17     ` Gao Xiang
2018-08-13 12:25       ` Dan Carpenter
2018-08-13 13:40         ` Gao Xiang
2018-08-13 13:50           ` Dan Carpenter
2018-08-13 12:40       ` Dan Carpenter
2018-08-13 12:46         ` Gao Xiang
2018-08-13 12:46       ` Chao Yu
2018-08-12 14:01 ` [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register} Chao Yu
2018-08-13 12:00   ` Dan Carpenter
2018-08-13 12:37     ` Gao Xiang
2018-08-13 13:05       ` Dan Carpenter
2018-08-13 13:19         ` Gao Xiang
2018-08-12 14:01 ` [PATCH 5/8] staging: erofs: rearrange vle clustertype definitions Chao Yu
2018-08-12 14:01 ` [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs Chao Yu
2018-08-13 12:03   ` Dan Carpenter
2018-08-13 13:01     ` Gao Xiang
2018-08-12 14:01 ` [PATCH 7/8] staging: erofs: fix integer overflow on 32-bit platform Chao Yu
2018-08-12 14:01 ` [PATCH 8/8] staging: erofs: fix compression mapping beyond EOF Chao Yu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).