From: Gao Xiang <gaoxiang25@huawei.com>
To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	<devel@driverdev.osuosl.org>
Cc: <linux-fsdevel@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<linux-erofs@lists.ozlabs.org>, <yuchao0@huawei.com>,
	<miaoxie@huawei.com>, <weidu.du@huawei.com>, <hsiangkao@aol.com>,
	<chao@kernel.org>, Gao Xiang <gaoxiang25@huawei.com>
Subject: [PATCH 15/25] staging: erofs: add erofs_map_blocks_iter
Date: Thu, 26 Jul 2018 20:21:58 +0800	[thread overview]
Message-ID: <1532607728-103372-16-git-send-email-gaoxiang25@huawei.com> (raw)
In-Reply-To: <1532607728-103372-1-git-send-email-gaoxiang25@huawei.com>

This patch introduces an iterable L2P (logical-to-physical) mapping
operation, 'erofs_map_blocks_iter'.
Compared with 'erofs_map_blocks', it avoids a number of redundant
'release and re-grab' cycles when successive queries hit the same
meta page.
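The intended calling pattern is roughly the sketch below. It is
illustrative only and not part of the patch: walk_extents() and its
arguments are hypothetical, while erofs_map_blocks_iter(),
struct erofs_map_blocks and the m_* fields are the ones added here.
The meta page grabbed by one query is handed to the next query via
'mpage', so it is only released and re-acquired when the indexes
actually live in a different meta block.

/*
 * Illustrative sketch (not part of this patch): a hypothetical caller
 * walking the logical extents of an inode while caching the current
 * meta page in 'mpage' across queries.
 */
static int walk_extents(struct inode *inode, erofs_off_t start,
	erofs_off_t len)
{
	struct page *mpage = NULL;	/* meta page kept across queries */
	erofs_off_t pos = start;
	int err = 0;

	while (pos < start + len) {
		/* a fresh query (m_llen == 0) starting at 'pos' */
		struct erofs_map_blocks map = { .m_la = pos };

		err = erofs_map_blocks_iter(inode, &map, &mpage, 0);
		if (err)
			break;
		if (!(map.m_flags & EROFS_MAP_MAPPED))
			break;	/* e.g. the query went beyond EOF */

		/* consume the extent [m_la, m_la + m_llen) here ... */

		pos = map.m_la + map.m_llen;
	}

	/* the reference to the last meta page belongs to the caller */
	if (mpage != NULL)
		put_page(mpage);
	return err;
}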

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/Kconfig     |  10 ++
 drivers/staging/erofs/Makefile    |   1 +
 drivers/staging/erofs/data.c      |  36 +++++-
 drivers/staging/erofs/internal.h  |  12 ++
 drivers/staging/erofs/unzip_vle.c | 243 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 300 insertions(+), 2 deletions(-)
 create mode 100644 drivers/staging/erofs/unzip_vle.c

diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index edda055..63bec70 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -77,3 +77,13 @@ config EROFS_FAULT_INJECTION
 	help
 	  Test EROFS to inject faults such as ENOMEM, EIO, and so on.
 	  If unsure, say N.
+
+config EROFS_FS_ZIP
+	bool "EROFS Data Compresssion Support"
+	depends on EROFS_FS
+	help
+	  Currently we support VLE Compression only.
+	  Play at your own risk.
+
+	  If you don't want to use the compression feature, say N.
+
diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile
index 977b7e0..8558c76 100644
--- a/drivers/staging/erofs/Makefile
+++ b/drivers/staging/erofs/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_EROFS_FS) += erofs.o
 ccflags-y += -I$(src)/include
 erofs-objs := super.o inode.o data.o namei.o dir.o
 erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += unzip_vle.o
 
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index 47d1787..163bfe6 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -157,12 +157,44 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
 	return 0;
 }
 
+#ifdef CONFIG_EROFS_FS_ZIP
+extern int z_erofs_map_blocks_iter(struct inode *,
+	struct erofs_map_blocks *, struct page **, int);
+#endif
+
+int erofs_map_blocks_iter(struct inode *inode,
+	struct erofs_map_blocks *map,
+	struct page **mpage_ret, int flags)
+{
+	/* by default, reading raw data never uses erofs_map_blocks_iter */
+	if (unlikely(!is_inode_layout_compression(inode))) {
+		if (*mpage_ret != NULL)
+			put_page(*mpage_ret);
+		*mpage_ret = NULL;
+
+		return erofs_map_blocks(inode, map, flags);
+	}
+
+#ifdef CONFIG_EROFS_FS_ZIP
+	return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
+#else
+	/* data compression is not available */
+	return -ENOTSUPP;
+#endif
+}
+
 int erofs_map_blocks(struct inode *inode,
 	struct erofs_map_blocks *map, int flags)
 {
-	if (unlikely(is_inode_layout_compression(inode)))
-		return -ENOTSUPP;
+	if (unlikely(is_inode_layout_compression(inode))) {
+		struct page *mpage = NULL;
+		int err;
 
+		err = erofs_map_blocks_iter(inode, map, &mpage, flags);
+		if (mpage != NULL)
+			put_page(mpage);
+		return err;
+	}
 	return erofs_map_blocks_flatmode(inode, map, flags);
 }
 
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index ca22486..bea5ec4 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -72,6 +72,10 @@ struct erofs_sb_info {
 
 	/* inode slot unit size in bit shift */
 	unsigned char islotbits;
+#ifdef CONFIG_EROFS_FS_ZIP
+	/* cluster size in bit shift */
+	unsigned char clusterbits;
+#endif
 
 	u32 build_time_nsec;
 	u64 build_time;
@@ -284,6 +288,14 @@ struct erofs_map_blocks {
 extern struct page *erofs_get_meta_page(struct super_block *sb,
 	erofs_blk_t blkaddr, bool prio);
 extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
+extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
+	struct page **, int);
+
+struct erofs_map_blocks_iter {
+	struct erofs_map_blocks map;
+	struct page *mpage;
+};
+
 
 static inline struct page *erofs_get_inline_page(struct inode *inode,
 	erofs_blk_t blkaddr)
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
new file mode 100644
index 0000000..329cbe4
--- /dev/null
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/unzip_vle.c
+ *
+ * Copyright (C) 2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include "internal.h"
+
+#define __vle_cluster_advise(x, bit, bits) \
+	((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
+
+#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
+	Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
+
+enum {
+	Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
+	Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
+	Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
+	Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
+	Z_EROFS_VLE_CLUSTER_TYPE_MAX
+};
+
+#define vle_cluster_type(di)	\
+	__vle_cluster_type((di)->di_advise)
+
+static inline unsigned
+vle_compressed_index_clusterofs(unsigned clustersize,
+	struct z_erofs_vle_decompressed_index *di)
+{
+	debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
+		__func__, di, di->di_advise, vle_cluster_type(di),
+		di->di_clusterofs, di->di_u.blkaddr);
+
+	switch (vle_cluster_type(di)) {
+	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+		break;
+	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+		return di->di_clusterofs;
+	default:
+		BUG_ON(1);
+	}
+	return clustersize;
+}
+
+static inline erofs_blk_t
+vle_extent_blkaddr(struct inode *inode, pgoff_t index)
+{
+	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+	struct erofs_vnode *vi = EROFS_V(inode);
+
+	unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
+		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
+		index * sizeof(struct z_erofs_vle_decompressed_index);
+
+	return erofs_blknr(iloc(sbi, vi->nid) + ofs);
+}
+
+static inline unsigned int
+vle_extent_blkoff(struct inode *inode, pgoff_t index)
+{
+	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+	struct erofs_vnode *vi = EROFS_V(inode);
+
+	unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
+		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
+		index * sizeof(struct z_erofs_vle_decompressed_index);
+
+	return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
+}
+
+/*
+ * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
+ * ---
+ * VLE compression mode compresses a variable amount of logical data into
+ * a fixed-size physical cluster.
+ * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
+ */
+static erofs_off_t vle_get_logical_extent_head(
+	struct inode *inode,
+	struct page **page_iter,
+	void **kaddr_iter,
+	unsigned lcn,	/* logical cluster number */
+	erofs_blk_t *pcn,
+	unsigned *flags)
+{
+	/* for extent meta */
+	struct page *page = *page_iter;
+	erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
+	struct z_erofs_vle_decompressed_index *di;
+	unsigned long long ofs;
+	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
+	const unsigned int clustersize = 1 << clusterbits;
+
+	if (page->index != blkaddr) {
+		kunmap_atomic(*kaddr_iter);
+		unlock_page(page);
+		put_page(page);
+
+		*page_iter = page = erofs_get_meta_page(inode->i_sb,
+			blkaddr, false);
+		*kaddr_iter = kmap_atomic(page);
+	}
+
+	di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
+	switch (vle_cluster_type(di)) {
+	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+		BUG_ON(!di->di_u.delta[0]);
+		BUG_ON(lcn < di->di_u.delta[0]);
+
+		ofs = vle_get_logical_extent_head(inode,
+			page_iter, kaddr_iter,
+			lcn - di->di_u.delta[0], pcn, flags);
+		break;
+	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+		*flags ^= EROFS_MAP_ZIPPED;
+	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+		/* clustersize should be a power of two */
+		ofs = ((unsigned long long)lcn << clusterbits) +
+			(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
+		*pcn = le32_to_cpu(di->di_u.blkaddr);
+		break;
+	default:
+		BUG_ON(1);
+	}
+	return ofs;
+}
+
+int z_erofs_map_blocks_iter(struct inode *inode,
+	struct erofs_map_blocks *map,
+	struct page **mpage_ret, int flags)
+{
+	/* logical extent (start, end) offset */
+	unsigned long long ofs, end;
+	struct z_erofs_vle_decompressed_index *di;
+	erofs_blk_t e_blkaddr, pcn;
+	unsigned lcn, logical_cluster_ofs;
+	u32 ofs_rem;
+	struct page *mpage = *mpage_ret;
+	void *kaddr;
+	bool initial;
+	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
+	const unsigned int clustersize = 1 << clusterbits;
+
+	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
+	initial = !map->m_llen;
+
+	/* when trying to read beyond EOF, leave it unmapped */
+	if (unlikely(map->m_la >= inode->i_size)) {
+		BUG_ON(!initial);
+		map->m_llen = map->m_la + 1 - inode->i_size;
+		map->m_la = inode->i_size - 1;
+		map->m_flags = 0;
+		goto out;
+	}
+
+	debugln("%s, m_la %llu m_llen %llu --- start", __func__,
+		map->m_la, map->m_llen);
+
+	ofs = map->m_la + map->m_llen;
+
+	/* clustersize should be power of two */
+	lcn = ofs >> clusterbits;
+	ofs_rem = ofs & (clustersize - 1);
+
+	e_blkaddr = vle_extent_blkaddr(inode, lcn);
+
+	if (mpage == NULL || mpage->index != e_blkaddr) {
+		if (mpage != NULL)
+			put_page(mpage);
+
+		mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
+		*mpage_ret = mpage;
+	} else {
+		lock_page(mpage);
+		DBG_BUGON(!PageUptodate(mpage));
+	}
+
+	kaddr = kmap_atomic(mpage);
+	di = kaddr + vle_extent_blkoff(inode, lcn);
+
+	debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
+		e_blkaddr, vle_extent_blkoff(inode, lcn));
+
+	logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
+	if (!initial) {
+		/* [walking mode] 'map' has been already initialized */
+		map->m_llen += logical_cluster_ofs;
+		goto unmap_out;
+	}
+
+	/* by default, compressed */
+	map->m_flags |= EROFS_MAP_ZIPPED;
+
+	end = (u64)(lcn + 1) * clustersize;
+
+	switch (vle_cluster_type(di)) {
+	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+		if (ofs_rem >= logical_cluster_ofs)
+			map->m_flags ^= EROFS_MAP_ZIPPED;
+	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+		if (ofs_rem == logical_cluster_ofs) {
+			pcn = le32_to_cpu(di->di_u.blkaddr);
+			goto exact_hitted;
+		}
+
+		if (ofs_rem > logical_cluster_ofs) {
+			ofs = lcn * clustersize | logical_cluster_ofs;
+			pcn = le32_to_cpu(di->di_u.blkaddr);
+			break;
+		}
+
+		BUG_ON(!lcn);	/* logical cluster number >= 1 */
+		end = (lcn-- * clustersize) | logical_cluster_ofs;
+	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+		/* get the corresponding first chunk */
+		ofs = vle_get_logical_extent_head(inode, mpage_ret,
+			&kaddr, lcn, &pcn, &map->m_flags);
+		mpage = *mpage_ret;
+	}
+
+	map->m_la = ofs;
+exact_hitted:
+	map->m_llen = end - ofs;
+	map->m_plen = clustersize;
+	map->m_pa = blknr_to_addr(pcn);
+	map->m_flags |= EROFS_MAP_MAPPED;
+unmap_out:
+	kunmap_atomic(kaddr);
+	unlock_page(mpage);
+out:
+	debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
+		__func__, map->m_la, map->m_pa,
+		map->m_llen, map->m_plen, map->m_flags);
+	return 0;
+}
+
-- 
1.9.1
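
As a quick worked example of the cluster arithmetic used above by
z_erofs_map_blocks_iter() (and, for locating the on-disk indexes,
vle_extent_blkaddr()/vle_extent_blkoff()), here is a minimal
stand-alone sketch.  The cluster shift (clusterbits == 12, i.e. 4KiB
clusters) and the offset value are assumptions made up for the
example; only the shift/mask pattern itself comes from the patch.

#include <stdio.h>

/* Illustrative only: splitting a logical offset into a logical cluster
 * number (lcn) and an intra-cluster remainder, as done at the start of
 * z_erofs_map_blocks_iter().  clusterbits == 12 is an assumed value.
 */
int main(void)
{
	const unsigned int clusterbits = 12;
	const unsigned int clustersize = 1U << clusterbits;	/* 4096 */
	unsigned long long ofs = 13000;				/* m_la + m_llen */

	unsigned int lcn = ofs >> clusterbits;			/* 13000 / 4096 = 3 */
	unsigned int ofs_rem = ofs & (clustersize - 1);		/* 13000 - 12288 = 712 */

	/* logical end of the cluster that contains 'ofs' */
	unsigned long long end = (unsigned long long)(lcn + 1) * clustersize;

	printf("lcn=%u ofs_rem=%u end=%llu\n", lcn, ofs_rem, end);
	return 0;
}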

