From: James Simmons <jsimmons@infradead.org>
To: lustre-devel@lists.lustre.org
Subject: [lustre-devel] [PATCH 15/37] lustre: obdclass: use offset instead of cp_linkage
Date: Wed, 15 Jul 2020 16:44:56 -0400	[thread overview]
Message-ID: <1594845918-29027-16-git-send-email-jsimmons@infradead.org> (raw)
In-Reply-To: <1594845918-29027-1-git-send-email-jsimmons@infradead.org>

From: Wang Shilong <wshilong@ddn.com>

Since cl_page allocations are fixed-size, an array of offsets can be
used instead of a linked list to locate each slice of a cl_page.

This reduces the size of struct cl_page from 392 bytes to 336 bytes,
which means 12 objects can be allocated where only 10 fit before.

WC-bug-id: https://jira.whamcloud.com/browse/LU-13134
Lustre-commit: 55967f1e5c701 ("LU-13134 obdclass: use offset instead of cp_linkage")
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Reviewed-on: https://review.whamcloud.com/37428
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
---
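[Not part of the patch -- an illustrative sketch for reviewers.] The
scheme described above can be demonstrated with a small standalone C
program. It uses hypothetical toy_page/toy_slice names rather than the
real cl_page structures: the per-layer slices are packed into the same
allocation directly after the fixed-size header, and a small array of
byte offsets (measured from the end of the header) replaces the list
linkage:

/* toy_offset_array.c - illustration only, hypothetical names */
#include <stdio.h>
#include <stdlib.h>

struct toy_page {
	unsigned char	layer_offset[3];	/* offsets past end of header */
	unsigned char	layer_count;		/* number of valid entries */
};

struct toy_slice {
	int		layer_id;
};

/* slice 'index' starts layer_offset[index] bytes after the header */
static struct toy_slice *toy_slice_get(struct toy_page *pg, int index)
{
	if (index < 0 || index >= pg->layer_count)
		return NULL;

	return (struct toy_slice *)((char *)pg + sizeof(*pg) +
				    pg->layer_offset[index]);
}

int main(void)
{
	/* one allocation holds the header plus two slices */
	struct toy_page *pg = calloc(1, sizeof(*pg) +
				     2 * sizeof(struct toy_slice));
	int i;

	if (!pg)
		return 1;

	for (i = 0; i < 2; i++) {
		struct toy_slice *slice = (struct toy_slice *)
			((char *)pg + sizeof(*pg) + i * sizeof(*slice));

		slice->layer_id = i;
		/* record the byte offset, as cl_page_slice_add() does */
		pg->layer_offset[pg->layer_count++] = i * sizeof(*slice);
	}

	for (i = 0; i < pg->layer_count; i++)
		printf("slice %d found at offset %d\n",
		       toy_slice_get(pg, i)->layer_id, pg->layer_offset[i]);

	free(pg);
	return 0;
}

Because each offset is stored in an unsigned char, every slice appended
after the header must start within 255 bytes of it, which is what the
LASSERT() in cl_page_slice_add() below enforces.
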
 fs/lustre/include/cl_object.h |   8 +-
 fs/lustre/obdclass/cl_page.c  | 284 ++++++++++++++++++++++++------------------
 2 files changed, 168 insertions(+), 124 deletions(-)

diff --git a/fs/lustre/include/cl_object.h b/fs/lustre/include/cl_object.h
index a0b9e87..47997f8 100644
--- a/fs/lustre/include/cl_object.h
+++ b/fs/lustre/include/cl_object.h
@@ -737,8 +737,10 @@ struct cl_page {
 	struct page			*cp_vmpage;
 	/** Linkage of pages within group. Pages must be owned */
 	struct list_head		 cp_batch;
-	/** List of slices. Immutable after creation. */
-	struct list_head		 cp_layers;
+	/** Array of slice offsets. Immutable after creation. */
+	unsigned char			 cp_layer_offset[3];
+	/** Number of slices stored in cp_layer_offset[] */
+	unsigned char			 cp_layer_count:2;
 	/**
 	 * Page state. This field is const to avoid accidental update, it is
 	 * modified only internally within cl_page.c. Protected by a VM lock.
@@ -781,8 +783,6 @@ struct cl_page_slice {
 	 */
 	struct cl_object		*cpl_obj;
 	const struct cl_page_operations *cpl_ops;
-	/** Linkage into cl_page::cp_layers. Immutable after creation. */
-	struct list_head		 cpl_linkage;
 };
 
 /**
diff --git a/fs/lustre/obdclass/cl_page.c b/fs/lustre/obdclass/cl_page.c
index d5be0c5..cced026 100644
--- a/fs/lustre/obdclass/cl_page.c
+++ b/fs/lustre/obdclass/cl_page.c
@@ -72,22 +72,47 @@ static void cl_page_get_trust(struct cl_page *page)
 	refcount_inc(&page->cp_ref);
 }
 
+static struct cl_page_slice *
+cl_page_slice_get(const struct cl_page *cl_page, int index)
+{
+	if (index < 0 || index >= cl_page->cp_layer_count)
+		return NULL;
+
+	/* So that the cp_layer_offset values fit in a single byte, the
+	 * offsets are taken relative to the end of struct cl_page.
+	 */
+	return (struct cl_page_slice *)((char *)cl_page + sizeof(*cl_page) +
+					cl_page->cp_layer_offset[index]);
+}
+
+#define cl_page_slice_for_each(cl_page, slice, i)		\
+	for (i = 0, slice = cl_page_slice_get(cl_page, 0);	\
+	     i < (cl_page)->cp_layer_count;			\
+	     slice = cl_page_slice_get(cl_page, ++i))
+
+#define cl_page_slice_for_each_reverse(cl_page, slice, i)	\
+	for (i = (cl_page)->cp_layer_count - 1,			\
+	     slice = cl_page_slice_get(cl_page, i); i >= 0;	\
+	     slice = cl_page_slice_get(cl_page, --i))
+
 /**
- * Returns a slice within a page, corresponding to the given layer in the
+ * Returns a slice within a cl_page, corresponding to the given layer in the
  * device stack.
  *
  * \see cl_lock_at()
  */
 static const struct cl_page_slice *
-cl_page_at_trusted(const struct cl_page *page,
+cl_page_at_trusted(const struct cl_page *cl_page,
 		   const struct lu_device_type *dtype)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
-	list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
 			return slice;
 	}
+
 	return NULL;
 }
 
@@ -104,28 +129,28 @@ static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
 	}
 }
 
-static void cl_page_free(const struct lu_env *env, struct cl_page *page,
+static void cl_page_free(const struct lu_env *env, struct cl_page *cl_page,
 			 struct pagevec *pvec)
 {
-	struct cl_object *obj = page->cp_obj;
-	struct cl_page_slice *slice;
+	struct cl_object *obj = cl_page->cp_obj;
 	unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
+	struct cl_page_slice *slice;
+	int i;
 
-	PASSERT(env, page, list_empty(&page->cp_batch));
-	PASSERT(env, page, !page->cp_owner);
-	PASSERT(env, page, page->cp_state == CPS_FREEING);
+	PASSERT(env, cl_page, list_empty(&cl_page->cp_batch));
+	PASSERT(env, cl_page, !cl_page->cp_owner);
+	PASSERT(env, cl_page, cl_page->cp_state == CPS_FREEING);
 
-	while ((slice = list_first_entry_or_null(&page->cp_layers,
-						 struct cl_page_slice,
-						 cpl_linkage)) != NULL) {
-		list_del_init(page->cp_layers.next);
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (unlikely(slice->cpl_ops->cpo_fini))
 			slice->cpl_ops->cpo_fini(env, slice, pvec);
 	}
-	lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
+	cl_page->cp_layer_count = 0;
+	lu_object_ref_del_at(&obj->co_lu, &cl_page->cp_obj_ref,
+			     "cl_page", cl_page);
 	cl_object_put(env, obj);
-	lu_ref_fini(&page->cp_reference);
-	__cl_page_free(page, bufsize);
+	lu_ref_fini(&cl_page->cp_reference);
+	__cl_page_free(cl_page, bufsize);
 }
 
 /**
@@ -212,7 +237,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
 		page->cp_vmpage = vmpage;
 		cl_page_state_set_trust(page, CPS_CACHED);
 		page->cp_type = type;
-		INIT_LIST_HEAD(&page->cp_layers);
 		INIT_LIST_HEAD(&page->cp_batch);
 		lu_ref_init(&page->cp_reference);
 		cl_object_for_each(o2, o) {
@@ -455,22 +479,23 @@ static void cl_page_owner_set(struct cl_page *page)
 }
 
 void __cl_page_disown(const struct lu_env *env,
-		     struct cl_io *io, struct cl_page *pg)
+		      struct cl_io *io, struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
 	enum cl_page_state state;
+	int i;
 
-	state = pg->cp_state;
-	cl_page_owner_clear(pg);
+	state = cl_page->cp_state;
+	cl_page_owner_clear(cl_page);
 
 	if (state == CPS_OWNED)
-		cl_page_state_set(env, pg, CPS_CACHED);
+		cl_page_state_set(env, cl_page, CPS_CACHED);
 	/*
 	 * Completion call-backs are executed in the bottom-up order, so that
 	 * uppermost layer (llite), responsible for VFS/VM interaction runs
 	 * last and can release locks safely.
 	 */
-	list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each_reverse(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_disown)
 			(*slice->cpl_ops->cpo_disown)(env, slice, io);
 	}
@@ -494,12 +519,12 @@ int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
  * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
  * into cl_page_state::CPS_OWNED state.
  *
- * \pre  !cl_page_is_owned(pg, io)
- * \post result == 0 iff cl_page_is_owned(pg, io)
+ * \pre  !cl_page_is_owned(cl_page, io)
+ * \post result == 0 iff cl_page_is_owned(cl_page, io)
  *
  * Return:	0 success
  *
- *		-ve failure, e.g., page was destroyed (and landed in
+ *		-ve failure, e.g., cl_page was destroyed (and landed in
  *		cl_page_state::CPS_FREEING instead of
  *		cl_page_state::CPS_CACHED). or, page was owned by
  *		another thread, or in IO.
@@ -510,19 +535,20 @@ int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
  * \see cl_page_own
  */
 static int __cl_page_own(const struct lu_env *env, struct cl_io *io,
-			 struct cl_page *pg, int nonblock)
+			 struct cl_page *cl_page, int nonblock)
 {
 	const struct cl_page_slice *slice;
 	int result = 0;
+	int i;
 
 	io = cl_io_top(io);
 
-	if (pg->cp_state == CPS_FREEING) {
+	if (cl_page->cp_state == CPS_FREEING) {
 		result = -ENOENT;
 		goto out;
 	}
 
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_own)
 			result = (*slice->cpl_ops->cpo_own)(env, slice,
 							    io, nonblock);
@@ -533,13 +559,13 @@ static int __cl_page_own(const struct lu_env *env, struct cl_io *io,
 		result = 0;
 
 	if (result == 0) {
-		PASSERT(env, pg, !pg->cp_owner);
-		pg->cp_owner = cl_io_top(io);
-		cl_page_owner_set(pg);
-		if (pg->cp_state != CPS_FREEING) {
-			cl_page_state_set(env, pg, CPS_OWNED);
+		PASSERT(env, cl_page, !cl_page->cp_owner);
+		cl_page->cp_owner = cl_io_top(io);
+		cl_page_owner_set(cl_page);
+		if (cl_page->cp_state != CPS_FREEING) {
+			cl_page_state_set(env, cl_page, CPS_OWNED);
 		} else {
-			__cl_page_disown(env, io, pg);
+			__cl_page_disown(env, io, cl_page);
 			result = -ENOENT;
 		}
 	}
@@ -575,51 +601,53 @@ int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
  *
  * Called when page is already locked by the hosting VM.
  *
- * \pre !cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
+ * \pre !cl_page_is_owned(cl_page, io)
+ * \post cl_page_is_owned(cl_page, io)
  *
  * \see cl_page_operations::cpo_assume()
  */
 void cl_page_assume(const struct lu_env *env,
-		    struct cl_io *io, struct cl_page *pg)
+		    struct cl_io *io, struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
 	io = cl_io_top(io);
 
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_assume)
 			(*slice->cpl_ops->cpo_assume)(env, slice, io);
 	}
 
-	PASSERT(env, pg, !pg->cp_owner);
-	pg->cp_owner = cl_io_top(io);
-	cl_page_owner_set(pg);
-	cl_page_state_set(env, pg, CPS_OWNED);
+	PASSERT(env, cl_page, !cl_page->cp_owner);
+	cl_page->cp_owner = cl_io_top(io);
+	cl_page_owner_set(cl_page);
+	cl_page_state_set(env, cl_page, CPS_OWNED);
 }
 EXPORT_SYMBOL(cl_page_assume);
 
 /**
  * Releases page ownership without unlocking the page.
  *
- * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
- * underlying VM page (as VM is supposed to do this itself).
+ * Moves cl_page into cl_page_state::CPS_CACHED without releasing a lock
+ * on the underlying VM page (as VM is supposed to do this itself).
  *
- * \pre   cl_page_is_owned(pg, io)
- * \post !cl_page_is_owned(pg, io)
+ * \pre   cl_page_is_owned(cl_page, io)
+ * \post !cl_page_is_owned(cl_page, io)
  *
  * \see cl_page_assume()
  */
 void cl_page_unassume(const struct lu_env *env,
-		      struct cl_io *io, struct cl_page *pg)
+		      struct cl_io *io, struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
 	io = cl_io_top(io);
-	cl_page_owner_clear(pg);
-	cl_page_state_set(env, pg, CPS_CACHED);
+	cl_page_owner_clear(cl_page);
+	cl_page_state_set(env, cl_page, CPS_CACHED);
 
-	list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each_reverse(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_unassume)
 			(*slice->cpl_ops->cpo_unassume)(env, slice, io);
 	}
@@ -646,21 +674,22 @@ void cl_page_disown(const struct lu_env *env,
 EXPORT_SYMBOL(cl_page_disown);
 
 /**
- * Called when page is to be removed from the object, e.g., as a result of
- * truncate.
+ * Called when cl_page is to be removed from the object, e.g.,
+ * as a result of truncate.
  *
  * Calls cl_page_operations::cpo_discard() top-to-bottom.
  *
- * \pre cl_page_is_owned(pg, io)
+ * \pre cl_page_is_owned(cl_page, io)
  *
  * \see cl_page_operations::cpo_discard()
  */
 void cl_page_discard(const struct lu_env *env,
-		     struct cl_io *io, struct cl_page *pg)
+		     struct cl_io *io, struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_discard)
 			(*slice->cpl_ops->cpo_discard)(env, slice, io);
 	}
@@ -669,22 +698,24 @@ void cl_page_discard(const struct lu_env *env,
 
 /**
  * Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->__cl_page_delete()
+ * cl_pages, e.g., in an error handling cl_page_find()->__cl_page_delete()
  * path. Doesn't check page invariant.
  */
-static void __cl_page_delete(const struct lu_env *env, struct cl_page *pg)
+static void __cl_page_delete(const struct lu_env *env,
+			     struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
-	PASSERT(env, pg, pg->cp_state != CPS_FREEING);
+	PASSERT(env, cl_page, cl_page->cp_state != CPS_FREEING);
 
 	/*
-	 * Sever all ways to obtain new pointers to @pg.
+	 * Sever all ways to obtain new pointers to @cl_page.
 	 */
-	cl_page_owner_clear(pg);
-	__cl_page_state_set(env, pg, CPS_FREEING);
+	cl_page_owner_clear(cl_page);
+	__cl_page_state_set(env, cl_page, CPS_FREEING);
 
-	list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each_reverse(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_delete)
 			(*slice->cpl_ops->cpo_delete)(env, slice);
 	}
@@ -729,11 +760,13 @@ void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
  *
  * \see cl_page_operations::cpo_export()
  */
-void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
+void cl_page_export(const struct lu_env *env, struct cl_page *cl_page,
+		    int uptodate)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_export)
 			(*slice->cpl_ops->cpo_export)(env, slice, uptodate);
 	}
@@ -741,34 +774,36 @@ void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
 EXPORT_SYMBOL(cl_page_export);
 
 /**
- * Returns true, if @pg is VM locked in a suitable sense by the calling
+ * Returns true, if @cl_page is VM locked in a suitable sense by the calling
  * thread.
  */
-int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
+int cl_page_is_vmlocked(const struct lu_env *env,
+			const struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
 	int result;
 
-	slice = list_first_entry(&pg->cp_layers,
-				 const struct cl_page_slice, cpl_linkage);
-	PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
+	slice = cl_page_slice_get(cl_page, 0);
+	PASSERT(env, cl_page, slice->cpl_ops->cpo_is_vmlocked);
 	/*
 	 * Call ->cpo_is_vmlocked() directly instead of going through
 	 * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
 	 * cl_page_invariant().
 	 */
 	result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
-	PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
+	PASSERT(env, cl_page, result == -EBUSY || result == -ENODATA);
+
 	return result == -EBUSY;
 }
 EXPORT_SYMBOL(cl_page_is_vmlocked);
 
-void cl_page_touch(const struct lu_env *env, const struct cl_page *pg,
-		  size_t to)
+void cl_page_touch(const struct lu_env *env,
+		   const struct cl_page *cl_page, size_t to)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_page_touch)
 			(*slice->cpl_ops->cpo_page_touch)(env, slice, to);
 	}
@@ -799,20 +834,21 @@ static void cl_page_io_start(const struct lu_env *env,
  * transfer now.
  */
 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
-		 struct cl_page *pg, enum cl_req_type crt)
+		 struct cl_page *cl_page, enum cl_req_type crt)
 {
 	const struct cl_page_slice *slice;
 	int result = 0;
+	int i;
 
 	/*
-	 * XXX this has to be called bottom-to-top, so that llite can set up
+	 * this has to be called bottom-to-top, so that llite can set up
 	 * PG_writeback without risking other layers deciding to skip this
 	 * page.
 	 */
 	if (crt >= CRT_NR)
 		return -EINVAL;
 
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_own)
 			result = (*slice->cpl_ops->io[crt].cpo_prep)(env, slice,
 								     io);
@@ -822,10 +858,10 @@ int cl_page_prep(const struct lu_env *env, struct cl_io *io,
 
 	if (result >= 0) {
 		result = 0;
-		cl_page_io_start(env, pg, crt);
+		cl_page_io_start(env, cl_page, crt);
 	}
 
-	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+	CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
 	return result;
 }
 EXPORT_SYMBOL(cl_page_prep);
@@ -840,35 +876,36 @@ int cl_page_prep(const struct lu_env *env, struct cl_io *io,
  * uppermost layer (llite), responsible for the VFS/VM interaction runs last
  * and can release locks safely.
  *
- * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
- * \post pg->cp_state == CPS_CACHED
+ * \pre  cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
+ * \post cl_page->cp_state == CPS_CACHED
  *
  * \see cl_page_operations::cpo_completion()
  */
 void cl_page_completion(const struct lu_env *env,
-			struct cl_page *pg, enum cl_req_type crt, int ioret)
+			struct cl_page *cl_page, enum cl_req_type crt,
+			int ioret)
 {
-	struct cl_sync_io *anchor = pg->cp_sync_io;
+	struct cl_sync_io *anchor = cl_page->cp_sync_io;
 	const struct cl_page_slice *slice;
+	int i;
 
-	PASSERT(env, pg, crt < CRT_NR);
-	PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
-
-	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
+	PASSERT(env, cl_page, crt < CRT_NR);
+	PASSERT(env, cl_page, cl_page->cp_state == cl_req_type_state(crt));
 
-	cl_page_state_set(env, pg, CPS_CACHED);
+	CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, ioret);
+	cl_page_state_set(env, cl_page, CPS_CACHED);
 	if (crt >= CRT_NR)
 		return;
 
-	list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each_reverse(cl_page, slice, i) {
 		if (slice->cpl_ops->io[crt].cpo_completion)
 			(*slice->cpl_ops->io[crt].cpo_completion)(env, slice,
 								  ioret);
 	}
 
 	if (anchor) {
-		LASSERT(pg->cp_sync_io == anchor);
-		pg->cp_sync_io = NULL;
+		LASSERT(cl_page->cp_sync_io == anchor);
+		cl_page->cp_sync_io = NULL;
 		cl_sync_io_note(env, anchor, ioret);
 	}
 }
@@ -878,53 +915,56 @@ void cl_page_completion(const struct lu_env *env,
  * Notify layers that transfer formation engine decided to yank this page from
  * the cache and to make it a part of a transfer.
  *
- * \pre  pg->cp_state == CPS_CACHED
- * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
+ * \pre  cl_page->cp_state == CPS_CACHED
+ * \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
  *
  * \see cl_page_operations::cpo_make_ready()
  */
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *cl_page,
 		       enum cl_req_type crt)
 {
-	const struct cl_page_slice *sli;
+	const struct cl_page_slice *slice;
 	int result = 0;
+	int i;
 
 	if (crt >= CRT_NR)
 		return -EINVAL;
 
-	list_for_each_entry(sli, &pg->cp_layers, cpl_linkage) {
-		if (sli->cpl_ops->io[crt].cpo_make_ready)
-			result = (*sli->cpl_ops->io[crt].cpo_make_ready)(env,
-									 sli);
+	cl_page_slice_for_each(cl_page, slice, i) {
+		if (slice->cpl_ops->io[crt].cpo_make_ready)
+			result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env,
+									   slice);
 		if (result != 0)
 			break;
 	}
 
 	if (result >= 0) {
-		PASSERT(env, pg, pg->cp_state == CPS_CACHED);
-		cl_page_io_start(env, pg, crt);
+		PASSERT(env, cl_page, cl_page->cp_state == CPS_CACHED);
+		cl_page_io_start(env, cl_page, crt);
 		result = 0;
 	}
-	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+	CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
+
 	return result;
 }
 EXPORT_SYMBOL(cl_page_make_ready);
 
 /**
- * Called if a pge is being written back by kernel's intention.
+ * Called when a page is being written back at the kernel's initiative.
  *
- * \pre  cl_page_is_owned(pg, io)
- * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
+ * \pre  cl_page_is_owned(cl_page, io)
+ * \post ergo(result == 0, cl_page->cp_state == CPS_PAGEOUT)
  *
  * \see cl_page_operations::cpo_flush()
  */
 int cl_page_flush(const struct lu_env *env, struct cl_io *io,
-		  struct cl_page *pg)
+		  struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
 	int result = 0;
+	int i;
 
-	 list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_flush)
 			result = (*slice->cpl_ops->cpo_flush)(env, slice, io);
 		if (result != 0)
@@ -933,7 +973,7 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
 	if (result > 0)
 		result = 0;
 
-	CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
+	CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d\n", result);
 	return result;
 }
 EXPORT_SYMBOL(cl_page_flush);
@@ -943,14 +983,14 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
  *
  * \see cl_page_operations::cpo_clip()
  */
-void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
+void cl_page_clip(const struct lu_env *env, struct cl_page *cl_page,
 		  int from, int to)
 {
 	const struct cl_page_slice *slice;
+	int i;
 
-	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
-
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", from, to);
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_clip)
 			(*slice->cpl_ops->cpo_clip)(env, slice, from, to);
 	}
@@ -972,24 +1012,24 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
 EXPORT_SYMBOL(cl_page_header_print);
 
 /**
- * Prints human readable representation of @pg to the @f.
+ * Prints human readable representation of @cl_page to the @f.
  */
 void cl_page_print(const struct lu_env *env, void *cookie,
-		   lu_printer_t printer, const struct cl_page *pg)
+		   lu_printer_t printer, const struct cl_page *cl_page)
 {
 	const struct cl_page_slice *slice;
 	int result = 0;
+	int i;
 
-	cl_page_header_print(env, cookie, printer, pg);
-
-	list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+	cl_page_header_print(env, cookie, printer, cl_page);
+	cl_page_slice_for_each(cl_page, slice, i) {
 		if (slice->cpl_ops->cpo_print)
 			result = (*slice->cpl_ops->cpo_print)(env, slice,
 							      cookie, printer);
 		if (result != 0)
 			break;
 	}
-	(*printer)(env, cookie, "end page@%p\n", pg);
+	(*printer)(env, cookie, "end page@%p\n", cl_page);
 }
 EXPORT_SYMBOL(cl_page_print);
 
@@ -1032,14 +1072,18 @@ size_t cl_page_size(const struct cl_object *obj)
  *
  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
  */
-void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
+void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
 		       struct cl_object *obj,
 		       const struct cl_page_operations *ops)
 {
-	list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+	unsigned int offset = (char *)slice -
+			      ((char *)cl_page + sizeof(*cl_page));
+
+	LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
+	cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
 	slice->cpl_obj = obj;
 	slice->cpl_ops = ops;
-	slice->cpl_page = page;
+	slice->cpl_page = cl_page;
 }
 EXPORT_SYMBOL(cl_page_slice_add);
 
-- 
1.8.3.1

