* [PATCH 0/2] Remove wrapper functions from workitem
@ 2015-11-08  9:16 ` Shivani Bhardwaj
  0 siblings, 0 replies; 6+ messages in thread
From: Shivani Bhardwaj @ 2015-11-08  9:16 UTC (permalink / raw)
  To: gregkh; +Cc: andreas.dilger, oleg.drokin, lustre-devel, linux-kernel, devel

This patchset removes unnecessary wrapper functions from the workitem file
and replaces all their calls with the functions that they wrap.
After applying these patches, the code becomes cleaner.
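
To illustrate the pattern (excerpted from the patches below, not new code), each
wrapper is a one-line inline function around a plain spinlock call, so every call
site can take sched->ws_lock directly and the wrapper can be deleted:

/* wrapper removed by patch 1/2 */
static inline void
cfs_wi_sched_lock(struct cfs_wi_sched *sched)
{
	spin_lock(&sched->ws_lock);
}

/* a call site before the change ... */
	cfs_wi_sched_lock(sched);
/* ... and after it */
	spin_lock(&sched->ws_lock);

Patch 2/2 makes the same change for cfs_wi_sched_unlock() and spin_unlock().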

Shivani Bhardwaj (2):
  staging: lustre: workitem: Remove cfs_wi_sched_lock wrapper
  staging: lustre: workitem: Remove cfs_wi_sched_unlock wrapper

 drivers/staging/lustre/lustre/libcfs/workitem.c | 48 ++++++++++---------------
 1 file changed, 18 insertions(+), 30 deletions(-)

-- 
2.1.0


* [PATCH 1/2] staging: lustre: workitem: Remove cfs_wi_sched_lock wrapper
  2015-11-08  9:16 ` [lustre-devel] " Shivani Bhardwaj
@ 2015-11-08  9:17   ` Shivani Bhardwaj
  -1 siblings, 0 replies; 6+ messages in thread
From: Shivani Bhardwaj @ 2015-11-08  9:17 UTC (permalink / raw)
  To: gregkh; +Cc: andreas.dilger, oleg.drokin, lustre-devel, linux-kernel, devel

Remove the wrapper function cfs_wi_sched_lock() and replace all its
calls with the function it wrapped.

Signed-off-by: Shivani Bhardwaj <shivanib134@gmail.com>
---
 drivers/staging/lustre/lustre/libcfs/workitem.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index b57acbf..e8bac9b 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -87,12 +87,6 @@ static struct cfs_workitem_data {
 } cfs_wi_data;
 
 static inline void
-cfs_wi_sched_lock(struct cfs_wi_sched *sched)
-{
-	spin_lock(&sched->ws_lock);
-}
-
-static inline void
 cfs_wi_sched_unlock(struct cfs_wi_sched *sched)
 {
 	spin_unlock(&sched->ws_lock);
@@ -101,7 +95,7 @@ cfs_wi_sched_unlock(struct cfs_wi_sched *sched)
 static inline int
 cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
 {
-	cfs_wi_sched_lock(sched);
+	spin_lock(&sched->ws_lock);
 	if (sched->ws_stopping) {
 		cfs_wi_sched_unlock(sched);
 		return 0;
@@ -125,7 +119,7 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
 
-	cfs_wi_sched_lock(sched);
+	spin_lock(&sched->ws_lock);
 
 	LASSERT(wi->wi_running);
 	if (wi->wi_scheduled) { /* cancel pending schedules */
@@ -161,7 +155,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 	 * means the workitem will not be scheduled and will not have
 	 * any race with wi_action.
 	 */
-	cfs_wi_sched_lock(sched);
+	spin_lock(&sched->ws_lock);
 
 	rc = !(wi->wi_running);
 
@@ -195,7 +189,7 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
 
-	cfs_wi_sched_lock(sched);
+	spin_lock(&sched->ws_lock);
 
 	if (!wi->wi_scheduled) {
 		LASSERT (list_empty(&wi->wi_list));
@@ -237,7 +231,7 @@ cfs_wi_scheduler (void *arg)
 
 	spin_unlock(&cfs_wi_data.wi_glock);
 
-	cfs_wi_sched_lock(sched);
+	spin_lock(&sched->ws_lock);
 
 	while (!sched->ws_stopping) {
 		int	     nloops = 0;
@@ -263,7 +257,7 @@ cfs_wi_scheduler (void *arg)
 
 			rc = (*wi->wi_action) (wi);
 
-			cfs_wi_sched_lock(sched);
+			spin_lock(&sched->ws_lock);
 			if (rc != 0) /* WI should be dead, even be freed! */
 				continue;
 
@@ -282,14 +276,14 @@ cfs_wi_scheduler (void *arg)
 			/* don't sleep because some workitems still
 			 * expect me to come back soon */
 			cond_resched();
-			cfs_wi_sched_lock(sched);
+			spin_lock(&sched->ws_lock);
 			continue;
 		}
 
 		cfs_wi_sched_unlock(sched);
 		rc = wait_event_interruptible_exclusive(sched->ws_waitq,
 						!cfs_wi_sched_cansleep(sched));
-		cfs_wi_sched_lock(sched);
+		spin_lock(&sched->ws_lock);
 	}
 
 	cfs_wi_sched_unlock(sched);
-- 
2.1.0


* [PATCH 2/2] staging: lustre: workitem: Remove cfs_wi_sched_unlock wrapper
  2015-11-08  9:16 ` [lustre-devel] " Shivani Bhardwaj
@ 2015-11-08  9:17   ` Shivani Bhardwaj
  -1 siblings, 0 replies; 6+ messages in thread
From: Shivani Bhardwaj @ 2015-11-08  9:17 UTC (permalink / raw)
  To: gregkh; +Cc: andreas.dilger, oleg.drokin, lustre-devel, linux-kernel, devel

Remove the wrapper function cfs_wi_sched_unlock() and replace all its
calls with the function it wrapped.

Signed-off-by: Shivani Bhardwaj <shivanib134@gmail.com>
---
 drivers/staging/lustre/lustre/libcfs/workitem.c | 26 ++++++++++---------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index e8bac9b..60bb88a 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -86,26 +86,20 @@ static struct cfs_workitem_data {
 	int			wi_stopping;
 } cfs_wi_data;
 
-static inline void
-cfs_wi_sched_unlock(struct cfs_wi_sched *sched)
-{
-	spin_unlock(&sched->ws_lock);
-}
-
 static inline int
 cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
 {
 	spin_lock(&sched->ws_lock);
 	if (sched->ws_stopping) {
-		cfs_wi_sched_unlock(sched);
+		spin_unlock(&sched->ws_lock);
 		return 0;
 	}
 
 	if (!list_empty(&sched->ws_runq)) {
-		cfs_wi_sched_unlock(sched);
+		spin_unlock(&sched->ws_lock);
 		return 0;
 	}
-	cfs_wi_sched_unlock(sched);
+	spin_unlock(&sched->ws_lock);
 	return 1;
 }
 
@@ -133,7 +127,7 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 	LASSERT(list_empty(&wi->wi_list));
 
 	wi->wi_scheduled = 1; /* LBUG future schedule attempts */
-	cfs_wi_sched_unlock(sched);
+	spin_unlock(&sched->ws_lock);
 
 	return;
 }
@@ -171,7 +165,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 
 	LASSERT (list_empty(&wi->wi_list));
 
-	cfs_wi_sched_unlock(sched);
+	spin_unlock(&sched->ws_lock);
 	return rc;
 }
 EXPORT_SYMBOL(cfs_wi_deschedule);
@@ -205,7 +199,7 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 	}
 
 	LASSERT (!list_empty(&wi->wi_list));
-	cfs_wi_sched_unlock(sched);
+	spin_unlock(&sched->ws_lock);
 	return;
 }
 EXPORT_SYMBOL(cfs_wi_schedule);
@@ -252,7 +246,7 @@ cfs_wi_scheduler (void *arg)
 			wi->wi_running   = 1;
 			wi->wi_scheduled = 0;
 
-			cfs_wi_sched_unlock(sched);
+			spin_unlock(&sched->ws_lock);
 			nloops++;
 
 			rc = (*wi->wi_action) (wi);
@@ -272,7 +266,7 @@ cfs_wi_scheduler (void *arg)
 		}
 
 		if (!list_empty(&sched->ws_runq)) {
-			cfs_wi_sched_unlock(sched);
+			spin_unlock(&sched->ws_lock);
 			/* don't sleep because some workitems still
 			 * expect me to come back soon */
 			cond_resched();
@@ -280,13 +274,13 @@ cfs_wi_scheduler (void *arg)
 			continue;
 		}
 
-		cfs_wi_sched_unlock(sched);
+		spin_unlock(&sched->ws_lock);
 		rc = wait_event_interruptible_exclusive(sched->ws_waitq,
 						!cfs_wi_sched_cansleep(sched));
 		spin_lock(&sched->ws_lock);
 	}
 
-	cfs_wi_sched_unlock(sched);
+	spin_unlock(&sched->ws_lock);
 
 	spin_lock(&cfs_wi_data.wi_glock);
 	sched->ws_nthreads--;
-- 
2.1.0


Thread overview:
2015-11-08  9:16 [PATCH 0/2] Remove wrapper functions from workitem Shivani Bhardwaj
2015-11-08  9:17 ` [PATCH 1/2] staging: lustre: workitem: Remove cfs_wi_sched_lock wrapper Shivani Bhardwaj
2015-11-08  9:17 ` [PATCH 2/2] staging: lustre: workitem: Remove cfs_wi_sched_unlock wrapper Shivani Bhardwaj
