* [Ocfs2-devel] [PATCH] ocfs2: Optimization of code while free dead locks.
@ 2016-11-26 12:15 Guozhonghua
  2016-11-28  6:06 ` Eric Ren
  0 siblings, 1 reply; 3+ messages in thread
From: Guozhonghua @ 2016-11-26 12:15 UTC (permalink / raw)
  To: ocfs2-devel


The three loops can be merged into one outer loop over the lock queues, so that less code does the same work.
The patch is based on linux-4.9-rc6.

Signed-off-by: Guozhonghua <guozhonghua@h3c.com>


--- ocfs2.orig/dlm/dlmrecovery.c        2016-11-26 19:13:04.833023242 +0800
+++ ocfs2/dlm/dlmrecovery.c     2016-11-26 19:24:03.982552497 +0800
@@ -2268,6 +2268,8 @@ static void dlm_free_dead_locks(struct d
 {
        struct dlm_lock *lock, *next;
        unsigned int freed = 0;
+       struct list_head *queue = NULL;
+       int i;

        /* this node is the lockres master:
         * 1) remove any stale locks for the dead node
@@ -2280,33 +2282,19 @@ static void dlm_free_dead_locks(struct d
         * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */

        /* TODO: check pending_asts, pending_basts here */
-       list_for_each_entry_safe(lock, next, &res->granted, list) {
-               if (lock->ml.node == dead_node) {
-                       list_del_init(&lock->list);
-                       dlm_lock_put(lock);
-                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
-                       dlm_lock_put(lock);
-                       freed++;
+       for (i = DLM_BLOCKED_LIST; i >= DLM_GRANTED_LIST; i--) {
+               queue = dlm_list_idx_to_ptr(res, i);
+               list_for_each_entry_safe(lock, next, queue, list) {
+                       if (lock->ml.node == dead_node) {
+                               list_del_init(&lock->list);
+                               dlm_lock_put(lock);
+
+                               /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
+                               dlm_lock_put(lock);
+                               freed++;
+                       }
                }
-       }
-       list_for_each_entry_safe(lock, next, &res->converting, list) {
-               if (lock->ml.node == dead_node) {
-                       list_del_init(&lock->list);
-                       dlm_lock_put(lock);
-                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
-                       dlm_lock_put(lock);
-                       freed++;
-               }
-       }
-       list_for_each_entry_safe(lock, next, &res->blocked, list) {
-               if (lock->ml.node == dead_node) {
-                       list_del_init(&lock->list);
-                       dlm_lock_put(lock);
-                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
-                       dlm_lock_put(lock);
-                       freed++;
-               }
-       }
+       }

        if (freed) {
                mlog(0, "%s:%.*s: freed %u locks for dead node %u, "



* [Ocfs2-devel] [PATCH] ocfs2: Optimization of code while free dead locks.
  2016-11-26 12:15 [Ocfs2-devel] [PATCH] ocfs2: Optimization of code while free dead locks Guozhonghua
@ 2016-11-28  6:06 ` Eric Ren
  2016-11-28  6:17   ` Joseph Qi
  0 siblings, 1 reply; 3+ messages in thread
From: Eric Ren @ 2016-11-28  6:06 UTC (permalink / raw)
  To: ocfs2-devel

Hi,

On 11/26/2016 08:15 PM, Guozhonghua wrote:
> The three loops can be merged into one outer loop over the lock queues, so that less code does the same work.
> The patch is based on linux-4.9-rc6.
>
> Signed-off-by: Guozhonghua <guozhonghua@h3c.com>
>
>
> --- ocfs2.orig/dlm/dlmrecovery.c        2016-11-26 19:13:04.833023242 +0800
> +++ ocfs2/dlm/dlmrecovery.c     2016-11-26 19:24:03.982552497 +0800
I don't think this patch can be applied cleanly:
------
zhen@desktop:~/linux> git am ~/patches/temp/\[PATCH\]\ ocfs2\:\ Optimization\ of\ code\ 
while\ free\ dead\ locks..eml
Applying: ocfs2: Optimization of code while free dead locks.
.git/rebase-apply/patch:7: trailing whitespace.
        struct list_head *queue = NULL;
.git/rebase-apply/patch:8: trailing whitespace.
        int i;
fatal: corrupt patch at line 9
Patch failed at 0001 ocfs2: Optimization of code while free dead locks.
------

Please go through the docs below:
[1] https://kernelnewbies.org/FirstKernelPatch

The file path (ocfs2/dlm/dlmrecovery.c) is weird. It should look like:

                   diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
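
For example, one common workflow that produces those a/ and b/ prefixed
paths (and output that applies cleanly with git am) is to commit the
change in your kernel tree and let git generate the patch:

                   git commit -s        # commit with your Signed-off-by
                   git format-patch -1  # emit 0001-*.patch for that commit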

> @@ -2268,6 +2268,8 @@ static void dlm_free_dead_locks(struct d
>   {
>          struct dlm_lock *lock, *next;
>          unsigned int freed = 0;
> +       struct list_head *queue = NULL;
> +       int i;
>
>          /* this node is the lockres master:
>           * 1) remove any stale locks for the dead node
> @@ -2280,33 +2282,19 @@ static void dlm_free_dead_locks(struct d
>           * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */
>
>          /* TODO: check pending_asts, pending_basts here */
> -       list_for_each_entry_safe(lock, next, &res->granted, list) {
> -               if (lock->ml.node == dead_node) {
> -                       list_del_init(&lock->list);
> -                       dlm_lock_put(lock);
> -                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
> -                       dlm_lock_put(lock);
> -                       freed++;
> +       for (i = DLM_BLOCKED_LIST; i >= DLM_GRANTED_LIST; i--) {

Is it right to loop over the lists in the reverse of the original order?

Eric
> +               queue = dlm_list_idx_to_ptr(res, i);
> +               list_for_each_entry_safe(lock, next, queue, list) {
> +                       if (lock->ml.node == dead_node) {
> +                               list_del_init(&lock->list);
> +                               dlm_lock_put(lock);
> +
> +                               /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
> +                               dlm_lock_put(lock);
> +                               freed++;
> +                       }
>                  }
> -       }
> -       list_for_each_entry_safe(lock, next, &res->converting, list) {
> -               if (lock->ml.node == dead_node) {
> -                       list_del_init(&lock->list);
> -                       dlm_lock_put(lock);
> -                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
> -                       dlm_lock_put(lock);
> -                       freed++;
> -               }
> -       }
> -       list_for_each_entry_safe(lock, next, &res->blocked, list) {
> -               if (lock->ml.node == dead_node) {
> -                       list_del_init(&lock->list);
> -                       dlm_lock_put(lock);
> -                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
> -                       dlm_lock_put(lock);
> -                       freed++;
> -               }
> -       }
> +       }
>
>          if (freed) {
>                  mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
>


* [Ocfs2-devel] [PATCH] ocfs2: Optimization of code while free dead locks.
  2016-11-28  6:06 ` Eric Ren
@ 2016-11-28  6:17   ` Joseph Qi
  0 siblings, 0 replies; 3+ messages in thread
From: Joseph Qi @ 2016-11-28  6:17 UTC (permalink / raw)
  To: ocfs2-devel



On 16/11/28 14:06, Eric Ren wrote:
> Hi,
>
> On 11/26/2016 08:15 PM, Guozhonghua wrote:
>> The three loops can be merged into one outer loop over the lock queues, so that less code does the same work.
>> The patch is based on linux-4.9-rc6.
>>
>> Signed-off-by: Guozhonghua <guozhonghua@h3c.com>
>>
>>
>> --- ocfs2.orig/dlm/dlmrecovery.c        2016-11-26 19:13:04.833023242 +0800
>> +++ ocfs2/dlm/dlmrecovery.c     2016-11-26 19:24:03.982552497 +0800
> I don't think this patch can be applied cleanly:
> ------
> zhen@desktop:~/linux> git am ~/patches/temp/\[PATCH\]\ ocfs2\:\ Optimization\ of\ code\
> while\ free\ dead\ locks..eml
> Applying: ocfs2: Optimization of code while free dead locks.
> .git/rebase-apply/patch:7: trailing whitespace.
>          struct list_head *queue = NULL;
> .git/rebase-apply/patch:8: trailing whitespace.
>          int i;
> fatal: corrupt patch at line 9
> Patch failed at 0001 ocfs2: Optimization of code while free dead locks.
> ------
>
> Please go through the docs below:
> [1] https://kernelnewbies.org/FirstKernelPatch
>
> The file path (ocfs2/dlm/dlmrecovery.c) is weird. It should look like:
>
>                     diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
>
>> @@ -2268,6 +2268,8 @@ static void dlm_free_dead_locks(struct d
>>    {
>>           struct dlm_lock *lock, *next;
>>           unsigned int freed = 0;
>> +       struct list_head *queue = NULL;
>> +       int i;
>>
>>           /* this node is the lockres master:
>>            * 1) remove any stale locks for the dead node
>> @@ -2280,33 +2282,19 @@ static void dlm_free_dead_locks(struct d
>>            * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */
>>
>>           /* TODO: check pending_asts, pending_basts here */
>> -       list_for_each_entry_safe(lock, next, &res->granted, list) {
>> -               if (lock->ml.node == dead_node) {
>> -                       list_del_init(&lock->list);
>> -                       dlm_lock_put(lock);
>> -                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
>> -                       dlm_lock_put(lock);
>> -                       freed++;
>> +       for (i = DLM_BLOCKED_LIST; i >= DLM_GRANTED_LIST; i--) {
> Is it right to loop over the lists in the reverse of the original order?
>
> Eric
We have to keep the traversal order from granted to blocked.
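
Something like this (an untested sketch, reusing the queue index helpers
the patch already uses) would preserve that order:

	for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			if (lock->ml.node == dead_node) {
				list_del_init(&lock->list);
				/* one put for the list reference, one in
				 * place of DLM_UNLOCK_FREE_LOCK */
				dlm_lock_put(lock);
				dlm_lock_put(lock);
				freed++;
			}
		}
	}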

Thanks,
Joseph
>> +               queue = dlm_list_idx_to_ptr(res, i);
>> +               list_for_each_entry_safe(lock, next, queue, list) {
>> +                       if (lock->ml.node == dead_node) {
>> +                               list_del_init(&lock->list);
>> +                               dlm_lock_put(lock);
>> +
>> +                               /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
>> +                               dlm_lock_put(lock);
>> +                               freed++;
>> +                       }
>>                   }
>> -       }
>> -       list_for_each_entry_safe(lock, next, &res->converting, list) {
>> -               if (lock->ml.node == dead_node) {
>> -                       list_del_init(&lock->list);
>> -                       dlm_lock_put(lock);
>> -                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
>> -                       dlm_lock_put(lock);
>> -                       freed++;
>> -               }
>> -       }
>> -       list_for_each_entry_safe(lock, next, &res->blocked, list) {
>> -               if (lock->ml.node == dead_node) {
>> -                       list_del_init(&lock->list);
>> -                       dlm_lock_put(lock);
>> -                       /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
>> -                       dlm_lock_put(lock);
>> -                       freed++;
>> -               }
>> -       }
>> +       }
>>
>>           if (freed) {
>>                   mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
>>
>
>
> _______________________________________________
> Ocfs2-devel mailing list
> Ocfs2-devel@oss.oracle.com
> https://oss.oracle.com/mailman/listinfo/ocfs2-devel

