* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Tao Ma @ 2010-08-12  9:03 UTC (permalink / raw)
  To: ocfs2-devel

In ocfs2 we currently cache only the inodes of the global system files
and the system inodes of our own slot, but in some cases we need to
access the system inodes of other slots, e.g. during orphan scan,
inode stealing, etc.
Consider the following case: we are slot 0 and we are replaying
orphan_dir:0001. The general process for every file in that dir is:
1. We look up orphan_dir:0001; since there is no cached inode for it,
   we have to create an inode and read it from disk.
2. We do the normal work, such as delete_inode, and remove the entry
   from the dir.
3. We call iput once we have finished working on orphan_dir:0001.
   Since we hold no dcache reference for this inode, icount reaches 0
   and VFS calls clear_inode; in ocfs2_clear_inode we checkpoint the
   inode, which sets ocfs2_cmt and journald to work.
4. We loop back to 1 for the next inode.

So for every deleted file we end up re-reading the dir inode from disk
and checkpointing the journal. That is very time consuming and causes
a lot of journal checkpoint I/O.
A better solution is to hold an extra reference to these inodes in
ocfs2_super. Then, as long as there is no race with other nodes (which
would make dlmglue checkpoint the inode), clear_inode is not called in
step 3, and in step 1 we only need to read the inode from disk the
first time. This is a big win for us.

Currently we only cache orphan_dir, extent_alloc and inode_alloc,
since these three inodes of other slots have a chance to be used from
our own slot.

Signed-off-by: Tao Ma <tao.ma@oracle.com>
---
 fs/ocfs2/ocfs2.h   |    2 +
 fs/ocfs2/super.c   |    2 +
 fs/ocfs2/sysfile.c |   86 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/ocfs2/sysfile.h |    1 +
 4 files changed, 91 insertions(+), 0 deletions(-)

diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index c67003b..ec251ec 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -278,6 +278,8 @@ struct ocfs2_super
 	struct inode *root_inode;
 	struct inode *sys_root_inode;
 	struct inode *system_inodes[NUM_SYSTEM_INODES];
+	/* Some inodes of other nodes we want to store. */
+	struct inode **other_system_inodes;
 
 	struct ocfs2_slot_info *slot_info;
 
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 0eaa929..697af86 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -535,6 +535,8 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
 		osb->root_inode = NULL;
 	}
 
+	ocfs2_release_other_system_inodes(osb);
+
 	mlog_exit(0);
 }
 
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index bfe7190..4f5c0e9 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -66,6 +66,90 @@ static inline int is_in_system_inode_array(struct ocfs2_super *osb,
 	return slot == osb->slot_num || is_global_system_inode(type);
 }
 
+/*
+ * System inodes of other nodes we want to store in osb->other_system_inodes.
+ * Currently we only support orphan dir, extent alloc and inode alloc.
+ */
+enum {
+	ORPHAN_DIR,
+	EXTENT_ALLOC,
+	INODE_ALLOC,
+	NUM_OTHER_SYSTEM_INODES,
+};
+
+static inline int is_in_other_system_inode_array(struct ocfs2_super *osb,
+						 int type,
+						 u32 slot)
+{
+	if (slot == osb->slot_num)
+		return 0;
+
+	return type == ORPHAN_DIR_SYSTEM_INODE ||
+	       type == EXTENT_ALLOC_SYSTEM_INODE ||
+	       type == INODE_ALLOC_SYSTEM_INODE;
+}
+
+/*
+ * Currently we only support orphan dir, extent alloc and inode alloc.
+ * They are contiguous, so the mapping of (type, slot) to index is simple.
+ * We can change it to some other mapping later if we want to store
+ * other system inodes.
+ */
+static inline int get_system_inode_index(struct ocfs2_super *osb,
+					 int type,
+					 u32 slot)
+{
+	int index = type - ORPHAN_DIR_SYSTEM_INODE;
+
+	if (slot < osb->slot_num)
+		return slot * NUM_OTHER_SYSTEM_INODES + index;
+	else
+		return (slot - 1) * NUM_OTHER_SYSTEM_INODES + index;
+}
+
+static struct inode **get_other_system_inode(struct ocfs2_super *osb,
+					     int type,
+					     u32 slot)
+{
+	BUG_ON(slot == osb->slot_num);
+
+	if (unlikely(!osb->other_system_inodes)) {
+		osb->other_system_inodes = kzalloc(sizeof(struct inode *) *
+						   NUM_OTHER_SYSTEM_INODES *
+						   (osb->max_slots - 1),
+						   GFP_NOFS);
+		if (!osb->other_system_inodes) {
+			mlog_errno(-ENOMEM);
+			/*
+			 * Return NULL here so that ocfs2_get_system_file_inode
+			 * will still create an inode and use it.  We will try
+			 * to initialize other_system_inodes next time.
+			 */
+			return NULL;
+		}
+	}
+
+	return &osb->other_system_inodes[get_system_inode_index(osb,
+								type, slot)];
+}
+
+void ocfs2_release_other_system_inodes(struct ocfs2_super *osb)
+{
+	int i;
+
+	if (!osb->other_system_inodes)
+		return;
+
+	for (i = 0; i < NUM_OTHER_SYSTEM_INODES * (osb->max_slots - 1); i++) {
+		if (osb->other_system_inodes[i]) {
+			iput(osb->other_system_inodes[i]);
+			osb->other_system_inodes[i] = NULL;
+		}
+	}
+
+	kfree(osb->other_system_inodes);
+}
+
 struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb,
 					  int type,
 					  u32 slot)
@@ -76,6 +160,8 @@ struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb,
 	/* avoid the lookup if cached in local system file array */
 	if (is_in_system_inode_array(osb, type, slot))
 		arr = &(osb->system_inodes[type]);
+	else if (is_in_other_system_inode_array(osb, type, slot))
+		arr = get_other_system_inode(osb, type, slot);
 
 	if (arr && ((inode = *arr) != NULL)) {
 		/* get a ref in addition to the array ref */
diff --git a/fs/ocfs2/sysfile.h b/fs/ocfs2/sysfile.h
index cc9ea66..26cc4bc 100644
--- a/fs/ocfs2/sysfile.h
+++ b/fs/ocfs2/sysfile.h
@@ -29,5 +29,6 @@
 struct inode * ocfs2_get_system_file_inode(struct ocfs2_super *osb,
 					   int type,
 					   u32 slot);
+void ocfs2_release_other_system_inodes(struct ocfs2_super *osb);
 
 #endif /* OCFS2_SYSFILE_H */
-- 
1.7.1.GIT
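
A quick standalone sketch of the (type, slot) -> index mapping used by
get_system_inode_index() in the hunk above; the slot count and slot
number below are made up purely for illustration, and the program just
prints the computed indexes:

#include <stdio.h>

enum { ORPHAN_DIR, EXTENT_ALLOC, INODE_ALLOC, NUM_OTHER_SYSTEM_INODES };

static int index_for(int type_idx, unsigned int slot, unsigned int slot_num)
{
	/*
	 * type_idx plays the role of (type - ORPHAN_DIR_SYSTEM_INODE) in the
	 * patch.  Our own slot is never stored, so slots above it shift
	 * down by one.
	 */
	if (slot < slot_num)
		return slot * NUM_OTHER_SYSTEM_INODES + type_idx;
	else
		return (slot - 1) * NUM_OTHER_SYSTEM_INODES + type_idx;
}

int main(void)
{
	unsigned int slot, slot_num = 1, max_slots = 4;
	int type;

	for (slot = 0; slot < max_slots; slot++) {
		if (slot == slot_num)
			continue;	/* our own slot lives in osb->system_inodes[] */
		for (type = ORPHAN_DIR; type < NUM_OTHER_SYSTEM_INODES; type++)
			printf("slot %u type %d -> other_system_inodes[%d]\n",
			       slot, type, index_for(type, slot, slot_num));
	}
	return 0;
}

With slot_num = 1, slot 0 maps to indexes 0-2 and slots 2 and 3 map to
3-5 and 6-8, so the array holds exactly
(max_slots - 1) * NUM_OTHER_SYSTEM_INODES entries.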

* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Joel Becker @ 2010-08-12  9:43 UTC (permalink / raw)
  To: ocfs2-devel

On Thu, Aug 12, 2010 at 05:03:16PM +0800, Tao Ma wrote:
> In ocfs2, we now only cache the inodes for global system file
> and the system inodes for our own slot. But we have some cases
> that we may need to access system inodes of other nodes, such
> as orphan scan, inode steal etc.

	I don't see why you don't extend the existing cache and make one
cache.  Make it live the lifetime of the filesystem.  No real reason to
a) have two caches or b) limit the system inodes we might cache.  If we
don't have the lock we're going to re-read them anyway.

Joel

-- 

	Pitchers and catchers report.

Joel Becker
Consulting Software Developer
Oracle
E-mail: joel.becker at oracle.com
Phone: (650) 506-8127

* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Tao Ma @ 2010-08-13  0:49 UTC (permalink / raw)
  To: ocfs2-devel

Joel Becker wrote:
> On Thu, Aug 12, 2010 at 05:03:16PM +0800, Tao Ma wrote:
>   
>> In ocfs2, we now only cache the inodes for global system file
>> and the system inodes for our own slot. But we have some cases
>> that we may need to access system inodes of other nodes, such
>> as orphan scan, inode steal etc.
>>     
>
> 	I don't see why you don't extend the existing cache and make one
> cache.  Make it live the lifetime of the filesystem.  No real reason to
> a) have to caches or b) limit the system inodes we might cache.  If we
> don't have the lock we're going to re-read them anyway.
>   
You want me to do:

-        struct inode *system_inodes[NUM_SYSTEM_INODES];
+        struct inode **system_inodes;

and do

+        system_inodes = kzalloc((NUM_SYSTEM_INODES - GROUP_QUOTA_SYSTEM_INODE) *
+                                sizeof(struct inode *) * osb->max_slots,
+                                GFP_NOFS);

So we would also cache other system inodes such as local_alloc,
truncate_log, local_user_quota and local_group_quota, and actually we
will almost never touch those inodes (well, recovery is an exception).
So why cache them if in most cases they will not be used?
In http://oss.oracle.com/pipermail/ocfs2-devel/2010-June/006562.html,
Goldwyn tries to reduce our size just by moving the position of some
fields, so I think we should save this memory for the kernel. :)

Regards,
Tao

* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Sunil Mushran @ 2010-08-13  1:03 UTC (permalink / raw)
  To: ocfs2-devel

On 08/12/2010 02:43 AM, Joel Becker wrote:
> On Thu, Aug 12, 2010 at 05:03:16PM +0800, Tao Ma wrote:
>> In ocfs2, we now only cache the inodes for global system file
>> and the system inodes for our own slot. But we have some cases
>> that we may need to access system inodes of other nodes, such
>> as orphan scan, inode steal etc.
>
> 	I don't see why you don't extend the existing cache and make one
> cache.  Make it live the lifetime of the filesystem.  No real reason to
> a) have to caches or b) limit the system inodes we might cache.  If we
> don't have the lock we're going to re-read them anyway.

Yeah... I have to agree. It will make it more readable.

enum {
         BAD_BLOCK_SYSTEM_INODE = 0,
         GLOBAL_INODE_ALLOC_SYSTEM_INODE,
         SLOT_MAP_SYSTEM_INODE,
#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
         HEARTBEAT_SYSTEM_INODE,
         GLOBAL_BITMAP_SYSTEM_INODE,
         USER_QUOTA_SYSTEM_INODE,
         GROUP_QUOTA_SYSTEM_INODE,
#define OCFS2_LAST_GLOBAL_SYSTEM_INODE GROUP_QUOTA_SYSTEM_INODE
#define OCFS2_FIRST_LOCAL_SYSTEM_INODE ORPHAN_DIR_SYSTEM_INODE
         ORPHAN_DIR_SYSTEM_INODE,
         EXTENT_ALLOC_SYSTEM_INODE,
         INODE_ALLOC_SYSTEM_INODE,
         JOURNAL_SYSTEM_INODE,
         LOCAL_ALLOC_SYSTEM_INODE,
         TRUNCATE_LOG_SYSTEM_INODE,
         LOCAL_USER_QUOTA_SYSTEM_INODE,
         LOCAL_GROUP_QUOTA_SYSTEM_INODE,
         NUM_SYSTEM_INODES
};
#define NUM_LOCAL_SYSTEM_INODES (NUM_SYSTEM_INODES - OCFS2_FIRST_LOCAL_SYSTEM_INODE)

struct ocfs2_super {
...
        struct inode *global_system_inodes[OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1];
         struct inode **local_system_inodes;
...
};

local_system_inodes = kzalloc(sizeof(struct inode *) * osb->num_slots *
                              NUM_LOCAL_SYSTEM_INODES, GFP_NOFS);

         /* avoid the lookup if cached in local system file array */
         if (is_in_system_inode_array(osb, type, slot))
                 arr = &(osb->system_inodes[type]);

change to:

        if (is_global_system_inode(type))
                arr = &(osb->global_system_inodes[type]);
        else {
                BUG_ON(slot == OCFS2_INVALID_SLOT);
                BUG_ON(type < OCFS2_FIRST_LOCAL_SYSTEM_INODE ||
                       type > LOCAL_GROUP_QUOTA_SYSTEM_INODE);
                tmp = (slot * NUM_LOCAL_SYSTEM_INODES) +
                      (type - OCFS2_FIRST_LOCAL_SYSTEM_INODE);
                arr = &(osb->local_system_inodes[tmp]);
        }

How does this look?

The only downside is that the kzalloc for more than 64 slots will
require more than a page on 64-bit systems (8 local system inode
pointers per slot at 8 bytes each is exactly 4096 bytes at 64 slots).
But that should be ok.

Sunil

* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Joel Becker @ 2010-08-13  1:04 UTC (permalink / raw)
  To: ocfs2-devel

On Fri, Aug 13, 2010 at 08:49:16AM +0800, Tao Ma wrote:
> >	I don't see why you don't extend the existing cache and make one
> >cache.  Make it live the lifetime of the filesystem.  No real reason to
> >a) have to caches or b) limit the system inodes we might cache.  If we
> >don't have the lock we're going to re-read them anyway.
> You want me to do:
> -        struct inode *system_inodes[NUM_SYSTEM_INODES];
> +        struct inode **system_inodes
> 
> and do
> +    system_inodes = kzalloc((NUM_SYSTEM_INODES -
> GROUP_QUOTA_SYSTEM_INODE) *
> +                                            sizeof(struct inode *)
> * osb->max_slots);

	Something like that.  I'd be more inclined to have a global
inode cache, and a per-slot cache.  No need to have max_slots spaces for
the global inodes.
	Actually, why not an rb-tree?  We just want to be able to avoid
the dir lookup, really, right?  Why pre-alloc anything?  Just have a
node:

	struct ocfs2_system_inode_cache_node {
		struct rb_node sic_node;
		int sic_type;
		int sic_slot;
		u64 sic_blkno;
		struct inode *sic_inode;
	};

Although frankly a linked-list might work just as well.
	Essentially, anything that doesn't have the lock is going to
have to re-read the block, so what we really need cached is the mapping
from sic_type+sic_slot to iget().  Caching the inode itself is just
convenience.
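
A minimal sketch of this rb-tree idea, keyed on (sic_type, sic_slot)
via the kernel rbtree API; the helper names are illustrative only and
nothing below is part of the posted patch:

#include <linux/rbtree.h>
#include <linux/fs.h>

struct ocfs2_system_inode_cache_node {
	struct rb_node sic_node;
	int sic_type;
	int sic_slot;
	u64 sic_blkno;
	struct inode *sic_inode;
};

static int ocfs2_sic_cmp(int type, int slot,
			 struct ocfs2_system_inode_cache_node *n)
{
	if (type != n->sic_type)
		return type < n->sic_type ? -1 : 1;
	if (slot != n->sic_slot)
		return slot < n->sic_slot ? -1 : 1;
	return 0;
}

/* Find the cached node for (type, slot); returns NULL on a miss. */
static struct ocfs2_system_inode_cache_node *
ocfs2_sic_lookup(struct rb_root *root, int type, int slot)
{
	struct rb_node *p = root->rb_node;

	while (p) {
		struct ocfs2_system_inode_cache_node *n =
			rb_entry(p, struct ocfs2_system_inode_cache_node,
				 sic_node);
		int cmp = ocfs2_sic_cmp(type, slot, n);

		if (cmp < 0)
			p = p->rb_left;
		else if (cmp > 0)
			p = p->rb_right;
		else
			return n;
	}
	return NULL;
}

/* Insert a node the caller has verified is not yet in the tree. */
static void ocfs2_sic_insert(struct rb_root *root,
			     struct ocfs2_system_inode_cache_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct ocfs2_system_inode_cache_node *n =
			rb_entry(*p, struct ocfs2_system_inode_cache_node,
				 sic_node);

		parent = *p;
		if (ocfs2_sic_cmp(new->sic_type, new->sic_slot, n) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&new->sic_node, parent, p);
	rb_insert_color(&new->sic_node, root);
}

On a miss the caller would fall back to the normal directory lookup /
iget() path, allocate a node, and insert it; that keeps the cache
entirely demand-driven with no pre-allocated array.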

> So we will save other system inodes such as local_alloc,
> truncate_log, local_user_quota and local_group_quota and
> actually we will never touch these inodes in the most cases(well,
> recovery is an exception). So why cache them
> if in the most case they will not be used?

	If we never touch them, we won't worry.  We've just used up a
pointer.  If we do use them, e.g. because we've recovered them, it
doesn't hurt to have them still in cache.  If you were really worried,
you could even hook into icache shrinking and drop them when kicked.
Keep the tree nodes mapping sic_type+sic_slot->sic_blkno but drop
sic_inode.  Maybe skip the ones where sic_slot is this_slot or -1.
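
As a sketch of that last idea (names again illustrative, and assuming
the cache node from the sketch above), a shrinker hook or any other
trigger could drop the pinned inodes while keeping the
type+slot -> blkno mapping around for a cheap re-read later:

/* Drop cached inodes but keep the rb-tree nodes and their sic_blkno. */
static void ocfs2_sic_drop_inodes(struct rb_root *root, int this_slot)
{
	struct rb_node *p;

	for (p = rb_first(root); p; p = rb_next(p)) {
		struct ocfs2_system_inode_cache_node *n =
			rb_entry(p, struct ocfs2_system_inode_cache_node,
				 sic_node);

		/* Keep our own slot and the -1 (global) entries pinned. */
		if (n->sic_slot == this_slot || n->sic_slot == -1)
			continue;

		if (n->sic_inode) {
			iput(n->sic_inode);
			n->sic_inode = NULL;
		}
	}
}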

> In
> http://oss.oracle.com/pipermail/ocfs2-devel/2010-June/006562.html,
> Goldwyn try to reduce our size by just
> moving the postion of some fields, so I think we should save these
> memory for the kernel. :)

	Goldwyn's work is important because we have hundreds of
thousands of each thing.  We have very few system inodes.

Joel

-- 

"Too much walking shoes worn thin.
 Too much trippin' and my soul's worn thin.
 Time to catch a ride it leaves today
 Her name is what it means.
 Too much walking shoes worn thin."

Joel Becker
Consulting Software Developer
Oracle
E-mail: joel.becker at oracle.com
Phone: (650) 506-8127

* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Tao Ma @ 2010-08-13  1:20 UTC (permalink / raw)
  To: ocfs2-devel

Joel Becker wrote:
> On Fri, Aug 13, 2010 at 08:49:16AM +0800, Tao Ma wrote:
>   
>>> 	I don't see why you don't extend the existing cache and make one
>>> cache.  Make it live the lifetime of the filesystem.  No real reason to
>>> a) have to caches or b) limit the system inodes we might cache.  If we
>>> don't have the lock we're going to re-read them anyway.
>>>       
>> You want me to do:
>> -        struct inode *system_inodes[NUM_SYSTEM_INODES];
>> +        struct inode **system_inodes
>>
>> and do
>> +    system_inodes = kzalloc((NUM_SYSTEM_INODES -
>> GROUP_QUOTA_SYSTEM_INODE) *
>> +                                            sizeof(struct inode *)
>> * osb->max_slots);
>>     
>
> 	Something like that.  I'd be more inclined to have a global
> inode cache, and a per-slot cache.  No need to have max_slots spaces for
> the global inodes.
> 	Actually, why not an rb-tree?  We just want to be able to avoid
> the dir lookup, really, right?  Why pre-alloc anything?  Just have a
> node:
>   
No, this patch isn't meant to reduce dir lookups, although it does help
with that somewhat. ;) This patch just wants to resolve the problem of
(read_disk + journal_checkpoint) when we orphan scan another slot. It
seems that I did put the most important part of this patch at the head
of the commit log. :)
> 	struct ocfs2_system_inode_cache_node {
> 		struct rb_node sic_node;
> 		int sic_type;
> 		int sic_slot;
> 		u64 sic_blkno;
> 		struct inode *sic_inode;
> 	};
>
> Although frankly a linked-list might work just as well.
>   
An rb-tree may be too much for us. I would prefer what Sunil describes
in another mail. Just using a pointer array should be fine for us;
what we lose is just some pointers that will never be initialized.

Regards,
Tao

* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Tao Ma @ 2010-08-13  1:21 UTC (permalink / raw)
  To: ocfs2-devel

Sunil Mushran wrote:
> On 08/12/2010 02:43 AM, Joel Becker wrote:
>> On Thu, Aug 12, 2010 at 05:03:16PM +0800, Tao Ma wrote:
>>> In ocfs2, we now only cache the inodes for global system file
>>> and the system inodes for our own slot. But we have some cases
>>> that we may need to access system inodes of other nodes, such
>>> as orphan scan, inode steal etc.
>>
>>     I don't see why you don't extend the existing cache and make one
>> cache.  Make it live the lifetime of the filesystem.  No real reason to
>> a) have to caches or b) limit the system inodes we might cache.  If we
>> don't have the lock we're going to re-read them anyway.
>
> Yeah... I have to agree. Will make it more readable.
>
> enum {
>         BAD_BLOCK_SYSTEM_INODE = 0,
>         GLOBAL_INODE_ALLOC_SYSTEM_INODE,
>         SLOT_MAP_SYSTEM_INODE,
> #define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
>         HEARTBEAT_SYSTEM_INODE,
>         GLOBAL_BITMAP_SYSTEM_INODE,
>         USER_QUOTA_SYSTEM_INODE,
>         GROUP_QUOTA_SYSTEM_INODE,
> #define OCFS2_LAST_GLOBAL_SYSTEM_INODE GROUP_QUOTA_SYSTEM_INODE
> #define OCFS2_FIRST_LOCAL_SYSTEM_INODE ORPHAN_DIR_SYSTEM_INODE
>         ORPHAN_DIR_SYSTEM_INODE,
>         EXTENT_ALLOC_SYSTEM_INODE,
>         INODE_ALLOC_SYSTEM_INODE,
>         JOURNAL_SYSTEM_INODE,
>         LOCAL_ALLOC_SYSTEM_INODE,
>         TRUNCATE_LOG_SYSTEM_INODE,
>         LOCAL_USER_QUOTA_SYSTEM_INODE,
>         LOCAL_GROUP_QUOTA_SYSTEM_INODE,
>         NUM_SYSTEM_INODES
> };
> #define NUM_LOCAL_SYSTEM_INODES (NUM_SYSTEM_INODES - 
> OCFS2_FIRST_LOCAL_SYSTEM_INODE)
>
> struct ocfs2_super {
> ...
>         struct inode 
> *global_system_inodes[OCFS2_LAST_GLOBAL_SYSTEM_INODE];
>         struct inode **local_system_inodes;
> ...
> };
>
> local_system_inodes = kzalloc(sizeof(struct inode *) * osb->num_slots *
>                             NUM_LOCAL_SYSTEM_INODES;
>
>         /* avoid the lookup if cached in local system file array */
>         if (is_in_system_inode_array(osb, type, slot))
>                 arr = &(osb->system_inodes[type]);
>
> change to:
>
>         if (is_global_system_inode(type)
>                 arr = &(osb->global_system_inodes[type]);
>         else {
>                 BUG_ON(slot == OCFS2_INVALID_SLOT);
>                 BUG_ON(type < OCFS2_FIRST_LOCAL_SYSTEM_INODE ||
>                         type > LOCAL_GROUP_QUOTA_SYSTEM_INODE);
>                 tmp = (slot * NUM_LOCAL_SYSTEM_INODES) +
>                       (type - OCFS2_FIRST_LOCAL_SYSTEM_INODE);
>                 arr = &(osb->local_system_inodes[tmp];
>         }
>
> How does this look?
Oh, since both of you prefer a simple system_inode array, I will rework
it. Thanks.

Regards,
Tao

* [Ocfs2-devel] [PATCH] ocfs2: Cache some system inodes of other nodes.
From: Joel Becker @ 2010-08-13  1:28 UTC (permalink / raw)
  To: ocfs2-devel

On Fri, Aug 13, 2010 at 09:21:26AM +0800, Tao Ma wrote:
> Sunil Mushran wrote:
> >        if (is_global_system_inode(type)
> >                arr = &(osb->global_system_inodes[type]);
> >        else {
> >                BUG_ON(slot == OCFS2_INVALID_SLOT);
> >                BUG_ON(type < OCFS2_FIRST_LOCAL_SYSTEM_INODE ||
> >                        type > LOCAL_GROUP_QUOTA_SYSTEM_INODE);
> >                tmp = (slot * NUM_LOCAL_SYSTEM_INODES) +
> >                      (type - OCFS2_FIRST_LOCAL_SYSTEM_INODE);
> >                arr = &(osb->local_system_inodes[tmp];
> >        }
> >
> >How does this look?
> oh, since both of you prefer a simple system_inode array, I will
> rework it. Thanks.

	In Sunil's proposal, you can easily keep the "which remote
inodes to keep" question very local.

		if ((slot == my_slot) ||
		    (type == TYPE1) ||
		    (type == TYPE2) ...) {
			local_system_inodes[x] = inode;

Joel


-- 

"Conservative, n.  A statesman who is enamoured of existing evils,
 as distinguished from the Liberal, who wishes to replace them
 with others."
	- Ambrose Bierce, The Devil's Dictionary

Joel Becker
Consulting Software Developer
Oracle
E-mail: joel.becker at oracle.com
Phone: (650) 506-8127
