* [RFC] separate otw data from host data
@ 2009-11-06  2:03 Noah Watkins
  2009-11-06  5:33 ` Sage Weil
  0 siblings, 1 reply; 12+ messages in thread
From: Noah Watkins @ 2009-11-06  2:03 UTC (permalink / raw)
  To: ceph-devel; +Cc: Noah Watkins

[-- Attachment #1: Type: text/plain, Size: 689 bytes --]

Sage,

A large number of le32_to_cpu calls (and the like) are scattered
throughout the kernel client wherever packed structs that go OTW are
referenced. I'd argue the conversion should happen in only two places:
just before a struct is sent and just after it is received. That lets
non-packed versions of the structs be used everywhere on the local
machine, with alignment optimized by gcc, and it de-clutters the code
of all the conversion routines. The attached patch (not ready for use)
demonstrates this for struct ceph_file_layout by introducing struct
__ceph_file_layout for the OTW form, converting only at the
send/receive boundary.
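
As an illustration of the alignment point, here is a standalone
userspace sketch (not part of the patch; the 64-bit member is a
hypothetical future extension) showing what the packed attribute costs:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* wire form: packed, no padding, later members may land unaligned */
struct layout_wire {
        uint32_t stripe_unit;
        uint64_t future_field;
} __attribute__ ((packed));

/* host form: gcc pads so each member gets its natural alignment */
struct layout_host {
        uint32_t stripe_unit;
        uint64_t future_field;
};

int main(void)
{
        /* on a typical 64-bit ABI: size 12, offset 4 */
        printf("wire: size=%zu offset=%zu\n",
               sizeof(struct layout_wire),
               offsetof(struct layout_wire, future_field));
        /* and here: size 16, offset 8 */
        printf("host: size=%zu offset=%zu\n",
               sizeof(struct layout_host),
               offsetof(struct layout_host, future_field));
        return 0;
}

(With the current all-__le32 layout struct the two forms are identical;
the difference only shows up once a wider field is added.)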

Comments, suggestions?

-n



[-- Attachment #2: use-non-packed-fl.diff --]
[-- Type: application/octet-stream, Size: 11268 bytes --]

diff --git a/caps.c b/caps.c
index 8b863db..1c2392b 100644
--- a/caps.c
+++ b/caps.c
@@ -2272,7 +2272,7 @@ restart:
 	cap->seq = seq;
 
 	/* file layout may have changed */
-	ci->i_layout = grant->layout;
+	ceph_layout_otw_to_cpu(&ci->i_layout, &grant->layout);
 
 	/* revocation, grant, or no-op? */
 	if (cap->issued & ~newcaps) {
diff --git a/ceph_fs.c b/ceph_fs.c
index a950b40..1ee72a1 100644
--- a/ceph_fs.c
+++ b/ceph_fs.c
@@ -3,6 +3,8 @@
  */
 #include "types.h"
 
+#if 0
+/* UNUSED: retained for reference */
 /*
  * return true if @layout appears to be valid
  */
@@ -25,7 +27,7 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
 		return 0;
 	return 1;
 }
-
+#endif
 
 int ceph_flags_to_mode(int flags)
 {
diff --git a/ceph_fs.h b/ceph_fs.h
index ae52382..2ecaf6a 100644
--- a/ceph_fs.h
+++ b/ceph_fs.h
@@ -51,31 +51,66 @@
 
 unsigned int ceph_full_name_hash(const char *name, unsigned int len);
 
-
 /*
- * ceph_file_layout - describe data layout for a file/inode
+ * struct __ceph_file_layout - file/object mapping parameters (OTW)
+ *
+ * @fl_stripe_unit:  stripe unit, in bytes. page size multiple.
+ * @fl_stripe_count: over this many objects
+ * @fl_object_size:  until objects are this big, then move to next
+ *                   set of objects
+ * @fl_cas_hash:     0 = none; 1 = sha256
+ * @fl_object_stripe_unit: for per-object parity, if any
+ * @fl_pg_preferred: preferred primary for pg (-1 for none)
+ * @fl_pg_pool:      namespace, crush ruleset, rep level
+ *
+ * Member names are underscore-prefixed to emphasize their difference from
+ * struct ceph_file_layout, the non-packed, internal-use version.
  */
-struct ceph_file_layout {
+struct __ceph_file_layout {
 	/* file -> object mapping */
-	__le32 fl_stripe_unit;     /* stripe unit, in bytes.  must be multiple
-				      of page size. */
-	__le32 fl_stripe_count;    /* over this many objects */
-	__le32 fl_object_size;     /* until objects are this big, then move to
-				      new objects */
-	__le32 fl_cas_hash;        /* 0 = none; 1 = sha256 */
-
+	__le32 _fl_stripe_unit;
+	__le32 _fl_stripe_count;
+	__le32 _fl_object_size;
+	__le32 _fl_cas_hash;
 	/* pg -> disk layout */
-	__le32 fl_object_stripe_unit;  /* for per-object parity, if any */
-
+	__le32 _fl_object_stripe_unit;
 	/* object -> pg layout */
-	__le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
-	__le32 fl_pg_pool;      /* namespace, crush ruleset, rep level */
+	__le32 _fl_pg_preferred;
+	__le32 _fl_pg_pool;
 } __attribute__ ((packed));
 
-#define CEPH_MIN_STRIPE_UNIT 65536
+/*
+ * struct ceph_file_layout - file/object mapping parameters (internal)
+ *
+ * Stores information from struct __ceph_file_layout converted to native data
+ * types, and lets gcc do optimized packing/alignment.
+ *
+ * See struct __ceph_file_layout for description of members common to both
+ * structs.
+ */
+struct ceph_file_layout {
+	u32 fl_stripe_unit;
+	u32 fl_stripe_count;
+	u32 fl_object_size;
+	u32 fl_cas_hash;
+	u32 fl_object_stripe_unit;
+	s32 fl_pg_preferred;
+	u32 fl_pg_pool;
+};
 
-int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
+static inline void ceph_layout_otw_to_cpu(struct ceph_file_layout *n,
+		struct __ceph_file_layout *o)
+{
+	n->fl_stripe_unit	  = le32_to_cpu(o->_fl_stripe_unit);
+	n->fl_stripe_count	  = le32_to_cpu(o->_fl_stripe_count);
+	n->fl_object_size	  = le32_to_cpu(o->_fl_object_size);
+	n->fl_cas_hash		  = le32_to_cpu(o->_fl_cas_hash);
+	n->fl_object_stripe_unit  = le32_to_cpu(o->_fl_object_stripe_unit);
+	n->fl_pg_preferred 	  = (s32)le32_to_cpu(o->_fl_pg_preferred);
+	n->fl_pg_pool 		  = le32_to_cpu(o->_fl_pg_pool);
+}
 
+#define CEPH_MIN_STRIPE_UNIT 65536
 
 /*********************************************
  * message layer
@@ -316,7 +351,7 @@ union ceph_mds_request_args {
 		__le32 flags;
 	} __attribute__ ((packed)) setxattr;
 	struct {
-		struct ceph_file_layout layout;
+		struct __ceph_file_layout layout;
 	} __attribute__ ((packed)) setlayout;
 } __attribute__ ((packed));
 
@@ -386,7 +421,7 @@ struct ceph_mds_reply_inode {
 	__le64 version;                /* inode version */
 	__le64 xattr_version;          /* version for xattr blob */
 	struct ceph_mds_reply_cap cap; /* caps issued for this inode */
-	struct ceph_file_layout layout;
+	struct __ceph_file_layout layout;
 	struct ceph_timespec ctime, mtime, atime;
 	__le32 time_warp_seq;
 	__le64 size, max_size, truncate_size;
@@ -549,7 +584,7 @@ struct ceph_mds_caps {
 	__le64 size, max_size, truncate_size;
 	__le32 truncate_seq;
 	struct ceph_timespec mtime, atime, ctime;
-	struct ceph_file_layout layout;
+	struct __ceph_file_layout layout;
 	__le32 time_warp_seq;
 } __attribute__ ((packed));
 
diff --git a/inode.c b/inode.c
index 036873c..de81da0 100644
--- a/inode.c
+++ b/inode.c
@@ -574,8 +574,11 @@ static int fill_inode(struct inode *inode,
 			    &ctime, &mtime, &atime);
 
 	ci->i_max_size = le64_to_cpu(info->max_size);
-	ci->i_layout = info->layout;
-	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+
+	/* Convert OTW layout struct to internal representation */
+	ceph_layout_otw_to_cpu(&ci->i_layout, &info->layout);
+
+	inode->i_blkbits = fls(ci->i_layout.fl_stripe_unit) - 1;
 
 	/* xattrs */
 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
diff --git a/ioctl.c b/ioctl.c
index 4c33e19..d4848b4 100644
--- a/ioctl.c
+++ b/ioctl.c
@@ -19,16 +19,18 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
 	int err;
 
 	err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
-	if (!err) {
-		l.stripe_unit = ceph_file_layout_su(ci->i_layout);
-		l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
-		l.object_size = ceph_file_layout_object_size(ci->i_layout);
-		l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-		if (copy_to_user(arg, &l, sizeof(l)))
-			return -EFAULT;
-	}
+	if (err)
+		return err;
 
-	return err;
+	l.stripe_unit = ci->i_layout.fl_stripe_unit;
+	l.stripe_count = ci->i_layout.fl_stripe_count;
+	l.object_size = ci->i_layout.fl_object_size;
+	l.data_pool = ci->i_layout.fl_pg_pool;
+
+	if (copy_to_user(arg, &l, sizeof(l)))
+		return -EFAULT;
+
+	return 0;
 }
 
 static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
@@ -38,6 +40,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
 	struct ceph_mds_request *req;
 	struct ceph_ioctl_layout l;
+	struct __ceph_file_layout *layout;
 	int err, i;
 
 	/* copy and validate */
@@ -72,14 +75,12 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 	req->r_inode = igrab(inode);
 	req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
 
-	req->r_args.setlayout.layout.fl_stripe_unit =
-		cpu_to_le32(l.stripe_unit);
-	req->r_args.setlayout.layout.fl_stripe_count =
-		cpu_to_le32(l.stripe_count);
-	req->r_args.setlayout.layout.fl_object_size =
-		cpu_to_le32(l.object_size);
-	req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
-	req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32((s32)-1);
+	layout = &req->r_args.setlayout.layout;
+	layout->_fl_stripe_unit = cpu_to_le32(l.stripe_unit);
+	layout->_fl_stripe_count = cpu_to_le32(l.stripe_count);
+	layout->_fl_object_size = cpu_to_le32(l.object_size);
+	layout->_fl_pg_pool = cpu_to_le32(l.data_pool);
+	layout->_fl_pg_preferred = cpu_to_le32((s32)-1);
 
 	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
 	ceph_mdsc_put_request(req);
@@ -109,8 +110,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
 	ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
 				      &dl.object_no, &dl.object_offset, &olen);
 	dl.file_offset -= dl.object_offset;
-	dl.object_size = ceph_file_layout_object_size(ci->i_layout);
-	dl.block_size = ceph_file_layout_su(ci->i_layout);
+	dl.object_size = ci->i_layout.fl_object_size;
+	dl.block_size = ci->i_layout.fl_stripe_unit;
 
 	/* block_offset = object_offset % block_size */
 	tmp = dl.object_offset;
diff --git a/osdmap.c b/osdmap.c
index 8b0cd11..226131d 100644
--- a/osdmap.c
+++ b/osdmap.c
@@ -746,9 +746,9 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 				   u64 *ono,
 				   u64 *oxoff, u64 *oxlen)
 {
-	u32 osize = le32_to_cpu(layout->fl_object_size);
-	u32 su = le32_to_cpu(layout->fl_stripe_unit);
-	u32 sc = le32_to_cpu(layout->fl_stripe_count);
+	u32 osize = layout->fl_object_size;
+	u32 su = layout->fl_stripe_unit;
+	u32 sc = layout->fl_stripe_count;
 	u32 bl, stripeno, stripepos, objsetno;
 	u32 su_per_object;
 	u64 t, su_offset;
@@ -800,8 +800,8 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
 {
 	unsigned num, num_mask;
 	struct ceph_pg pgid;
-	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
-	int poolid = le32_to_cpu(fl->fl_pg_pool);
+	s32 preferred = fl->fl_pg_preferred;
+	int poolid = fl->fl_pg_pool;
 	struct ceph_pg_pool_info *pool;
 	unsigned ps;
 
diff --git a/osdmap.h b/osdmap.h
index c4af841..05fb0cc 100644
--- a/osdmap.h
+++ b/osdmap.h
@@ -53,35 +53,6 @@ struct ceph_osdmap {
 	struct crush_map *crush;
 };
 
-/*
- * file layout helpers
- */
-#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
-#define ceph_file_layout_stripe_count(l) \
-	((__s32)le32_to_cpu((l).fl_stripe_count))
-#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
-#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
-#define ceph_file_layout_object_su(l) \
-	((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_preferred(l) \
-	((__s32)le32_to_cpu((l).fl_pg_preferred))
-#define ceph_file_layout_pg_pool(l) \
-	((__s32)le32_to_cpu((l).fl_pg_pool))
-
-static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
-{
-	return le32_to_cpu(l->fl_stripe_unit) *
-		le32_to_cpu(l->fl_stripe_count);
-}
-
-/* "period" == bytes before i start on a new set of objects */
-static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
-{
-	return le32_to_cpu(l->fl_object_size) *
-		le32_to_cpu(l->fl_stripe_count);
-}
-
-
 static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
 {
 	return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP);
diff --git a/xattr.c b/xattr.c
index 1a48a55..ba6039d 100644
--- a/xattr.c
+++ b/xattr.c
@@ -95,13 +95,12 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 
 	ret = snprintf(val, size,
 		"chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
-		(unsigned long long)ceph_file_layout_su(ci->i_layout),
-		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
-		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-	if (ceph_file_layout_pg_preferred(ci->i_layout))
+		(unsigned long long)ci->i_layout.fl_stripe_unit,
+		(unsigned long long)ci->i_layout.fl_stripe_count,
+		(unsigned long long)ci->i_layout.fl_object_size);
+	if (ci->i_layout.fl_pg_preferred)
 		ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
-			    (unsigned long long)ceph_file_layout_pg_preferred(
-				    ci->i_layout));
+			    (unsigned long long)ci->i_layout.fl_pg_preferred);
 	return ret;
 }
 


* Re: [RFC] separate otw data from host data
  2009-11-06  2:03 [RFC] separate otw data from host data Noah Watkins
@ 2009-11-06  5:33 ` Sage Weil
  2009-11-06  6:09   ` Noah Watkins
  0 siblings, 1 reply; 12+ messages in thread
From: Sage Weil @ 2009-11-06  5:33 UTC (permalink / raw)
  To: Noah Watkins; +Cc: Noah Watkins, ceph-devel

On Thu, 5 Nov 2009, Noah Watkins wrote:

> Sage,
> 
> A large number of le32_to_cpu calls (and the like) are scattered
> throughout the kernel client wherever packed structs that go OTW are
> referenced. I'd argue the conversion should happen in only two places:
> just before a struct is sent and just after it is received. That lets
> non-packed versions of the structs be used everywhere on the local
> machine, with alignment optimized by gcc, and it de-clutters the code
> of all the conversion routines. The attached patch (not ready for use)
> demonstrates this for struct ceph_file_layout by introducing struct
> __ceph_file_layout for the OTW form, converting only at the
> send/receive boundary.
> 
> Comments, suggestions?

With the caveat that the performance advantages here are pretty minimal 
(most users are little endian, the struct is I think already aligned), 
this does clean things up a bit.

I wouldn't change the types in ceph_fs.[ch], msgr.h, rados.h if you can 
avoid it... these are all OTW types and shared between kernel and user 
space.  Maybe make __ceph_file_layout the internal type.  Maybe lose the 
fl_ prefix to catch accidental misuse.

Are there other types you were looking at?  I think most others are in the 
decode/use once category anyway?
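
By "decode/use once" I mean roughly this shape (hypothetical handler and
message type, just to illustrate): fields are swabbed into locals at the
point of use, and the wire struct itself is never stored anywhere:

static void handle_foo(struct ceph_msg *msg)
{
        struct ceph_foo_wire *w = msg->front.iov_base;
        u64 version = le64_to_cpu(w->version);
        u32 flags = le32_to_cpu(w->flags);

        /* only native values live past this point */
        process_foo(version, flags);
}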

sage

------------------------------------------------------------------------------
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with
Crystal Reports now.  http://p.sf.net/sfu/bobj-july

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [RFC] separate otw data from host data
  2009-11-06  5:33 ` Sage Weil
@ 2009-11-06  6:09   ` Noah Watkins
  2009-11-06  6:37     ` Sage Weil
  0 siblings, 1 reply; 12+ messages in thread
From: Noah Watkins @ 2009-11-06  6:09 UTC (permalink / raw)
  To: Sage Weil; +Cc: Noah Watkins, ceph-devel

> With the caveat that the performance advantages here are pretty minimal
> (most users are little endian, the struct is I think already aligned),
> this does clean things up a bit.
Yeah, it probably is aligned in its current form. Extending structs in
the future might alter their aligned-by-chance factor, and separating
the OTW/internal types will avoid this. I don't know what the
performance degradation from unaligned memory accesses feels like, but
in the case of file layout information it may be the most frequently
accessed Ceph data (the mapping calculation is done for every access).
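
For what it's worth, where a wire field can land unaligned the usual
kernel idiom is get_unaligned_le32(); a quick sketch (hypothetical
helper, using the v1 struct names):

#include <asm/unaligned.h>

static u32 layout_stripe_unit(const struct __ceph_file_layout *l)
{
        /* a plain load on x86, byte loads on strict-alignment arches */
        return get_unaligned_le32(&l->_fl_stripe_unit);
}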

> I wouldn't change the types in ceph_fs.[ch], msgr.h, rados.h if you can
> avoid it... these are all OTW types and shared between kernel and user
> space.  Maybe make __ceph_file_layout the internal type.
I'll migrate the internal types to a new location. Any suggestion? If
this is the only case of internal/OTW types then a new header is
probably overkill.

> Maybe lose the fl_ prefix to catch accidental misuse.
Sure

> Are there other types you were looking at?  I think most others are in
> the decode/use once category anyway?
In general I think the internal/OTW type separation is appropriate
everywhere it applies, if for nothing else than readability, but
especially for easing future extensions. I think of it as a separation
between core Ceph and Ceph's user-space interface (OTW and ioctl).

I haven't done a survey of other similar instances, but a quick grep
shows a large number of endianness conversions, and so some potential
for clean-up.

Thoughts?

-n



* Re: [RFC] separate otw data from host data
  2009-11-06  6:09   ` Noah Watkins
@ 2009-11-06  6:37     ` Sage Weil
  2009-11-06  7:31       ` Noah Watkins
                         ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: Sage Weil @ 2009-11-06  6:37 UTC (permalink / raw)
  To: Noah Watkins; +Cc: Noah Watkins, ceph-devel

On Thu, 5 Nov 2009, Noah Watkins wrote:

> > With the caveat that the performance advantages here are pretty minimal
> > (most users are little endian, the struct is I think already aligned),
> > this does clean things up a bit.
> Yeah, it probably is aligned in its current form. Extending structs in
> the future might alter their aligned-by-chance factor, and separating
> the OTW/internal types will avoid this. I don't know what the
> performance degradation from unaligned memory accesses feels like, but
> in the case of file layout information it may be the most frequently
> accessed Ceph data (the mapping calculation is done for every access).

Right, just keep in mind that swabbing these few bytes is nothing in 
comparison to the other work required per I/O: allocating and filling out 
a request struct, building the on-wire message, sending a pile of data 
over the wire, waiting for the ack, etc.  This is really about code 
readability, not speed.

> > I wouldn't change the types in ceph_fs.[ch], msgr.h, rados.h if you can
> > avoid it... these are all OTW types and shared between kernel and user
> > space.  Maybe make __ceph_file_layout the internal type.
> I'll migrate the internal types to a new location. Any suggestion? If
> this is the only case of internal/OTW types then a new header is
> probably overkill.
> 
> > Maybe lose the fl_ prefix to catch accidental misuse.
> Sure
> 
> > Are there other types you were looking at?  I think most others are in
> > the decode/use once category anyway?
> In general I think the internal/OTW type separation is appropriate
> everywhere it applies, if for nothing else than readability, but
> especially for easing future extensions. I think of it as a separation
> between core Ceph and Ceph's user-space interface (OTW and ioctl).
> 
> I haven't done a survey of other similar instances, but a quick grep
> shows a large number of endianness conversions, and so some potential
> for clean-up.

Yeah, but like I said, they mostly all live in the message handlers that 
are in the business of decoding the on-wire structs.  The layout struct is 
pretty much the only one that is kept around in its prior form (swabbed or 
not).  The only other exception is messenger.c itself, which works with 
the request headers directly, but again I'm not sure how much it would 
improve readability there.

My biggest concern with all of this is really just that the type names 
don't clearly indicate which ones are OTW and which aren't.  The most 
helpful cleanup may just be to include _otw_ or something similar in the 
ceph_fs/msgr/rados headers.  (Though it's really on-disk and/or 
over-the-wire.)  

I suspect that's the way to go?  It'd also avoid a type called 
__ceph_file_layout, which isn't particularly obvious.  I've been using __ 
throughout to mean something like "I'm already holding the relevant 
lock(s)", but that's not much better (though hopefully it's at least 
pretty consistent).
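
For anyone unfamiliar, the __ convention looks roughly like this
(hypothetical names, just to illustrate):

struct thing {
        spinlock_t lock;
        int value;
};

/* __ variant: caller already holds t->lock */
static void __thing_update(struct thing *t)
{
        t->value++;
}

static void thing_update(struct thing *t)
{
        spin_lock(&t->lock);
        __thing_update(t);
        spin_unlock(&t->lock);
}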

sage


* Re: [RFC] separate otw data from host data
  2009-11-06  6:37     ` Sage Weil
@ 2009-11-06  7:31       ` Noah Watkins
  2009-11-06 17:47       ` Zach Brown
  2009-11-06 19:29       ` [RFC] v2 " Noah Watkins
  2 siblings, 0 replies; 12+ messages in thread
From: Noah Watkins @ 2009-11-06  7:31 UTC (permalink / raw)
  To: Sage Weil; +Cc: Noah Watkins, ceph-devel

> I suspect that's the way to go?  It'd also avoid a type called
> __ceph_file_layout, which isn't particularly obvious.
Good point; I'll make another pass on this, and we'll see how it
looks.

I've been motivated to clean up some of the file layout and object
mapping code in the client because I've been thinking a lot about the
idea of generalized mappings that go beyond the object/stripe paradigm,
and perhaps have non-uniform layouts determined programmatically on a
per-file basis. There are a lot of other issues... hah.

> I've been using __ throughout to mean something like "I'm already
> holding the relevant lock(s)", but that's not much better (though
> hopefully it's at least pretty consistent).
Yeah, that's pretty common practice. Using _otw_ is worth looking at, but
there probably isn't a really great way to solve the naming in the long
run. Just look at the mini-summit at the Japan Linux Symposium that
Thomas Gleixner held for the single purpose of naming spinlock variants
in the -rt patch.

I've been hacking on things as I run across them, but do you have a bug
tracker or to-do list online?

-n



* Re: [RFC] separate otw data from host data
  2009-11-06  6:37     ` Sage Weil
  2009-11-06  7:31       ` Noah Watkins
@ 2009-11-06 17:47       ` Zach Brown
  2009-11-06 19:29       ` [RFC] v2 " Noah Watkins
  2 siblings, 0 replies; 12+ messages in thread
From: Zach Brown @ 2009-11-06 17:47 UTC (permalink / raw)
  To: Sage Weil; +Cc: Noah Watkins, ceph-devel


> My biggest concern with all of this is really just that the type names 
> don't clearly indicate which ones are OTW and which aren't.  The most 
> helpful cleanup may just be to include _otw_ or something similar in the 
> ceph_fs/msgr/rados headers.  (Though it's really on-disk and/or 
> over-the-wire.)  

In the past I've used _disk or _wire suffixes in various projects.  It
works out pretty well, I think.

- z


* [RFC] v2 separate otw data from host data
  2009-11-06  6:37     ` Sage Weil
  2009-11-06  7:31       ` Noah Watkins
  2009-11-06 17:47       ` Zach Brown
@ 2009-11-06 19:29       ` Noah Watkins
  2009-11-06 19:44         ` Sage Weil
  2 siblings, 1 reply; 12+ messages in thread
From: Noah Watkins @ 2009-11-06 19:29 UTC (permalink / raw)
  To: Sage Weil; +Cc: Noah Watkins, ceph-devel

[-- Attachment #1: Type: text/plain, Size: 168 bytes --]

Updated patch with the _wire suffix suggested by Zach, and moved the
internal type to types.h.
Looking over it again, perhaps just using some accessor macros is best.
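
Roughly what the removed osdmap.h helpers did, but taking a pointer to
the wire type (hypothetical names):

#define ceph_fl_stripe_unit(l)  le32_to_cpu((l)->fl_stripe_unit)
#define ceph_fl_object_size(l)  le32_to_cpu((l)->fl_object_size)
#define ceph_fl_pg_pool(l)      le32_to_cpu((l)->fl_pg_pool)

where l is a struct ceph_file_layout_wire *.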

-n


[-- Attachment #2: use-non-packed-fl-v2.diff --]
[-- Type: application/octet-stream, Size: 11262 bytes --]

diff --git a/caps.c b/caps.c
index 8b863db..1c2392b 100644
--- a/caps.c
+++ b/caps.c
@@ -2272,7 +2272,7 @@ restart:
 	cap->seq = seq;
 
 	/* file layout may have changed */
-	ci->i_layout = grant->layout;
+	ceph_layout_otw_to_cpu(&ci->i_layout, &grant->layout);
 
 	/* revocation, grant, or no-op? */
 	if (cap->issued & ~newcaps) {
diff --git a/ceph_fs.c b/ceph_fs.c
index a950b40..1ee72a1 100644
--- a/ceph_fs.c
+++ b/ceph_fs.c
@@ -3,6 +3,8 @@
  */
 #include "types.h"
 
+#if 0
+/* UNUSED: retained for reference */
 /*
  * return true if @layout appears to be valid
  */
@@ -25,7 +27,7 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
 		return 0;
 	return 1;
 }
-
+#endif
 
 int ceph_flags_to_mode(int flags)
 {
diff --git a/ceph_fs.h b/ceph_fs.h
index ae52382..4f6a07f 100644
--- a/ceph_fs.h
+++ b/ceph_fs.h
@@ -51,32 +51,33 @@
 
 unsigned int ceph_full_name_hash(const char *name, unsigned int len);
 
-
 /*
- * ceph_file_layout - describe data layout for a file/inode
+ * struct ceph_file_layout_wire - file/object mapping parameters (OTW)
+ *
+ * @fl_stripe_unit:  stripe unit, in bytes. page size multiple.
+ * @fl_stripe_count: over this many objects
+ * @fl_object_size:  until objects are this big, then move to next
+ *                   set of objects
+ * @fl_cas_hash:     0 = none; 1 = sha256
+ * @fl_object_stripe_unit: for per-object parity, if any
+ * @fl_pg_preferred: preferred primary for pg (-1 for none)
+ * @fl_pg_pool:      namespace, crush ruleset, rep level
  */
-struct ceph_file_layout {
+struct ceph_file_layout_wire {
 	/* file -> object mapping */
-	__le32 fl_stripe_unit;     /* stripe unit, in bytes.  must be multiple
-				      of page size. */
-	__le32 fl_stripe_count;    /* over this many objects */
-	__le32 fl_object_size;     /* until objects are this big, then move to
-				      new objects */
-	__le32 fl_cas_hash;        /* 0 = none; 1 = sha256 */
-
+	__le32 fl_stripe_unit;
+	__le32 fl_stripe_count;
+	__le32 fl_object_size;
+	__le32 fl_cas_hash;
 	/* pg -> disk layout */
-	__le32 fl_object_stripe_unit;  /* for per-object parity, if any */
-
+	__le32 fl_object_stripe_unit;
 	/* object -> pg layout */
-	__le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
-	__le32 fl_pg_pool;      /* namespace, crush ruleset, rep level */
+	__le32 fl_pg_preferred;
+	__le32 fl_pg_pool;
 } __attribute__ ((packed));
 
 #define CEPH_MIN_STRIPE_UNIT 65536
 
-int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
-
-
 /*********************************************
  * message layer
  */
@@ -316,7 +317,7 @@ union ceph_mds_request_args {
 		__le32 flags;
 	} __attribute__ ((packed)) setxattr;
 	struct {
-		struct ceph_file_layout layout;
+		struct ceph_file_layout_wire layout;
 	} __attribute__ ((packed)) setlayout;
 } __attribute__ ((packed));
 
@@ -386,7 +387,7 @@ struct ceph_mds_reply_inode {
 	__le64 version;                /* inode version */
 	__le64 xattr_version;          /* version for xattr blob */
 	struct ceph_mds_reply_cap cap; /* caps issued for this inode */
-	struct ceph_file_layout layout;
+	struct ceph_file_layout_wire layout;
 	struct ceph_timespec ctime, mtime, atime;
 	__le32 time_warp_seq;
 	__le64 size, max_size, truncate_size;
@@ -549,7 +550,7 @@ struct ceph_mds_caps {
 	__le64 size, max_size, truncate_size;
 	__le32 truncate_seq;
 	struct ceph_timespec mtime, atime, ctime;
-	struct ceph_file_layout layout;
+	struct ceph_file_layout_wire layout;
 	__le32 time_warp_seq;
 } __attribute__ ((packed));
 
diff --git a/inode.c b/inode.c
index 036873c..de81da0 100644
--- a/inode.c
+++ b/inode.c
@@ -574,8 +574,11 @@ static int fill_inode(struct inode *inode,
 			    &ctime, &mtime, &atime);
 
 	ci->i_max_size = le64_to_cpu(info->max_size);
-	ci->i_layout = info->layout;
-	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+
+	/* Convert OTW layout struct to internal representation */
+	ceph_layout_otw_to_cpu(&ci->i_layout, &info->layout);
+
+	inode->i_blkbits = fls(ci->i_layout.fl_stripe_unit) - 1;
 
 	/* xattrs */
 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
diff --git a/ioctl.c b/ioctl.c
index 4c33e19..55201f8 100644
--- a/ioctl.c
+++ b/ioctl.c
@@ -19,16 +19,18 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
 	int err;
 
 	err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
-	if (!err) {
-		l.stripe_unit = ceph_file_layout_su(ci->i_layout);
-		l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
-		l.object_size = ceph_file_layout_object_size(ci->i_layout);
-		l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-		if (copy_to_user(arg, &l, sizeof(l)))
-			return -EFAULT;
-	}
+	if (err)
+		return err;
 
-	return err;
+	l.stripe_unit = ci->i_layout.fl_stripe_unit;
+	l.stripe_count = ci->i_layout.fl_stripe_count;
+	l.object_size = ci->i_layout.fl_object_size;
+	l.data_pool = ci->i_layout.fl_pg_pool;
+
+	if (copy_to_user(arg, &l, sizeof(l)))
+		return -EFAULT;
+
+	return 0;
 }
 
 static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
@@ -38,6 +40,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
 	struct ceph_mds_request *req;
 	struct ceph_ioctl_layout l;
+	struct ceph_file_layout_wire *layout;
 	int err, i;
 
 	/* copy and validate */
@@ -72,14 +75,12 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 	req->r_inode = igrab(inode);
 	req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
 
-	req->r_args.setlayout.layout.fl_stripe_unit =
-		cpu_to_le32(l.stripe_unit);
-	req->r_args.setlayout.layout.fl_stripe_count =
-		cpu_to_le32(l.stripe_count);
-	req->r_args.setlayout.layout.fl_object_size =
-		cpu_to_le32(l.object_size);
-	req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
-	req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32((s32)-1);
+	layout = &req->r_args.setlayout.layout;
+	layout->fl_stripe_unit = cpu_to_le32(l.stripe_unit);
+	layout->fl_stripe_count = cpu_to_le32(l.stripe_count);
+	layout->fl_object_size = cpu_to_le32(l.object_size);
+	layout->fl_pg_pool = cpu_to_le32(l.data_pool);
+	layout->fl_pg_preferred = cpu_to_le32((s32)-1);
 
 	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
 	ceph_mdsc_put_request(req);
@@ -109,8 +110,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
 	ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
 				      &dl.object_no, &dl.object_offset, &olen);
 	dl.file_offset -= dl.object_offset;
-	dl.object_size = ceph_file_layout_object_size(ci->i_layout);
-	dl.block_size = ceph_file_layout_su(ci->i_layout);
+	dl.object_size = ci->i_layout.fl_object_size;
+	dl.block_size = ci->i_layout.fl_stripe_unit;
 
 	/* block_offset = object_offset % block_size */
 	tmp = dl.object_offset;
diff --git a/osdmap.c b/osdmap.c
index 8b0cd11..226131d 100644
--- a/osdmap.c
+++ b/osdmap.c
@@ -746,9 +746,9 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 				   u64 *ono,
 				   u64 *oxoff, u64 *oxlen)
 {
-	u32 osize = le32_to_cpu(layout->fl_object_size);
-	u32 su = le32_to_cpu(layout->fl_stripe_unit);
-	u32 sc = le32_to_cpu(layout->fl_stripe_count);
+	u32 osize = layout->fl_object_size;
+	u32 su = layout->fl_stripe_unit;
+	u32 sc = layout->fl_stripe_count;
 	u32 bl, stripeno, stripepos, objsetno;
 	u32 su_per_object;
 	u64 t, su_offset;
@@ -800,8 +800,8 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
 {
 	unsigned num, num_mask;
 	struct ceph_pg pgid;
-	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
-	int poolid = le32_to_cpu(fl->fl_pg_pool);
+	s32 preferred = fl->fl_pg_preferred;
+	int poolid = fl->fl_pg_pool;
 	struct ceph_pg_pool_info *pool;
 	unsigned ps;
 
diff --git a/osdmap.h b/osdmap.h
index c4af841..05fb0cc 100644
--- a/osdmap.h
+++ b/osdmap.h
@@ -53,35 +53,6 @@ struct ceph_osdmap {
 	struct crush_map *crush;
 };
 
-/*
- * file layout helpers
- */
-#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
-#define ceph_file_layout_stripe_count(l) \
-	((__s32)le32_to_cpu((l).fl_stripe_count))
-#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
-#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
-#define ceph_file_layout_object_su(l) \
-	((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_preferred(l) \
-	((__s32)le32_to_cpu((l).fl_pg_preferred))
-#define ceph_file_layout_pg_pool(l) \
-	((__s32)le32_to_cpu((l).fl_pg_pool))
-
-static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
-{
-	return le32_to_cpu(l->fl_stripe_unit) *
-		le32_to_cpu(l->fl_stripe_count);
-}
-
-/* "period" == bytes before i start on a new set of objects */
-static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
-{
-	return le32_to_cpu(l->fl_object_size) *
-		le32_to_cpu(l->fl_stripe_count);
-}
-
-
 static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
 {
 	return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP);
diff --git a/types.h b/types.h
index 8a51456..172438c 100644
--- a/types.h
+++ b/types.h
@@ -24,5 +24,35 @@ struct ceph_cap_reservation {
 	int count;
 };
 
+/*
+ * struct ceph_file_layout - file/object mapping parameters (internal)
+ *
+ * Stores information from struct ceph_file_layout_wire converted to native data
+ * types, and lets gcc do optimized packing/alignment.
+ *
+ * See struct ceph_file_layout_wire for description of members common to both
+ * structs.
+ */
+struct ceph_file_layout {
+	u32 fl_stripe_unit;
+	u32 fl_stripe_count;
+	u32 fl_object_size;
+	u32 fl_cas_hash;
+	u32 fl_object_stripe_unit;
+	s32 fl_pg_preferred;
+	u32 fl_pg_pool;
+};
+
+static inline void ceph_layout_otw_to_cpu(struct ceph_file_layout *n,
+		struct ceph_file_layout_wire *o)
+{
+	n->fl_stripe_unit	  = le32_to_cpu(o->fl_stripe_unit);
+	n->fl_stripe_count	  = le32_to_cpu(o->fl_stripe_count);
+	n->fl_object_size	  = le32_to_cpu(o->fl_object_size);
+	n->fl_cas_hash		  = le32_to_cpu(o->fl_cas_hash);
+	n->fl_object_stripe_unit  = le32_to_cpu(o->fl_object_stripe_unit);
+	n->fl_pg_preferred 	  = (s32)le32_to_cpu(o->fl_pg_preferred);
+	n->fl_pg_pool 		  = le32_to_cpu(o->fl_pg_pool);
+}
 
 #endif
diff --git a/xattr.c b/xattr.c
index 1a48a55..ba6039d 100644
--- a/xattr.c
+++ b/xattr.c
@@ -95,13 +95,12 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 
 	ret = snprintf(val, size,
 		"chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
-		(unsigned long long)ceph_file_layout_su(ci->i_layout),
-		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
-		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-	if (ceph_file_layout_pg_preferred(ci->i_layout))
+		(unsigned long long)ci->i_layout.fl_stripe_unit,
+		(unsigned long long)ci->i_layout.fl_stripe_count,
+		(unsigned long long)ci->i_layout.fl_object_size);
+	if (ci->i_layout.fl_pg_preferred)
 		ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
-			    (unsigned long long)ceph_file_layout_pg_preferred(
-				    ci->i_layout));
+			    (unsigned long long)ci->i_layout.fl_pg_preferred);
 	return ret;
 }
 


* Re: [RFC] v2 separate otw data from host data
  2009-11-06 19:29       ` [RFC] v2 " Noah Watkins
@ 2009-11-06 19:44         ` Sage Weil
  2009-11-06 20:04           ` Noah Watkins
  0 siblings, 1 reply; 12+ messages in thread
From: Sage Weil @ 2009-11-06 19:44 UTC (permalink / raw)
  To: Noah Watkins; +Cc: Noah Watkins, ceph-devel

On Fri, 6 Nov 2009, Noah Watkins wrote:
> Updated patch with the _wire suffix suggested by Zach, and moved the
> internal type to types.h.

- should use 'wire' instead of 'otw' consistently?
- the layout validation func is used by userspace, and should eventually 
be used by the kclient
- lost the comments on the struct in ceph_fs.h

> Looking over it again, perhaps just using some accessor macros is best.

Yeah...  Well, at some point we should probably add a 'wire' suffix 
(prefix?) to all the disk/wire types, though.  (I'll stick that in the 
TODO.  It needs to be coordinated with the userspace change.)  This can 
come after?

sage


* Re: [RFC] v2 separate otw data from host data
  2009-11-06 19:44         ` Sage Weil
@ 2009-11-06 20:04           ` Noah Watkins
  2009-11-06 20:08             ` Yehuda Sadeh Weinraub
  2009-11-06 20:49             ` Sage Weil
  0 siblings, 2 replies; 12+ messages in thread
From: Noah Watkins @ 2009-11-06 20:04 UTC (permalink / raw)
  To: Sage Weil; +Cc: Noah Watkins, ceph-devel

> - should use 'wire' instead of 'otw' consistently?
Up vote from me. I like 'wire' better than 'otw'.

> - the layout validation func is used by userspace, and should eventually
> be used by the kclient
Ahh, I'll restore it. And ceph_ioctl_set_layout does some basic layout
validation of its own; these could be merged?
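
Something like this, maybe (rough sketch; the checks here are
placeholders, the real rules live in ceph_file_layout_is_valid()):

static int ceph_file_layout_validate(const struct ceph_file_layout *l)
{
        if (!l->fl_stripe_unit || !l->fl_stripe_count ||
            !l->fl_object_size)
                return -EINVAL;
        if (l->fl_object_size % l->fl_stripe_unit)
                return -EINVAL;
        return 0;
}

One validator over the native struct, callable from both the ioctl path
(after copy_from_user) and the decode path (after the otw-to-cpu
conversion).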

> - lost the comments on the struct in ceph_fs.h
I bumped the inline comments up to a block comment above the struct. Or
did you mean 'lose them' rather than 'lost'?

>> Looking over it again, perhaps just using some accessor macros is best.
>
> Yeah...  Well, at some point we should probably add a 'wire' suffix
> (prefix?) to all the disk/wire types, though.  (I'll stick that in the
> TODO.  It needs to be coordinated with the userspace change.)  This can
> come after?
If this is overall a step in the right direction, then starting with
this patch and revisiting the topic as a larger iterative clean-up seems
reasonable.

This brings up a good point about the level at which nested structures
are labeled 'wire'. For example, ceph_mds_reply_inode is a struct that
should be labeled 'wire', but it would be ugly to label every member of
that struct 'wire' as well.
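
e.g. suffixing just the type names would look something like this
(hypothetical sketch, members abbreviated):

struct ceph_mds_reply_inode_wire {
        __le64 ino;
        struct ceph_file_layout_wire layout;  /* nested wire type */
        struct ceph_timespec ctime;           /* already a wire type */
} __attribute__ ((packed));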

-n




* Re: [RFC] v2 separate otw data from host data
  2009-11-06 20:04           ` Noah Watkins
@ 2009-11-06 20:08             ` Yehuda Sadeh Weinraub
  2009-11-06 20:19               ` Noah Watkins
  2009-11-06 20:49             ` Sage Weil
  1 sibling, 1 reply; 12+ messages in thread
From: Yehuda Sadeh Weinraub @ 2009-11-06 20:08 UTC (permalink / raw)
  To: Noah Watkins; +Cc: Sage Weil, ceph-devel, Noah Watkins


[-- Attachment #1.1: Type: text/plain, Size: 366 bytes --]

> > - lost the comments on the struct in ceph_fs.h
> I bumped the inline comments up to a block comment above the struct. Or
> did you mean 'lose them' rather than 'lost'?

Personally I prefer seeing those inline on the struct. That way one feels
obligated to update the comments whenever the struct changes, and it is
faster to read through the code.

Yehuda


* Re: [RFC] v2 separate otw data from host data
  2009-11-06 20:08             ` Yehuda Sadeh Weinraub
@ 2009-11-06 20:19               ` Noah Watkins
  0 siblings, 0 replies; 12+ messages in thread
From: Noah Watkins @ 2009-11-06 20:19 UTC (permalink / raw)
  To: Yehuda Sadeh Weinraub; +Cc: Sage Weil, ceph-devel, Noah Watkins

> Personally I prefer seeing those inline on the struct. That way one
> feels obligated to update the comments whenever the struct changes, and
> it is faster to read through the code.
I agree. I thought I had been noticing a trend toward block-commented
structs in other kernel code, but there is nothing about it in the
coding style document. Reverted.
-n


* Re: [RFC] v2 separate otw data from host data
  2009-11-06 20:04           ` Noah Watkins
  2009-11-06 20:08             ` Yehuda Sadeh Weinraub
@ 2009-11-06 20:49             ` Sage Weil
  1 sibling, 0 replies; 12+ messages in thread
From: Sage Weil @ 2009-11-06 20:49 UTC (permalink / raw)
  To: Noah Watkins; +Cc: Noah Watkins, ceph-devel

On Fri, 6 Nov 2009, Noah Watkins wrote:

> > - should use 'wire' instead of 'otw' consistently?
> Up vote from me. I like 'wire' better than 'otw'.
> 
> > - the layout validation func is used by userspace, and should eventually
> > be used by the kclient
> Ahh, I'll restore it. And ceph_ioctl_set_layout does some basic layout
> validation of its own; these could be merged?

Yes please :)

> > - lost the comments on the struct in ceph_fs.h
> I bumped the inline comments up to a block comment above the struct. Or
> did you mean 'lose them' rather than 'lost'?

Oh right.  Let's keep them inline.

> >> Looking over it again, perhaps just using some accessor macros is best.
> >
> > Yeah...  Well, at some point we should probably add a 'wire' suffix
> > (prefix?) to all the disk/wire types, though.  (I'll stick that in the
> > TODO.  It needs to be coordinated with the userspace change.)  This can
> > come after?
> If this is overall a step in the right direction, then starting with
> this patch and revisiting the topic as a larger iterative clean-up seems
> reasonable.
> 
> This brings up a good point about the level at which nested structures
> are labeled 'wire'. For example, ceph_mds_reply_inode is a struct that
> should be labeled 'wire', but it would be ugly to label every member of
> that struct 'wire' as well.

The type naming should be consistent.  ceph_wire_ prefix?

sage
