* [PATCH] erofs-utils: introduce xattr support
@ 2019-07-26 11:50 htyuxe+dhbrei4sq0df8
2019-08-05 14:54 ` Li Guifu
0 siblings, 1 reply; 12+ messages in thread
From: htyuxe+dhbrei4sq0df8 @ 2019-07-26 11:50 UTC (permalink / raw)
Load extended attributes (xattrs) from the source files and pack them into the target image.
---
include/erofs/hashtable.h | 502 ++++++++++++++++++++++++++++++++++++++
include/erofs/internal.h | 2 +-
include/erofs/xattr.h | 22 ++
lib/Makefile.am | 3 +-
lib/inode.c | 23 ++
lib/xattr.c | 319 ++++++++++++++++++++++++
6 files changed, 869 insertions(+), 2 deletions(-)
create mode 100644 include/erofs/hashtable.h
create mode 100644 include/erofs/xattr.h
create mode 100644 lib/xattr.c
diff --git a/include/erofs/hashtable.h b/include/erofs/hashtable.h
new file mode 100644
index 0000000..349a655
--- /dev/null
+++ b/include/erofs/hashtable.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * include/erofs/hashtable.h
+ *
+ */
+
+#ifndef __EROFS_HASHTABLE_H
+#define __EROFS_HASHTABLE_H
+
/*
 * Word size used to pick the hash_long() flavour.  Derive it from the
 * compiler instead of hard-coding 32, so 64-bit builds actually use
 * the 64-bit mix; fall back to 32 for unknown compilers.
 */
#ifdef __SIZEOF_LONG__
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#else
#define BITS_PER_LONG 32
#endif
#ifndef __always_inline
#define __always_inline inline
#endif

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1; ULL: doesn't fit a 32-bit long */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
#error Wordsize not 32 or 64
#endif
+
/*
 * Hash-chain list node: @pprev points at the previous node's ->next slot
 * (or at the bucket's ->first), which allows O(1) unlinking without
 * knowing the list head.
 */
struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};
+
/*
 * Architectures might want to move the poison pointer offset
 * into some well-recognized area such as 0xdead000000000000,
 * that is also not mappable by user-space exploits:
 */
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)

/* Byte offset of MEMBER within TYPE; prefer the compiler builtin when present. */
#undef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
+
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
/* Mark @h as not being on any chain (see hlist_unhashed()). */
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

/* True if @h was never added, or was removed via hlist_del_init(). */
static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

/* Unlink @n from its chain; @n's own pointers are left untouched. */
static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;

	*pprev = next;
	if (next)
		next->pprev = pprev;
}

/* Unlink @n and poison its pointers to trap use-after-delete. */
static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

/* Unlink @n (if hashed) and reinitialize it so it may be re-added. */
static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}
+
/* Insert @n at the front of chain @h. */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* Insert @n immediately before @next; next must be != NULL. */
static inline void hlist_add_before(struct hlist_node *n,
				    struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/* Insert @n immediately after @prev. */
static inline void hlist_add_behind(struct hlist_node *n,
				    struct hlist_node *prev)
{
	n->next = prev->next;
	prev->next = n;
	n->pprev = &prev->next;

	if (n->next)
		n->next->pprev = &n->next;
}

/* after that we'll appear to be on some hlist and hlist_del will work */
static inline void hlist_add_fake(struct hlist_node *n)
{
	n->pprev = &n->next;
}

/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}
+
/* Cast a member pointer @ptr back to its containing @type. */
#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos; pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/* Like hlist_entry(), but maps a NULL @ptr to NULL instead of faulting. */
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
	})

/**
 * hlist_for_each_entry - iterate over list of given type
 * @pos:the type * to use as a loop cursor.
 * @head:the head for your list.
 * @member:the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(pos, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
	     pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue
 * iterate over a hlist continuing after current point
 * @pos:the type * to use as a loop cursor.
 * @member:the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(pos, member) \
	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
	     pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from
 * iterate over a hlist continuing from current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(pos, member) \
	for (; pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/**
 * hlist_for_each_entry_safe
 * iterate over list of given type safe against removal of list entry
 * @pos:the type * to use as a loop cursor.
 * @n:another &struct hlist_node to use as temporary storage
 * @head:the head for your list.
 * @member:the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(pos, n, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
	     pos && ({ n = pos->member.next; 1; }); \
	     pos = hlist_entry_safe(n, typeof(*pos), member))
+
/* Mix a 64-bit value and keep the top @bits bits (the best-mixed ones). */
static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
	u64 hash = val;

#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
	hash = hash * GOLDEN_RATIO_PRIME_64;
#else
	/*
	 * Multiply by GOLDEN_RATIO_PRIME_64 via shifts and adds:
	 * sigh, gcc can't optimise this alone like it does for 32 bits.
	 * The cumulative shift amounts (18, 51, 54, 57, 61, 63) encode
	 * the prime's bit pattern -- do not reorder these statements.
	 */
	u64 n = hash;

	n <<= 18;
	hash -= n;
	n <<= 33;
	hash -= n;
	n <<= 3;
	hash += n;
	n <<= 3;
	hash -= n;
	n <<= 4;
	hash += n;
	n <<= 2;
	hash += n;
#endif

	/* High bits are more random, so use them. */
	return hash >> (64 - bits);
}

/* Mix a 32-bit value and keep the top @bits bits. */
static inline u32 hash_32(u32 val, unsigned int bits)
{
	/* On some cpus multiply is faster, on others gcc will do shifts */
	u32 hash = val * GOLDEN_RATIO_PRIME_32;

	/* High bits are more random, so use them. */
	return hash >> (32 - bits);
}
+
/**
 * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
 * @n - parameter
 *
 * constant-capable log of base 2 calculation
 * - this can be used to initialise global variables from constant data, hence
 * the massive ternary operator construction (a builtin such as
 * __builtin_clzll would not be an integer constant expression)
 *
 * selects the appropriately-sized optimised version depending on sizeof(n)
 *
 * Note: ilog2(0) evaluates to 0, same as ilog2(1).
 */
#define ilog2(n) \
( \
	(n) & (1ULL << 63) ? 63 : \
	(n) & (1ULL << 62) ? 62 : \
	(n) & (1ULL << 61) ? 61 : \
	(n) & (1ULL << 60) ? 60 : \
	(n) & (1ULL << 59) ? 59 : \
	(n) & (1ULL << 58) ? 58 : \
	(n) & (1ULL << 57) ? 57 : \
	(n) & (1ULL << 56) ? 56 : \
	(n) & (1ULL << 55) ? 55 : \
	(n) & (1ULL << 54) ? 54 : \
	(n) & (1ULL << 53) ? 53 : \
	(n) & (1ULL << 52) ? 52 : \
	(n) & (1ULL << 51) ? 51 : \
	(n) & (1ULL << 50) ? 50 : \
	(n) & (1ULL << 49) ? 49 : \
	(n) & (1ULL << 48) ? 48 : \
	(n) & (1ULL << 47) ? 47 : \
	(n) & (1ULL << 46) ? 46 : \
	(n) & (1ULL << 45) ? 45 : \
	(n) & (1ULL << 44) ? 44 : \
	(n) & (1ULL << 43) ? 43 : \
	(n) & (1ULL << 42) ? 42 : \
	(n) & (1ULL << 41) ? 41 : \
	(n) & (1ULL << 40) ? 40 : \
	(n) & (1ULL << 39) ? 39 : \
	(n) & (1ULL << 38) ? 38 : \
	(n) & (1ULL << 37) ? 37 : \
	(n) & (1ULL << 36) ? 36 : \
	(n) & (1ULL << 35) ? 35 : \
	(n) & (1ULL << 34) ? 34 : \
	(n) & (1ULL << 33) ? 33 : \
	(n) & (1ULL << 32) ? 32 : \
	(n) & (1ULL << 31) ? 31 : \
	(n) & (1ULL << 30) ? 30 : \
	(n) & (1ULL << 29) ? 29 : \
	(n) & (1ULL << 28) ? 28 : \
	(n) & (1ULL << 27) ? 27 : \
	(n) & (1ULL << 26) ? 26 : \
	(n) & (1ULL << 25) ? 25 : \
	(n) & (1ULL << 24) ? 24 : \
	(n) & (1ULL << 23) ? 23 : \
	(n) & (1ULL << 22) ? 22 : \
	(n) & (1ULL << 21) ? 21 : \
	(n) & (1ULL << 20) ? 20 : \
	(n) & (1ULL << 19) ? 19 : \
	(n) & (1ULL << 18) ? 18 : \
	(n) & (1ULL << 17) ? 17 : \
	(n) & (1ULL << 16) ? 16 : \
	(n) & (1ULL << 15) ? 15 : \
	(n) & (1ULL << 14) ? 14 : \
	(n) & (1ULL << 13) ? 13 : \
	(n) & (1ULL << 12) ? 12 : \
	(n) & (1ULL << 11) ? 11 : \
	(n) & (1ULL << 10) ? 10 : \
	(n) & (1ULL << 9) ? 9 : \
	(n) & (1ULL << 8) ? 8 : \
	(n) & (1ULL << 7) ? 7 : \
	(n) & (1ULL << 6) ? 6 : \
	(n) & (1ULL << 5) ? 5 : \
	(n) & (1ULL << 4) ? 4 : \
	(n) & (1ULL << 3) ? 3 : \
	(n) & (1ULL << 2) ? 2 : \
	(n) & (1ULL << 1) ? 1 : 0 \
)
+
/* Lookup table for CRC-16/XMODEM (polynomial 0x1021, init 0x0000). */
static const uint16_t crc16tab[256] = {
	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
};

/*
 * CRC-16/XMODEM of @len bytes at @buf.
 * This lives in a header, so it must be static inline: the original
 * non-static definition would produce duplicate-symbol link errors as
 * soon as the header is included from more than one translation unit.
 */
static inline uint16_t crc16(const char *buf, int len)
{
	int counter;
	uint16_t crc = 0;

	for (counter = 0; counter < len; counter++)
		crc = (crc << 8) ^ crc16tab[((crc >> 8) ^ *buf++) & 0x00FF];
	return crc;
}
+
/* Define and zero-initialize a hash table with 2^(bits) buckets. */
#define DEFINE_HASHTABLE(name, bits) \
	struct hlist_head name[1 << (bits)] = \
			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }

/* Declare (without initializing) a hash table with 2^(bits) buckets. */
#define DECLARE_HASHTABLE(name, bits) \
	struct hlist_head name[1 << (bits)]

/* Both only work on real arrays, not pointers (ARRAY_SIZE constraint). */
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))

/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels*/
#define hash_min(val, bits) \
	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))

/* Reset all @sz buckets of @ht to empty. */
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
	unsigned int i;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);
}
+
/**
 * hash_init - initialize a hash table
 * @hashtable: hashtable to be initialized
 *
 * Calculates the size of the hashtable from the given parameter, otherwise
 * same as hash_init_size.
 *
 * This has to be a macro since HASH_BITS() will not work on pointers since
 * it calculates the size during preprocessing.
 */
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))

/**
 * hash_add - add an object to a hashtable
 * @hashtable: hashtable to add to
 * @node: the &struct hlist_node of the object to be added
 * @key: the key of the object to be added
 *
 * Duplicates are not detected; callers must check first if needed.
 */
#define hash_add(hashtable, node, key) \
	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])

/**
 * hash_hashed - check whether an object is in any hashtable
 * @node: the &struct hlist_node of the object to be checked
 */
static inline bool hash_hashed(struct hlist_node *node)
{
	return !hlist_unhashed(node);
}
+
/* True when none of the @sz buckets of @ht holds an entry. */
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
	unsigned int i;

	for (i = 0; i < sz; i++)
		if (!hlist_empty(&ht[i]))
			return false;

	return true;
}

/**
 * hash_empty - check whether a hashtable is empty
 * @hashtable: hashtable to check
 *
 * This has to be a macro since HASH_BITS() will not work on pointers since
 * it calculates the size during preprocessing.
 */
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))

/**
 * hash_del - remove an object from a hashtable
 * @node: &struct hlist_node of the object to remove
 *
 * Safe to call on an unhashed node (no-op); the node is reinitialized.
 */
static inline void hash_del(struct hlist_node *node)
{
	hlist_del_init(node);
}
+
/**
 * hash_for_each - iterate over a hashtable
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 *
 * The `obj == NULL` test makes a `break` inside the inner loop (which
 * leaves @obj non-NULL) terminate the outer bucket loop as well.
 */
#define hash_for_each(name, bkt, obj, member) \
	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
			(bkt)++)\
		hlist_for_each_entry(obj, &name[bkt], member)

/**
 * hash_for_each_safe - iterate over a hashtable safe against removal of
 * hash entry
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @tmp: a &struct used for temporary storage
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
			(bkt)++)\
		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)

/**
 * hash_for_each_possible - iterate over all possible objects hashing to the
 * same bucket
 * @name: hashtable to iterate
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 * @key: the key of the objects to iterate over
 */
#define hash_for_each_possible(name, obj, member, key) \
	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+#endif
diff --git a/include/erofs/internal.h b/include/erofs/internal.h
index b7ce6f8..33a72b5 100644
--- a/include/erofs/internal.h
+++ b/include/erofs/internal.h
@@ -59,7 +59,7 @@ struct erofs_sb_info {
extern struct erofs_sb_info sbi;
struct erofs_inode {
- struct list_head i_hash, i_subdirs;
+ struct list_head i_hash, i_subdirs, i_xattrs;
unsigned int i_count;
struct erofs_inode *i_parent;
diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
new file mode 100644
index 0000000..dff9fd6
--- /dev/null
+++ b/include/erofs/xattr.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * include/erofs/xattr.h
+ *
+ */
+
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
/*
 * Number of slots encoded in i_xattr_icount for an xattr area of
 * @_size bytes: 0 when there are no xattrs, otherwise the number of
 * erofs_xattr_entry records plus one for the ibody header.
 *
 * Fix: the arithmetic below used the raw argument `_size` instead of
 * the converted `__size`, mixing byte orders on big-endian hosts.
 * NOTE(review): callers pass the host-endian inode->xattr_isize here,
 * so the le16_to_cpu() looks spurious -- confirm intended endianness.
 */
#define XATTR_COUNT(_size) ({\
	u32 __size = le16_to_cpu(_size); \
	(__size == 0) ? 0 : \
	(__size - sizeof(struct erofs_xattr_ibody_header)) / \
	sizeof(struct erofs_xattr_entry) + 1; })
+
+
+int cust_xattr(struct list_head *hlist);
+int read_xattr_from_src(const char *path, struct list_head *hlist);
+int xattr_entry_size(struct list_head *hlist);
+char *xattr_data(struct list_head *hlist, int size);
+
+#endif
diff --git a/lib/Makefile.am b/lib/Makefile.am
index dea82f7..cbe3243 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -2,7 +2,8 @@
# Makefile.am
noinst_LTLIBRARIES = liberofs.la
-liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c
+liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c \
+ xattr.c
liberofs_la_CFLAGS = -Wall -Werror -I$(top_srcdir)/include
if ENABLE_LZ4
liberofs_la_CFLAGS += ${LZ4_CFLAGS}
diff --git a/lib/inode.c b/lib/inode.c
index 8b38270..615f117 100644
--- a/lib/inode.c
+++ b/lib/inode.c
@@ -18,6 +18,7 @@
#include "erofs/cache.h"
#include "erofs/io.h"
#include "erofs/compress.h"
+#include "erofs/xattr.h"
struct erofs_sb_info sbi;
@@ -364,8 +365,10 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
/* let's support v1 currently */
struct erofs_inode_v1 v1 = {0};
int ret;
+ uint16_t count = XATTR_COUNT(inode->xattr_isize);
v1.i_advise = cpu_to_le16(0 | (inode->data_mapping_mode << 1));
+ v1.i_xattr_icount = cpu_to_le16(count);
v1.i_mode = cpu_to_le16(inode->i_mode);
v1.i_nlink = cpu_to_le16(inode->i_nlink);
v1.i_size = cpu_to_le32((u32)inode->i_size);
@@ -399,6 +402,20 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
return false;
off += inode->inode_isize;
+ if (inode->xattr_isize) {
+ char *pbuf = xattr_data(&inode->i_xattrs, inode->xattr_isize);
+
+ if (IS_ERR(pbuf))
+ return false;
+
+ ret = dev_write(pbuf, off, inode->xattr_isize);
+ free(pbuf);
+ if (ret)
+ return false;
+
+ off += inode->xattr_isize;
+ }
+
if (inode->extent_isize) {
/* write compression metadata */
off = Z_EROFS_VLE_EXTENT_ALIGN(off);
@@ -452,6 +469,7 @@ int erofs_prepare_inode_buffer(struct erofs_inode *inode)
DBG_BUGON(inode->bh || inode->bh_inline);
+ inode->xattr_isize = xattr_entry_size(&inode->i_xattrs);
inodesize = inode->inode_isize + inode->xattr_isize +
inode->extent_isize;
@@ -612,6 +630,7 @@ struct erofs_inode *erofs_new_inode(void)
inode->i_count = 1;
init_list_head(&inode->i_subdirs);
+ init_list_head(&inode->i_xattrs);
inode->xattr_isize = 0;
inode->extent_isize = 0;
@@ -699,6 +718,10 @@ struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir)
struct dirent *dp;
struct erofs_dentry *d;
+ ret = read_xattr_from_src(dir->i_srcpath, &dir->i_xattrs);
+ if (ret)
+ return ERR_PTR(ret);
+
if (!S_ISDIR(dir->i_mode)) {
if (S_ISLNK(dir->i_mode)) {
char *const symlink = malloc(dir->i_size);
diff --git a/lib/xattr.c b/lib/xattr.c
new file mode 100644
index 0000000..6278abc
--- /dev/null
+++ b/lib/xattr.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * lib/xattr.c
+ *
+ */
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+#include <linux/xattr.h>
+#include <errno.h>
+#include <string.h>
+
+#include "erofs/defs.h"
+#include "erofs/print.h"
+#include "erofs/list.h"
+#include "erofs/internal.h"
+#include "erofs/hashtable.h"
+#include "err.h"
#define EROFS_XATTR_HASH_TABLE_BITS 16

/*
 * One unique (index, key-suffix, value) xattr payload, shared through
 * the global hash table by every inode carrying an identical attribute.
 */
struct xattr_item {
	const char *buf;	/* key suffix immediately followed by value */
	unsigned int keylen;	/* length of the key suffix (prefix stripped) */
	unsigned int vallen;	/* length of the value */
	unsigned int count;	/* number of inodes referencing this item */
	u8 index;		/* EROFS_XATTR_INDEX_* from the key prefix */
	struct hlist_node node;	/* membership in my_hash_table */
};

/* Per-inode list entry pointing at a (possibly shared) xattr_item. */
struct xattr_list {
	struct xattr_item *item;
	struct list_head list;
};

/* Global dedup table, keyed by crc16 of the item payload. */
DECLARE_HASHTABLE(my_hash_table, EROFS_XATTR_HASH_TABLE_BITS);
+
+struct xattr_prefix {
+ const char *prefix;
+ uint16_t prefix_len;
+ u8 index;
+} prefix[] = {
+ {
+ XATTR_USER_PREFIX,
+ XATTR_USER_PREFIX_LEN,
+ EROFS_XATTR_INDEX_USER
+ },
+ {
+ XATTR_SECURITY_PREFIX,
+ XATTR_SYSTEM_PREFIX_LEN,
+ EROFS_XATTR_INDEX_SECURITY
+ },
+ {
+ XATTR_TRUSTED_PREFIX,
+ XATTR_TRUSTED_PREFIX_LEN,
+ EROFS_XATTR_INDEX_TRUSTED
+ },
+ {NULL, 0, 0},
+};
+
/* Insert @node into the dedup table under crc16 key @key. */
static inline void hxattr_add(struct hlist_node *node, uint16_t key)
{
	hash_add(my_hash_table, node, key);
}

static inline void hxattr_del(struct hlist_node *node)
{
	hash_del(node);
}

/*
 * Find an existing xattr_item whose index and full payload
 * (key suffix + value, @len bytes total) match @buf exactly.
 * Returns ERR_PTR(-ENOENT) when absent; never returns NULL,
 * so callers must test with IS_ERR().
 */
static struct xattr_item *hxattr_match(const char *buf, int len, u8 index)
{
	struct xattr_item *item;
	uint16_t mkey = crc16(buf, len);

	hash_for_each_possible(my_hash_table, item, node, mkey) {
		if (index == item->index &&
		    len == (item->keylen + item->vallen) &&
		    !memcmp(buf, item->buf, len)) {
			return item;
		}
	}

	return ERR_PTR(-ENOENT);
}
+
+static bool match_index(const char *key, u8 *index, uint16_t *len)
+{
+ struct xattr_prefix *p = prefix;
+
+ while (p->prefix) {
+ if (strncmp(p->prefix, key, p->prefix_len)) {
+ *len = p->prefix_len;
+ *index = p->index;
+ return true;
+ }
+ p++;
+ }
+
+ return false;
+}
+
+static struct xattr_item *new_xattr(const char *buf, u8 index,
+ int keylen, int vallen)
+{
+ struct xattr_item *item = malloc(sizeof(*item));
+
+ if (!item)
+ return ERR_PTR(-ENOMEM);
+
+ memset(item, 0, sizeof(*item));
+ INIT_HLIST_NODE(&item->node);
+ item->buf = buf;
+ item->keylen = keylen;
+ item->vallen = vallen;
+ item->count = 1;
+ item->index = index;
+ if (!item->index)
+ return ERR_PTR(-EPERM);
+
+ return item;
+}
+
+static int xattr_add(struct list_head *hlist, struct xattr_item *item)
+{
+ struct xattr_list *mlist = malloc(sizeof(*mlist));
+
+ if (!mlist)
+ return -ENOMEM;
+
+ init_list_head(&mlist->list);
+ mlist->item = item;
+ list_add(&mlist->list, hlist);
+ return 0;
+}
+
/*
 * Fetch one xattr (@key) of @path and return a (possibly shared)
 * xattr_item for it.  The item's buffer holds the key suffix (prefix
 * stripped) immediately followed by the value; one extra byte holds a
 * trailing NUL used only for logging.
 * Returns an ERR_PTR() on failure -- never NULL.
 */
static struct xattr_item *list_xattr_value(const char *path, const char *key)
{
	ssize_t keylen, vallen;
	char *kxattr;
	struct xattr_item *item;
	u8 index;
	uint16_t prelen, suflen;

	/* Output attribute key.*/
	erofs_info("path:%s key: [%s] ", path, key);

	keylen = strlen(key);
	/* Unsupported namespaces (no matching prefix) are rejected here. */
	if (!match_index(key, &index, &prelen))
		return ERR_PTR(-ENODATA);

	BUG_ON(keylen < prelen);
	/* Determine length of the value.*/
	vallen = lgetxattr(path, key, NULL, 0);
	if (vallen == -1)
		return ERR_PTR(-errno);

	/*
	 * Allocate value buffer.
	 * One extra byte is needed to append 0x00.
	 */
	suflen = keylen - prelen;
	kxattr = malloc(suflen + vallen + 1);
	if (!kxattr)
		return ERR_PTR(-ENOMEM);

	if (vallen == 0)
		goto value_0;

	/*
	 * Copy value to buffer.  If the value grew since the sizing call
	 * above (TOCTOU), this second call fails with ERANGE and we bail.
	 */
	vallen = lgetxattr(path, key, kxattr + suflen, vallen);
	if (vallen == -1) {
		free(kxattr);
		return ERR_PTR(-errno);
	}

value_0:
	memcpy(kxattr, key + prelen, suflen);
	/* Output attribute value.*/
	kxattr[suflen + vallen] = '\0';
	erofs_info("value: [%s]", kxattr + suflen);

	/* kxattr is used at xattr_add(), neednt free if SUCCESS */
	item = hxattr_match(kxattr, suflen + vallen, index);
	if (!IS_ERR(item)) {
		/* identical payload already recorded: share it */
		item->count++;
		free(kxattr);
		return item;
	}

	/* new payload: item takes ownership of kxattr on success */
	item = new_xattr(kxattr, index, suflen, vallen);
	if (IS_ERR(item))
		free(kxattr);

	return item;
}
+
+int read_xattr_from_src(const char *path, struct list_head *hlist)
+{
+ int ret = 0;
+ char *kbuf, *key;
+ ssize_t buflen = llistxattr(path, NULL, 0);
+
+ if (buflen == -1)
+ return -errno;
+ else if (buflen == 0)
+ return 0;
+
+ /* Allocate the buffer.*/
+ kbuf = malloc(buflen);
+ if (!kbuf)
+ return -errno;
+
+ /* Copy the list of attribute keys to the buffer.*/
+ buflen = llistxattr(path, kbuf, buflen);
+ if (buflen == -1) {
+ ret = -errno;
+ goto exit_err;
+ }
+
+ /*
+ * Loop over the list of zero terminated strings with the
+ * attribute keys. Use the remaining buffer length to determine
+ * the end of the list.
+ */
+ key = kbuf;
+ while (buflen > 0) {
+ size_t keylen = strlen(key) + 1;
+ struct xattr_item *item = list_xattr_value(path, key);
+
+ if (!item) {
+ ret = -errno;
+ goto exit_err;
+ }
+
+ if (!hash_hashed(&item->node)) {
+ uint16_t mkey;
+
+ mkey = crc16(item->buf, item->keylen + item->vallen);
+ hxattr_add(&item->node, mkey);
+ }
+
+ if (hlist) {
+ ret = xattr_add(hlist, item);
+ if (ret < 0)
+ goto exit_err;
+ }
+
+ buflen -= keylen;
+ key += keylen;
+ }
+
+exit_err:
+ free(kbuf);
+ return ret;
+
+}
+
+int xattr_entry_size(struct list_head *hlist)
+{
+ int sum = 0;
+ struct xattr_list *lst;
+
+ if (list_empty(hlist))
+ return 0;
+
+ list_for_each_entry(lst, hlist, list) {
+ struct xattr_item *item = lst->item;
+
+ sum += sizeof(struct erofs_xattr_entry);
+ sum += item->keylen + item->vallen;
+ sum = EROFS_XATTR_ALIGN(sum);
+ }
+
+ sum += sizeof(struct erofs_xattr_ibody_header);
+
+ return EROFS_XATTR_ALIGN(sum);
+}
+
/*
 * Serialize the inode's xattr list @hlist into a freshly allocated
 * buffer of @xattr_size bytes (as computed by xattr_entry_size()):
 * an erofs_xattr_ibody_header followed by aligned entry records.
 * The caller owns and frees the returned buffer.
 * Returns an ERR_PTR() on allocation failure.
 */
char *xattr_data(struct list_head *hlist, int xattr_size)
{
	struct xattr_list *lst;
	char *buf, *pbuf;
	unsigned int size = 0;
	struct erofs_xattr_ibody_header header = {
		.h_checksum = 0,
		.h_shared_count = 0,
	};

	erofs_info("xattr_size=%d", xattr_size);
	buf = malloc(xattr_size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* zero-fill so alignment padding between entries is deterministic */
	memset(buf, 0, xattr_size);
	pbuf = buf + sizeof(struct erofs_xattr_ibody_header);

	list_for_each_entry(lst, hlist, list) {
		struct erofs_xattr_entry entry;
		struct xattr_item *item = lst->item;

		entry.e_name_index = item->index;
		/* NOTE(review): e_name_len is u8 -- silently truncates
		 * key suffixes longer than 255 bytes; confirm a bound
		 * is enforced earlier. */
		entry.e_name_len = item->keylen;
		entry.e_value_size = cpu_to_le16(item->vallen);

		BUG_ON(size > xattr_size);
		memcpy(pbuf + size, &entry, sizeof(entry));

		size += sizeof(struct erofs_xattr_entry);
		memcpy(pbuf + size, item->buf, item->keylen + item->vallen);
		size += item->keylen + item->vallen;
		size = EROFS_XATTR_ALIGN(size);
	}

	/* the header goes at the very front, before the entry area */
	memcpy(buf, &header, sizeof(header));

	return buf;
}
+
--
2.17.1
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH] erofs-utils: introduce xattr support
2019-07-26 11:50 [PATCH] erofs-utils: introduce xattr support htyuxe+dhbrei4sq0df8
@ 2019-08-05 14:54 ` Li Guifu
2019-08-05 17:30 ` Gao Xiang
0 siblings, 1 reply; 12+ messages in thread
From: Li Guifu @ 2019-08-05 14:54 UTC (permalink / raw)
Hi,
It looks good — a great step.
A new development branch is a good choice to make this more stable,
and the hash function had better use a lightweight one like
full_name_hash in the kernel.
On 2019/7/26 19:50, htyuxe+dhbrei4sq0df8 at grr.la wrote:
> load xattrs from source files and pack them into target image.
> ---
> include/erofs/hashtable.h | 502 ++++++++++++++++++++++++++++++++++++++
> include/erofs/internal.h | 2 +-
> include/erofs/xattr.h | 22 ++
> lib/Makefile.am | 3 +-
> lib/inode.c | 23 ++
> lib/xattr.c | 319 ++++++++++++++++++++++++
> 6 files changed, 869 insertions(+), 2 deletions(-)
> create mode 100644 include/erofs/hashtable.h
> create mode 100644 include/erofs/xattr.h
> create mode 100644 lib/xattr.c
>
> diff --git a/include/erofs/hashtable.h b/include/erofs/hashtable.h
> new file mode 100644
> index 0000000..349a655
> --- /dev/null
> +++ b/include/erofs/hashtable.h
> @@ -0,0 +1,502 @@
> +/* SPDX-License-Identifier: GPL-2.0+ */
> +/*
> + * include/erofs/hashtable.h
> + *
> + */
> +
> +#ifndef __EROFS_HASHTABLE_H
> +#define __EROFS_HASHTABLE_H
> +
> +#define BITS_PER_LONG 32
> +#ifndef __always_inline
> +#define __always_inline inline
> +#endif
> +
> +/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
> +#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
> +/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
> +#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
> +
> +#if BITS_PER_LONG == 32
> +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
> +#define hash_long(val, bits) hash_32(val, bits)
> +#elif BITS_PER_LONG == 64
> +#define hash_long(val, bits) hash_64(val, bits)
> +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
> +#else
> +#error Wordsize not 32 or 64
> +#endif
> +
> +struct hlist_head {
> + struct hlist_node *first;
> +};
> +
> +struct hlist_node {
> + struct hlist_node *next, **pprev;
> +};
> +
> +/*
> + * Architectures might want to move the poison pointer offset
> + * into some well-recognized area such as 0xdead000000000000,
> + * that is also not mappable by user-space exploits:
> + */
> +#ifdef CONFIG_ILLEGAL_POINTER_VALUE
> +# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
> +#else
> +# define POISON_POINTER_DELTA 0
> +#endif
> +
> +/*
> + * These are non-NULL pointers that will result in page faults
> + * under normal circumstances, used to verify that nobody uses
> + * non-initialized list entries.
> + */
> +#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
> +#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
> +
> +#undef offsetof
> +#ifdef __compiler_offsetof
> +#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
> +#else
> +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
> +#endif
> +
> +/*
> + * Double linked lists with a single pointer list head.
> + * Mostly useful for hash tables where the two pointer list head is
> + * too wasteful.
> + * You lose the ability to access the tail in O(1).
> + */
> +
> +#define HLIST_HEAD_INIT { .first = NULL }
> +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
> +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
> +static inline void INIT_HLIST_NODE(struct hlist_node *h)
> +{
> + h->next = NULL;
> + h->pprev = NULL;
> +}
> +
> +static inline int hlist_unhashed(const struct hlist_node *h)
> +{
> + return !h->pprev;
> +}
> +
> +static inline int hlist_empty(const struct hlist_head *h)
> +{
> + return !h->first;
> +}
> +
> +static inline void __hlist_del(struct hlist_node *n)
> +{
> + struct hlist_node *next = n->next;
> + struct hlist_node **pprev = n->pprev;
> +
> + *pprev = next;
> + if (next)
> + next->pprev = pprev;
> +}
> +
> +static inline void hlist_del(struct hlist_node *n)
> +{
> + __hlist_del(n);
> + n->next = LIST_POISON1;
> + n->pprev = LIST_POISON2;
> +}
> +
> +static inline void hlist_del_init(struct hlist_node *n)
> +{
> + if (!hlist_unhashed(n)) {
> + __hlist_del(n);
> + INIT_HLIST_NODE(n);
> + }
> +}
> +
> +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
> +{
> + struct hlist_node *first = h->first;
> +
> + n->next = first;
> + if (first)
> + first->pprev = &n->next;
> + h->first = n;
> + n->pprev = &h->first;
> +}
> +
> +/* next must be != NULL */
> +static inline void hlist_add_before(struct hlist_node *n,
> + struct hlist_node *next)
> +{
> + n->pprev = next->pprev;
> + n->next = next;
> + next->pprev = &n->next;
> + *(n->pprev) = n;
> +}
> +
> +static inline void hlist_add_behind(struct hlist_node *n,
> + struct hlist_node *prev)
> +{
> + n->next = prev->next;
> + prev->next = n;
> + n->pprev = &prev->next;
> +
> + if (n->next)
> + n->next->pprev = &n->next;
> +}
> +
> +/* after that we'll appear to be on some hlist and hlist_del will work */
> +static inline void hlist_add_fake(struct hlist_node *n)
> +{
> + n->pprev = &n->next;
> +}
> +
> +/*
> + * Move a list from one list head to another. Fixup the pprev
> + * reference of the first entry if it exists.
> + */
> +static inline void hlist_move_list(struct hlist_head *old,
> + struct hlist_head *new)
> +{
> + new->first = old->first;
> + if (new->first)
> + new->first->pprev = &new->first;
> + old->first = NULL;
> +}
> +
> +#define hlist_entry(ptr, type, member) container_of(ptr, type, member)
> +
> +#define hlist_for_each(pos, head) \
> + for (pos = (head)->first; pos; pos = pos->next)
> +
> +#define hlist_for_each_safe(pos, n, head) \
> + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
> + pos = n)
> +
> +#define hlist_entry_safe(ptr, type, member) \
> + ({ typeof(ptr) ____ptr = (ptr); \
> + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
> + })
> +
> +/**
> + * hlist_for_each_entry - iterate over list of given type
> + * @pos:the type * to use as a loop cursor.
> + * @head:the head for your list.
> + * @member:the name of the hlist_node within the struct.
> + */
> +#define hlist_for_each_entry(pos, head, member) \
> + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
> + pos; \
> + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
> +
> +/**
> + * hlist_for_each_entry_continue
> + * iterate over a hlist continuing after current point
> + * @pos:the type * to use as a loop cursor.
> + * @member:the name of the hlist_node within the struct.
> + */
> +#define hlist_for_each_entry_continue(pos, member) \
> + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
> + pos; \
> + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
> +
> +/**
> + * hlist_for_each_entry_from
> + * iterate over a hlist continuing from current point
> + * @pos: the type * to use as a loop cursor.
> + * @member: the name of the hlist_node within the struct.
> + */
> +#define hlist_for_each_entry_from(pos, member) \
> + for (; pos; \
> + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
> +
> +/**
> + * hlist_for_each_entry_safe
> + * iterate over list of given type safe against removal of list entry
> + * @pos:the type * to use as a loop cursor.
> + * @n:another &struct hlist_node to use as temporary storage
> + * @head:the head for your list.
> + * @member:the name of the hlist_node within the struct.
> + */
> +#define hlist_for_each_entry_safe(pos, n, head, member) \
> + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
> + pos && ({ n = pos->member.next; 1; }); \
> + pos = hlist_entry_safe(n, typeof(*pos), member))
> +
> +static __always_inline u64 hash_64(u64 val, unsigned int bits)
> +{
> + u64 hash = val;
> +
> +#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
> + hash = hash * GOLDEN_RATIO_PRIME_64;
> +#else
> + /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
> + u64 n = hash;
> +
> + n <<= 18;
> + hash -= n;
> + n <<= 33;
> + hash -= n;
> + n <<= 3;
> + hash += n;
> + n <<= 3;
> + hash -= n;
> + n <<= 4;
> + hash += n;
> + n <<= 2;
> + hash += n;
> +#endif
> +
> + /* High bits are more random, so use them. */
> + return hash >> (64 - bits);
> +}
> +
> +static inline u32 hash_32(u32 val, unsigned int bits)
> +{
> + /* On some cpus multiply is faster, on others gcc will do shifts */
> + u32 hash = val * GOLDEN_RATIO_PRIME_32;
> +
> + /* High bits are more random, so use them. */
> + return hash >> (32 - bits);
> +}
> +
> +/**
> + * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
> + * @n - parameter
> + *
> + * constant-capable log of base 2 calculation
> + * - this can be used to initialise global variables from constant data, hence
> + * the massive ternary operator construction
> + *
> + * selects the appropriately-sized optimised version depending on sizeof(n)
> + */
> +#define ilog2(n) \
> +( \
> + (n) & (1ULL << 63) ? 63 : \
> + (n) & (1ULL << 62) ? 62 : \
> + (n) & (1ULL << 61) ? 61 : \
> + (n) & (1ULL << 60) ? 60 : \
> + (n) & (1ULL << 59) ? 59 : \
> + (n) & (1ULL << 58) ? 58 : \
> + (n) & (1ULL << 57) ? 57 : \
> + (n) & (1ULL << 56) ? 56 : \
> + (n) & (1ULL << 55) ? 55 : \
> + (n) & (1ULL << 54) ? 54 : \
> + (n) & (1ULL << 53) ? 53 : \
> + (n) & (1ULL << 52) ? 52 : \
> + (n) & (1ULL << 51) ? 51 : \
> + (n) & (1ULL << 50) ? 50 : \
> + (n) & (1ULL << 49) ? 49 : \
> + (n) & (1ULL << 48) ? 48 : \
> + (n) & (1ULL << 47) ? 47 : \
> + (n) & (1ULL << 46) ? 46 : \
> + (n) & (1ULL << 45) ? 45 : \
> + (n) & (1ULL << 44) ? 44 : \
> + (n) & (1ULL << 43) ? 43 : \
> + (n) & (1ULL << 42) ? 42 : \
> + (n) & (1ULL << 41) ? 41 : \
> + (n) & (1ULL << 40) ? 40 : \
> + (n) & (1ULL << 39) ? 39 : \
> + (n) & (1ULL << 38) ? 38 : \
> + (n) & (1ULL << 37) ? 37 : \
> + (n) & (1ULL << 36) ? 36 : \
> + (n) & (1ULL << 35) ? 35 : \
> + (n) & (1ULL << 34) ? 34 : \
> + (n) & (1ULL << 33) ? 33 : \
> + (n) & (1ULL << 32) ? 32 : \
> + (n) & (1ULL << 31) ? 31 : \
> + (n) & (1ULL << 30) ? 30 : \
> + (n) & (1ULL << 29) ? 29 : \
> + (n) & (1ULL << 28) ? 28 : \
> + (n) & (1ULL << 27) ? 27 : \
> + (n) & (1ULL << 26) ? 26 : \
> + (n) & (1ULL << 25) ? 25 : \
> + (n) & (1ULL << 24) ? 24 : \
> + (n) & (1ULL << 23) ? 23 : \
> + (n) & (1ULL << 22) ? 22 : \
> + (n) & (1ULL << 21) ? 21 : \
> + (n) & (1ULL << 20) ? 20 : \
> + (n) & (1ULL << 19) ? 19 : \
> + (n) & (1ULL << 18) ? 18 : \
> + (n) & (1ULL << 17) ? 17 : \
> + (n) & (1ULL << 16) ? 16 : \
> + (n) & (1ULL << 15) ? 15 : \
> + (n) & (1ULL << 14) ? 14 : \
> + (n) & (1ULL << 13) ? 13 : \
> + (n) & (1ULL << 12) ? 12 : \
> + (n) & (1ULL << 11) ? 11 : \
> + (n) & (1ULL << 10) ? 10 : \
> + (n) & (1ULL << 9) ? 9 : \
> + (n) & (1ULL << 8) ? 8 : \
> + (n) & (1ULL << 7) ? 7 : \
> + (n) & (1ULL << 6) ? 6 : \
> + (n) & (1ULL << 5) ? 5 : \
> + (n) & (1ULL << 4) ? 4 : \
> + (n) & (1ULL << 3) ? 3 : \
> + (n) & (1ULL << 2) ? 2 : \
> + (n) & (1ULL << 1) ? 1 : 0 \
> +)
> +
> +static const uint16_t crc16tab[256] = {
> + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
> + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
> + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
> + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
> + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
> + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
> + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
> + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
> + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
> + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
> + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
> + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
> + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
> + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
> + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
> + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
> + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
> + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
> + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
> + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
> + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
> + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
> + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
> + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
> + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
> + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
> + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
> + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
> + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
> + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
> + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
> + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
> +};
> +
> +uint16_t crc16(const char *buf, int len)
> +{
> + int counter;
> + uint16_t crc = 0;
> +
> + for (counter = 0; counter < len; counter++)
> + crc = (crc<<8) ^ crc16tab[((crc>>8) ^ *buf++)&0x00FF];
> + return crc;
> +}
> +
> +#define DEFINE_HASHTABLE(name, bits) \
> + struct hlist_head name[1 << (bits)] = \
> + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
> +
> +#define DECLARE_HASHTABLE(name, bits) \
> + struct hlist_head name[1 << (bits)]
> +
> +#define HASH_SIZE(name) (ARRAY_SIZE(name))
> +#define HASH_BITS(name) ilog2(HASH_SIZE(name))
> +
> +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels*/
> +#define hash_min(val, bits) \
> + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
> +
> +static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < sz; i++)
> + INIT_HLIST_HEAD(&ht[i]);
> +}
> +
> +/**
> + * hash_init - initialize a hash table
> + * @hashtable: hashtable to be initialized
> + *
> + * Calculates the size of the hashtable from the given parameter, otherwise
> + * same as hash_init_size.
> + *
> + * This has to be a macro since HASH_BITS() will not work on pointers since
> + * it calculates the size during preprocessing.
> + */
> +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
> +
> +/**
> + * hash_add - add an object to a hashtable
> + * @hashtable: hashtable to add to
> + * @node: the &struct hlist_node of the object to be added
> + * @key: the key of the object to be added
> + */
> +#define hash_add(hashtable, node, key) \
> + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
> +
> +/**
> + * hash_hashed - check whether an object is in any hashtable
> + * @node: the &struct hlist_node of the object to be checked
> + */
> +static inline bool hash_hashed(struct hlist_node *node)
> +{
> + return !hlist_unhashed(node);
> +}
> +
> +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < sz; i++)
> + if (!hlist_empty(&ht[i]))
> + return false;
> +
> + return true;
> +}
> +
> +/**
> + * hash_empty - check whether a hashtable is empty
> + * @hashtable: hashtable to check
> + *
> + * This has to be a macro since HASH_BITS() will not work on pointers since
> + * it calculates the size during preprocessing.
> + */
> +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
> +
> +/**
> + * hash_del - remove an object from a hashtable
> + * @node: &struct hlist_node of the object to remove
> + */
> +static inline void hash_del(struct hlist_node *node)
> +{
> + hlist_del_init(node);
> +}
> +
> +/**
> + * hash_for_each - iterate over a hashtable
> + * @name: hashtable to iterate
> + * @bkt: integer to use as bucket loop cursor
> + * @obj: the type * to use as a loop cursor for each entry
> + * @member: the name of the hlist_node within the struct
> + */
> +#define hash_for_each(name, bkt, obj, member) \
> + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
> + (bkt)++)\
> + hlist_for_each_entry(obj, &name[bkt], member)
> +
> +/**
> + * hash_for_each_safe - iterate over a hashtable safe against removal of
> + * hash entry
> + * @name: hashtable to iterate
> + * @bkt: integer to use as bucket loop cursor
> + * @tmp: a &struct used for temporary storage
> + * @obj: the type * to use as a loop cursor for each entry
> + * @member: the name of the hlist_node within the struct
> + */
> +#define hash_for_each_safe(name, bkt, tmp, obj, member) \
> + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
> + (bkt)++)\
> + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
> +
> +/**
> + * hash_for_each_possible - iterate over all possible objects hashing to the
> + * same bucket
> + * @name: hashtable to iterate
> + * @obj: the type * to use as a loop cursor for each entry
> + * @member: the name of the hlist_node within the struct
> + * @key: the key of the objects to iterate over
> + */
> +#define hash_for_each_possible(name, obj, member, key) \
> + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
> +
> +#endif
> diff --git a/include/erofs/internal.h b/include/erofs/internal.h
> index b7ce6f8..33a72b5 100644
> --- a/include/erofs/internal.h
> +++ b/include/erofs/internal.h
> @@ -59,7 +59,7 @@ struct erofs_sb_info {
> extern struct erofs_sb_info sbi;
>
> struct erofs_inode {
> - struct list_head i_hash, i_subdirs;
> + struct list_head i_hash, i_subdirs, i_xattrs;
>
> unsigned int i_count;
> struct erofs_inode *i_parent;
> diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
> new file mode 100644
> index 0000000..dff9fd6
> --- /dev/null
> +++ b/include/erofs/xattr.h
> @@ -0,0 +1,22 @@
> +// SPDX-License-Identifier: GPL-2.0+
> +/*
> + * include/erofs/xattr.h
> + *
> + */
> +
> +#ifndef __EROFS_XATTR_H
> +#define __EROFS_XATTR_H
> +
> +#define XATTR_COUNT(_size) ({\
> + u32 __size = le16_to_cpu(_size); \
> + ((__size) == 0) ? 0 : \
> + (_size - sizeof(struct erofs_xattr_ibody_header)) / \
> + sizeof(struct erofs_xattr_entry) + 1; })
> +
> +
> +int cust_xattr(struct list_head *hlist);
> +int read_xattr_from_src(const char *path, struct list_head *hlist);
> +int xattr_entry_size(struct list_head *hlist);
> +char *xattr_data(struct list_head *hlist, int size);
> +
> +#endif
> diff --git a/lib/Makefile.am b/lib/Makefile.am
> index dea82f7..cbe3243 100644
> --- a/lib/Makefile.am
> +++ b/lib/Makefile.am
> @@ -2,7 +2,8 @@
> # Makefile.am
>
> noinst_LTLIBRARIES = liberofs.la
> -liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c
> +liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c \
> + xattr.c
> liberofs_la_CFLAGS = -Wall -Werror -I$(top_srcdir)/include
> if ENABLE_LZ4
> liberofs_la_CFLAGS += ${LZ4_CFLAGS}
> diff --git a/lib/inode.c b/lib/inode.c
> index 8b38270..615f117 100644
> --- a/lib/inode.c
> +++ b/lib/inode.c
> @@ -18,6 +18,7 @@
> #include "erofs/cache.h"
> #include "erofs/io.h"
> #include "erofs/compress.h"
> +#include "erofs/xattr.h"
>
> struct erofs_sb_info sbi;
>
> @@ -364,8 +365,10 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
> /* let's support v1 currently */
> struct erofs_inode_v1 v1 = {0};
> int ret;
> + uint16_t count = XATTR_COUNT(inode->xattr_isize);
>
> v1.i_advise = cpu_to_le16(0 | (inode->data_mapping_mode << 1));
> + v1.i_xattr_icount = cpu_to_le16(count);
> v1.i_mode = cpu_to_le16(inode->i_mode);
> v1.i_nlink = cpu_to_le16(inode->i_nlink);
> v1.i_size = cpu_to_le32((u32)inode->i_size);
> @@ -399,6 +402,20 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
> return false;
> off += inode->inode_isize;
>
> + if (inode->xattr_isize) {
> + char *pbuf = xattr_data(&inode->i_xattrs, inode->xattr_isize);
> +
> + if (IS_ERR(pbuf))
> + return false;
> +
> + ret = dev_write(pbuf, off, inode->xattr_isize);
> + free(pbuf);
> + if (ret)
> + return false;
> +
> + off += inode->xattr_isize;
> + }
> +
> if (inode->extent_isize) {
> /* write compression metadata */
> off = Z_EROFS_VLE_EXTENT_ALIGN(off);
> @@ -452,6 +469,7 @@ int erofs_prepare_inode_buffer(struct erofs_inode *inode)
>
> DBG_BUGON(inode->bh || inode->bh_inline);
>
> + inode->xattr_isize = xattr_entry_size(&inode->i_xattrs);
> inodesize = inode->inode_isize + inode->xattr_isize +
> inode->extent_isize;
>
> @@ -612,6 +630,7 @@ struct erofs_inode *erofs_new_inode(void)
> inode->i_count = 1;
>
> init_list_head(&inode->i_subdirs);
> + init_list_head(&inode->i_xattrs);
> inode->xattr_isize = 0;
> inode->extent_isize = 0;
>
> @@ -699,6 +718,10 @@ struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir)
> struct dirent *dp;
> struct erofs_dentry *d;
>
> + ret = read_xattr_from_src(dir->i_srcpath, &dir->i_xattrs);
> + if (ret)
> + return ERR_PTR(ret);
> +
> if (!S_ISDIR(dir->i_mode)) {
> if (S_ISLNK(dir->i_mode)) {
> char *const symlink = malloc(dir->i_size);
> diff --git a/lib/xattr.c b/lib/xattr.c
> new file mode 100644
> index 0000000..6278abc
> --- /dev/null
> +++ b/lib/xattr.c
> @@ -0,0 +1,319 @@
> +// SPDX-License-Identifier: GPL-2.0+
> +/*
> + * lib/xattr.c
> + *
> + */
> +
> +#include <stdlib.h>
> +#include <sys/types.h>
> +#include <sys/xattr.h>
> +#include <linux/xattr.h>
> +#include <errno.h>
> +#include <string.h>
> +
> +#include "erofs/defs.h"
> +#include "erofs/print.h"
> +#include "erofs/list.h"
> +#include "erofs/internal.h"
> +#include "erofs/hashtable.h"
> +#include "err.h"
> +#define EROFS_XATTR_HASH_TABLE_BITS 16
> +
> +struct xattr_item {
> + const char *buf;
> + unsigned int keylen;
> + unsigned int vallen;
> + unsigned int count;
> + u8 index;
> + struct hlist_node node;
> +};
> +
> +struct xattr_list {
> + struct xattr_item *item;
> + struct list_head list;
> +};
> +
> +DECLARE_HASHTABLE(my_hash_table, EROFS_XATTR_HASH_TABLE_BITS);
> +
> +struct xattr_prefix {
> + const char *prefix;
> + uint16_t prefix_len;
> + u8 index;
> +} prefix[] = {
> + {
> + XATTR_USER_PREFIX,
> + XATTR_USER_PREFIX_LEN,
> + EROFS_XATTR_INDEX_USER
> + },
> + {
> + XATTR_SECURITY_PREFIX,
> + XATTR_SYSTEM_PREFIX_LEN,
> + EROFS_XATTR_INDEX_SECURITY
> + },
> + {
> + XATTR_TRUSTED_PREFIX,
> + XATTR_TRUSTED_PREFIX_LEN,
> + EROFS_XATTR_INDEX_TRUSTED
> + },
> + {NULL, 0, 0},
> +};
> +
> +static inline void hxattr_add(struct hlist_node *node, uint16_t key)
> +{
> + hash_add(my_hash_table, node, key);
> +}
> +
> +static inline void hxattr_del(struct hlist_node *node)
> +{
> + hash_del(node);
> +}
> +
> +static struct xattr_item *hxattr_match(const char *buf, int len, u8 index)
> +{
> + struct xattr_item *item;
> + uint16_t mkey = crc16(buf, len);
> +
> + hash_for_each_possible(my_hash_table, item, node, mkey) {
> + if (index == item->index &&
> + len == (item->keylen + item->vallen) &&
> + !memcmp(buf, item->buf, len)) {
> + return item;
> + }
> + }
> +
> + return ERR_PTR(-ENOENT);
> +}
> +
> +static bool match_index(const char *key, u8 *index, uint16_t *len)
> +{
> + struct xattr_prefix *p = prefix;
> +
> + while (p->prefix) {
> + if (strncmp(p->prefix, key, p->prefix_len)) {
> + *len = p->prefix_len;
> + *index = p->index;
> + return true;
> + }
> + p++;
> + }
> +
> + return false;
> +}
> +
> +static struct xattr_item *new_xattr(const char *buf, u8 index,
> + int keylen, int vallen)
> +{
> + struct xattr_item *item = malloc(sizeof(*item));
> +
> + if (!item)
> + return ERR_PTR(-ENOMEM);
> +
> + memset(item, 0, sizeof(*item));
> + INIT_HLIST_NODE(&item->node);
> + item->buf = buf;
> + item->keylen = keylen;
> + item->vallen = vallen;
> + item->count = 1;
> + item->index = index;
> + if (!item->index)
> + return ERR_PTR(-EPERM);
> +
> + return item;
> +}
> +
> +static int xattr_add(struct list_head *hlist, struct xattr_item *item)
> +{
> + struct xattr_list *mlist = malloc(sizeof(*mlist));
> +
> + if (!mlist)
> + return -ENOMEM;
> +
> + init_list_head(&mlist->list);
> + mlist->item = item;
> + list_add(&mlist->list, hlist);
> + return 0;
> +}
> +
> +static struct xattr_item *list_xattr_value(const char *path, const char *key)
> +{
> + ssize_t keylen, vallen;
> + char *kxattr;
> + struct xattr_item *item;
> + u8 index;
> + uint16_t prelen, suflen;
> +
> + /* Output attribute key.*/
> + erofs_info("path:%s key: [%s] ", path, key);
> +
> + keylen = strlen(key);
> + if (!match_index(key, &index, &prelen))
> + return ERR_PTR(-ENODATA);
> +
> + BUG_ON(keylen < prelen);
> + /* Determine length of the value.*/
> + vallen = lgetxattr(path, key, NULL, 0);
> + if (vallen == -1)
> + return ERR_PTR(-errno);
> +
> + /*
> + * Allocate value buffer.
> + * One extra byte is needed to append 0x00.
> + */
> + suflen = keylen - prelen;
> + kxattr = malloc(suflen + vallen + 1);
> + if (!kxattr)
> + return ERR_PTR(-ENOMEM);
> +
> + if (vallen == 0)
> + goto value_0;
> +
> + /* Copy value to buffer.*/
> + vallen = lgetxattr(path, key, kxattr + suflen, vallen);
> + if (vallen == -1) {
> + free(kxattr);
> + return ERR_PTR(-errno);
> + }
> +
> +value_0:
> + memcpy(kxattr, key + prelen, suflen);
> + /* Output attribute value.*/
> + kxattr[suflen + vallen] = '\0';
> + erofs_info("value: [%s]", kxattr + suflen);
> +
> + /* kxattr is used at xattr_add(), neednt free if SUCCESS */
> + item = hxattr_match(kxattr, suflen + vallen, index);
> + if (!IS_ERR(item)) {
> + item->count++;
> + free(kxattr);
> + return item;
> + }
> +
> + item = new_xattr(kxattr, index, suflen, vallen);
> + if (IS_ERR(item))
> + free(kxattr);
> +
> + return item;
> +}
> +
> +int read_xattr_from_src(const char *path, struct list_head *hlist)
> +{
> + int ret = 0;
> + char *kbuf, *key;
> + ssize_t buflen = llistxattr(path, NULL, 0);
> +
> + if (buflen == -1)
> + return -errno;
> + else if (buflen == 0)
> + return 0;
> +
> + /* Allocate the buffer.*/
> + kbuf = malloc(buflen);
> + if (!kbuf)
> + return -errno;
> +
> + /* Copy the list of attribute keys to the buffer.*/
> + buflen = llistxattr(path, kbuf, buflen);
> + if (buflen == -1) {
> + ret = -errno;
> + goto exit_err;
> + }
> +
> + /*
> + * Loop over the list of zero terminated strings with the
> + * attribute keys. Use the remaining buffer length to determine
> + * the end of the list.
> + */
> + key = kbuf;
> + while (buflen > 0) {
> + size_t keylen = strlen(key) + 1;
> + struct xattr_item *item = list_xattr_value(path, key);
> +
> + if (!item) {
> + ret = -errno;
> + goto exit_err;
> + }
> +
> + if (!hash_hashed(&item->node)) {
> + uint16_t mkey;
> +
> + mkey = crc16(item->buf, item->keylen + item->vallen);
> + hxattr_add(&item->node, mkey);
> + }
> +
> + if (hlist) {
> + ret = xattr_add(hlist, item);
> + if (ret < 0)
> + goto exit_err;
> + }
> +
> + buflen -= keylen;
> + key += keylen;
> + }
> +
> +exit_err:
> + free(kbuf);
> + return ret;
> +
> +}
> +
> +int xattr_entry_size(struct list_head *hlist)
> +{
> + int sum = 0;
> + struct xattr_list *lst;
> +
> + if (list_empty(hlist))
> + return 0;
> +
> + list_for_each_entry(lst, hlist, list) {
> + struct xattr_item *item = lst->item;
> +
> + sum += sizeof(struct erofs_xattr_entry);
> + sum += item->keylen + item->vallen;
> + sum = EROFS_XATTR_ALIGN(sum);
> + }
> +
> + sum += sizeof(struct erofs_xattr_ibody_header);
> +
> + return EROFS_XATTR_ALIGN(sum);
> +}
> +
> +char *xattr_data(struct list_head *hlist, int xattr_size)
> +{
> + struct xattr_list *lst;
> + char *buf, *pbuf;
> + unsigned int size = 0;
> + struct erofs_xattr_ibody_header header = {
> + .h_checksum = 0,
> + .h_shared_count = 0,
> + };
> +
> + erofs_info("xattr_size=%d", xattr_size);
> + buf = malloc(xattr_size);
> + if (!buf)
> + return ERR_PTR(-ENOMEM);
> +
> + memset(buf, 0, xattr_size);
> + pbuf = buf + sizeof(struct erofs_xattr_ibody_header);
> +
> + list_for_each_entry(lst, hlist, list) {
> + struct erofs_xattr_entry entry;
> + struct xattr_item *item = lst->item;
> +
> + entry.e_name_index = item->index;
> + entry.e_name_len = item->keylen;
> + entry.e_value_size = cpu_to_le16(item->vallen);
> +
> + BUG_ON(size > xattr_size);
> + memcpy(pbuf + size, &entry, sizeof(entry));
> +
> + size += sizeof(struct erofs_xattr_entry);
> + memcpy(pbuf + size, item->buf, item->keylen + item->vallen);
> + size += item->keylen + item->vallen;
> + size = EROFS_XATTR_ALIGN(size);
> + }
> +
> + memcpy(buf, &header, sizeof(header));
> +
> + return buf;
> +}
> +
>
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH] erofs-utils: introduce xattr support
2019-08-05 14:54 ` Li Guifu
@ 2019-08-05 17:30 ` Gao Xiang
2019-08-11 17:10 ` [PATCH v2] erofs-utils: introduce preliminary " Gao Xiang
0 siblings, 1 reply; 12+ messages in thread
From: Gao Xiang @ 2019-08-05 17:30 UTC (permalink / raw)
Hi Guifu,
On Mon, Aug 05, 2019@10:54:52PM +0800, Li Guifu wrote:
> Hi
> It seems look good, a great step.
> A new development branch is a good choice to make this more stable
> and the hash function would better use a light weight one like
> full_name_hash in the kernel
Yes, you are right. It seems a great baby step of xattr.
I will make a new branch to test this of course.
BTW, Guifu, please help review this patch as well, and
don't forget to tag "Reviewed-by:" if it looks good to you.
Thanks,
Gao Xiang
>
> On 2019/7/26 19:50, htyuxe+dhbrei4sq0df8 at grr.la wrote:
> > load xattrs from source files and pack them into target image.
> > ---
> > include/erofs/hashtable.h | 502 ++++++++++++++++++++++++++++++++++++++
> > include/erofs/internal.h | 2 +-
> > include/erofs/xattr.h | 22 ++
> > lib/Makefile.am | 3 +-
> > lib/inode.c | 23 ++
> > lib/xattr.c | 319 ++++++++++++++++++++++++
> > 6 files changed, 869 insertions(+), 2 deletions(-)
> > create mode 100644 include/erofs/hashtable.h
> > create mode 100644 include/erofs/xattr.h
> > create mode 100644 lib/xattr.c
> >
> > diff --git a/include/erofs/hashtable.h b/include/erofs/hashtable.h
> > new file mode 100644
> > index 0000000..349a655
> > --- /dev/null
> > +++ b/include/erofs/hashtable.h
> > @@ -0,0 +1,502 @@
> > +/* SPDX-License-Identifier: GPL-2.0+ */
> > +/*
> > + * include/erofs/hashtable.h
> > + *
> > + */
> > +
> > +#ifndef __EROFS_HASHTABLE_H
> > +#define __EROFS_HASHTABLE_H
> > +
> > +#define BITS_PER_LONG 32
> > +#ifndef __always_inline
> > +#define __always_inline inline
> > +#endif
> > +
> > +/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
> > +#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
> > +/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
> > +#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
> > +
> > +#if BITS_PER_LONG == 32
> > +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
> > +#define hash_long(val, bits) hash_32(val, bits)
> > +#elif BITS_PER_LONG == 64
> > +#define hash_long(val, bits) hash_64(val, bits)
> > +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
> > +#else
> > +#error Wordsize not 32 or 64
> > +#endif
> > +
> > +struct hlist_head {
> > + struct hlist_node *first;
> > +};
> > +
> > +struct hlist_node {
> > + struct hlist_node *next, **pprev;
> > +};
> > +
> > +/*
> > + * Architectures might want to move the poison pointer offset
> > + * into some well-recognized area such as 0xdead000000000000,
> > + * that is also not mappable by user-space exploits:
> > + */
> > +#ifdef CONFIG_ILLEGAL_POINTER_VALUE
> > +# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
> > +#else
> > +# define POISON_POINTER_DELTA 0
> > +#endif
> > +
> > +/*
> > + * These are non-NULL pointers that will result in page faults
> > + * under normal circumstances, used to verify that nobody uses
> > + * non-initialized list entries.
> > + */
> > +#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
> > +#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
> > +
> > +#undef offsetof
> > +#ifdef __compiler_offsetof
> > +#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
> > +#else
> > +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
> > +#endif
> > +
> > +/*
> > + * Double linked lists with a single pointer list head.
> > + * Mostly useful for hash tables where the two pointer list head is
> > + * too wasteful.
> > + * You lose the ability to access the tail in O(1).
> > + */
> > +
> > +#define HLIST_HEAD_INIT { .first = NULL }
> > +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
> > +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
> > +static inline void INIT_HLIST_NODE(struct hlist_node *h)
> > +{
> > + h->next = NULL;
> > + h->pprev = NULL;
> > +}
> > +
> > +static inline int hlist_unhashed(const struct hlist_node *h)
> > +{
> > + return !h->pprev;
> > +}
> > +
> > +static inline int hlist_empty(const struct hlist_head *h)
> > +{
> > + return !h->first;
> > +}
> > +
> > +static inline void __hlist_del(struct hlist_node *n)
> > +{
> > + struct hlist_node *next = n->next;
> > + struct hlist_node **pprev = n->pprev;
> > +
> > + *pprev = next;
> > + if (next)
> > + next->pprev = pprev;
> > +}
> > +
> > +static inline void hlist_del(struct hlist_node *n)
> > +{
> > + __hlist_del(n);
> > + n->next = LIST_POISON1;
> > + n->pprev = LIST_POISON2;
> > +}
> > +
> > +static inline void hlist_del_init(struct hlist_node *n)
> > +{
> > + if (!hlist_unhashed(n)) {
> > + __hlist_del(n);
> > + INIT_HLIST_NODE(n);
> > + }
> > +}
> > +
> > +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
> > +{
> > + struct hlist_node *first = h->first;
> > +
> > + n->next = first;
> > + if (first)
> > + first->pprev = &n->next;
> > + h->first = n;
> > + n->pprev = &h->first;
> > +}
> > +
> > +/* next must be != NULL */
> > +static inline void hlist_add_before(struct hlist_node *n,
> > + struct hlist_node *next)
> > +{
> > + n->pprev = next->pprev;
> > + n->next = next;
> > + next->pprev = &n->next;
> > + *(n->pprev) = n;
> > +}
> > +
> > +static inline void hlist_add_behind(struct hlist_node *n,
> > + struct hlist_node *prev)
> > +{
> > + n->next = prev->next;
> > + prev->next = n;
> > + n->pprev = &prev->next;
> > +
> > + if (n->next)
> > + n->next->pprev = &n->next;
> > +}
> > +
> > +/* after that we'll appear to be on some hlist and hlist_del will work */
> > +static inline void hlist_add_fake(struct hlist_node *n)
> > +{
> > + n->pprev = &n->next;
> > +}
> > +
> > +/*
> > + * Move a list from one list head to another. Fixup the pprev
> > + * reference of the first entry if it exists.
> > + */
> > +static inline void hlist_move_list(struct hlist_head *old,
> > + struct hlist_head *new)
> > +{
> > + new->first = old->first;
> > + if (new->first)
> > + new->first->pprev = &new->first;
> > + old->first = NULL;
> > +}
> > +
> > +#define hlist_entry(ptr, type, member) container_of(ptr, type, member)
> > +
> > +#define hlist_for_each(pos, head) \
> > + for (pos = (head)->first; pos; pos = pos->next)
> > +
> > +#define hlist_for_each_safe(pos, n, head) \
> > + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
> > + pos = n)
> > +
> > +#define hlist_entry_safe(ptr, type, member) \
> > + ({ typeof(ptr) ____ptr = (ptr); \
> > + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
> > + })
> > +
> > +/**
> > + * hlist_for_each_entry - iterate over list of given type
> > + * @pos:the type * to use as a loop cursor.
> > + * @head:the head for your list.
> > + * @member:the name of the hlist_node within the struct.
> > + */
> > +#define hlist_for_each_entry(pos, head, member) \
> > + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
> > + pos; \
> > + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
> > +
> > +/**
> > + * hlist_for_each_entry_continue
> > + * iterate over a hlist continuing after current point
> > + * @pos:the type * to use as a loop cursor.
> > + * @member:the name of the hlist_node within the struct.
> > + */
> > +#define hlist_for_each_entry_continue(pos, member) \
> > + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
> > + pos; \
> > + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
> > +
> > +/**
> > + * hlist_for_each_entry_from
> > + * iterate over a hlist continuing from current point
> > + * @pos: the type * to use as a loop cursor.
> > + * @member: the name of the hlist_node within the struct.
> > + */
> > +#define hlist_for_each_entry_from(pos, member) \
> > + for (; pos; \
> > + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
> > +
> > +/**
> > + * hlist_for_each_entry_safe
> > + * iterate over list of given type safe against removal of list entry
> > + * @pos:the type * to use as a loop cursor.
> > + * @n:another &struct hlist_node to use as temporary storage
> > + * @head:the head for your list.
> > + * @member:the name of the hlist_node within the struct.
> > + */
> > +#define hlist_for_each_entry_safe(pos, n, head, member) \
> > + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
> > + pos && ({ n = pos->member.next; 1; }); \
> > + pos = hlist_entry_safe(n, typeof(*pos), member))
> > +
> > +static __always_inline u64 hash_64(u64 val, unsigned int bits)
> > +{
> > + u64 hash = val;
> > +
> > +#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
> > + hash = hash * GOLDEN_RATIO_PRIME_64;
> > +#else
> > + /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
> > + u64 n = hash;
> > +
> > + n <<= 18;
> > + hash -= n;
> > + n <<= 33;
> > + hash -= n;
> > + n <<= 3;
> > + hash += n;
> > + n <<= 3;
> > + hash -= n;
> > + n <<= 4;
> > + hash += n;
> > + n <<= 2;
> > + hash += n;
> > +#endif
> > +
> > + /* High bits are more random, so use them. */
> > + return hash >> (64 - bits);
> > +}
> > +
> > +static inline u32 hash_32(u32 val, unsigned int bits)
> > +{
> > + /* On some cpus multiply is faster, on others gcc will do shifts */
> > + u32 hash = val * GOLDEN_RATIO_PRIME_32;
> > +
> > + /* High bits are more random, so use them. */
> > + return hash >> (32 - bits);
> > +}
> > +
> > +/**
> > + * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
> > + * @n - parameter
> > + *
> > + * constant-capable log of base 2 calculation
> > + * - this can be used to initialise global variables from constant data, hence
> > + * the massive ternary operator construction
> > + *
> > + * selects the appropriately-sized optimised version depending on sizeof(n)
> > + */
> > +#define ilog2(n) \
> > +( \
> > + (n) & (1ULL << 63) ? 63 : \
> > + (n) & (1ULL << 62) ? 62 : \
> > + (n) & (1ULL << 61) ? 61 : \
> > + (n) & (1ULL << 60) ? 60 : \
> > + (n) & (1ULL << 59) ? 59 : \
> > + (n) & (1ULL << 58) ? 58 : \
> > + (n) & (1ULL << 57) ? 57 : \
> > + (n) & (1ULL << 56) ? 56 : \
> > + (n) & (1ULL << 55) ? 55 : \
> > + (n) & (1ULL << 54) ? 54 : \
> > + (n) & (1ULL << 53) ? 53 : \
> > + (n) & (1ULL << 52) ? 52 : \
> > + (n) & (1ULL << 51) ? 51 : \
> > + (n) & (1ULL << 50) ? 50 : \
> > + (n) & (1ULL << 49) ? 49 : \
> > + (n) & (1ULL << 48) ? 48 : \
> > + (n) & (1ULL << 47) ? 47 : \
> > + (n) & (1ULL << 46) ? 46 : \
> > + (n) & (1ULL << 45) ? 45 : \
> > + (n) & (1ULL << 44) ? 44 : \
> > + (n) & (1ULL << 43) ? 43 : \
> > + (n) & (1ULL << 42) ? 42 : \
> > + (n) & (1ULL << 41) ? 41 : \
> > + (n) & (1ULL << 40) ? 40 : \
> > + (n) & (1ULL << 39) ? 39 : \
> > + (n) & (1ULL << 38) ? 38 : \
> > + (n) & (1ULL << 37) ? 37 : \
> > + (n) & (1ULL << 36) ? 36 : \
> > + (n) & (1ULL << 35) ? 35 : \
> > + (n) & (1ULL << 34) ? 34 : \
> > + (n) & (1ULL << 33) ? 33 : \
> > + (n) & (1ULL << 32) ? 32 : \
> > + (n) & (1ULL << 31) ? 31 : \
> > + (n) & (1ULL << 30) ? 30 : \
> > + (n) & (1ULL << 29) ? 29 : \
> > + (n) & (1ULL << 28) ? 28 : \
> > + (n) & (1ULL << 27) ? 27 : \
> > + (n) & (1ULL << 26) ? 26 : \
> > + (n) & (1ULL << 25) ? 25 : \
> > + (n) & (1ULL << 24) ? 24 : \
> > + (n) & (1ULL << 23) ? 23 : \
> > + (n) & (1ULL << 22) ? 22 : \
> > + (n) & (1ULL << 21) ? 21 : \
> > + (n) & (1ULL << 20) ? 20 : \
> > + (n) & (1ULL << 19) ? 19 : \
> > + (n) & (1ULL << 18) ? 18 : \
> > + (n) & (1ULL << 17) ? 17 : \
> > + (n) & (1ULL << 16) ? 16 : \
> > + (n) & (1ULL << 15) ? 15 : \
> > + (n) & (1ULL << 14) ? 14 : \
> > + (n) & (1ULL << 13) ? 13 : \
> > + (n) & (1ULL << 12) ? 12 : \
> > + (n) & (1ULL << 11) ? 11 : \
> > + (n) & (1ULL << 10) ? 10 : \
> > + (n) & (1ULL << 9) ? 9 : \
> > + (n) & (1ULL << 8) ? 8 : \
> > + (n) & (1ULL << 7) ? 7 : \
> > + (n) & (1ULL << 6) ? 6 : \
> > + (n) & (1ULL << 5) ? 5 : \
> > + (n) & (1ULL << 4) ? 4 : \
> > + (n) & (1ULL << 3) ? 3 : \
> > + (n) & (1ULL << 2) ? 2 : \
> > + (n) & (1ULL << 1) ? 1 : 0 \
> > +)
> > +
> > +static const uint16_t crc16tab[256] = {
> > + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
> > + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
> > + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
> > + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
> > + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
> > + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
> > + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
> > + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
> > + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
> > + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
> > + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
> > + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
> > + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
> > + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
> > + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
> > + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
> > + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
> > + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
> > + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
> > + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
> > + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
> > + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
> > + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
> > + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
> > + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
> > + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
> > + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
> > + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
> > + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
> > + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
> > + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
> > + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
> > +};
> > +
> > +uint16_t crc16(const char *buf, int len)
> > +{
> > + int counter;
> > + uint16_t crc = 0;
> > +
> > + for (counter = 0; counter < len; counter++)
> > + crc = (crc<<8) ^ crc16tab[((crc>>8) ^ *buf++)&0x00FF];
> > + return crc;
> > +}
> > +
> > +#define DEFINE_HASHTABLE(name, bits) \
> > + struct hlist_head name[1 << (bits)] = \
> > + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
> > +
> > +#define DECLARE_HASHTABLE(name, bits) \
> > + struct hlist_head name[1 << (bits)]
> > +
> > +#define HASH_SIZE(name) (ARRAY_SIZE(name))
> > +#define HASH_BITS(name) ilog2(HASH_SIZE(name))
> > +
> > +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels*/
> > +#define hash_min(val, bits) \
> > + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
> > +
> > +static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
> > +{
> > + unsigned int i;
> > +
> > + for (i = 0; i < sz; i++)
> > + INIT_HLIST_HEAD(&ht[i]);
> > +}
> > +
> > +/**
> > + * hash_init - initialize a hash table
> > + * @hashtable: hashtable to be initialized
> > + *
> > + * Calculates the size of the hashtable from the given parameter, otherwise
> > + * same as hash_init_size.
> > + *
> > + * This has to be a macro since HASH_BITS() will not work on pointers since
> > + * it calculates the size during preprocessing.
> > + */
> > +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
> > +
> > +/**
> > + * hash_add - add an object to a hashtable
> > + * @hashtable: hashtable to add to
> > + * @node: the &struct hlist_node of the object to be added
> > + * @key: the key of the object to be added
> > + */
> > +#define hash_add(hashtable, node, key) \
> > + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
> > +
> > +/**
> > + * hash_hashed - check whether an object is in any hashtable
> > + * @node: the &struct hlist_node of the object to be checked
> > + */
> > +static inline bool hash_hashed(struct hlist_node *node)
> > +{
> > + return !hlist_unhashed(node);
> > +}
> > +
> > +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
> > +{
> > + unsigned int i;
> > +
> > + for (i = 0; i < sz; i++)
> > + if (!hlist_empty(&ht[i]))
> > + return false;
> > +
> > + return true;
> > +}
> > +
> > +/**
> > + * hash_empty - check whether a hashtable is empty
> > + * @hashtable: hashtable to check
> > + *
> > + * This has to be a macro since HASH_BITS() will not work on pointers since
> > + * it calculates the size during preprocessing.
> > + */
> > +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
> > +
> > +/**
> > + * hash_del - remove an object from a hashtable
> > + * @node: &struct hlist_node of the object to remove
> > + */
> > +static inline void hash_del(struct hlist_node *node)
> > +{
> > + hlist_del_init(node);
> > +}
> > +
> > +/**
> > + * hash_for_each - iterate over a hashtable
> > + * @name: hashtable to iterate
> > + * @bkt: integer to use as bucket loop cursor
> > + * @obj: the type * to use as a loop cursor for each entry
> > + * @member: the name of the hlist_node within the struct
> > + */
> > +#define hash_for_each(name, bkt, obj, member) \
> > + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
> > + (bkt)++)\
> > + hlist_for_each_entry(obj, &name[bkt], member)
> > +
> > +/**
> > + * hash_for_each_safe - iterate over a hashtable safe against removal of
> > + * hash entry
> > + * @name: hashtable to iterate
> > + * @bkt: integer to use as bucket loop cursor
> > + * @tmp: a &struct used for temporary storage
> > + * @obj: the type * to use as a loop cursor for each entry
> > + * @member: the name of the hlist_node within the struct
> > + */
> > +#define hash_for_each_safe(name, bkt, tmp, obj, member) \
> > + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
> > + (bkt)++)\
> > + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
> > +
> > +/**
> > + * hash_for_each_possible - iterate over all possible objects hashing to the
> > + * same bucket
> > + * @name: hashtable to iterate
> > + * @obj: the type * to use as a loop cursor for each entry
> > + * @member: the name of the hlist_node within the struct
> > + * @key: the key of the objects to iterate over
> > + */
> > +#define hash_for_each_possible(name, obj, member, key) \
> > + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
> > +
> > +#endif
> > diff --git a/include/erofs/internal.h b/include/erofs/internal.h
> > index b7ce6f8..33a72b5 100644
> > --- a/include/erofs/internal.h
> > +++ b/include/erofs/internal.h
> > @@ -59,7 +59,7 @@ struct erofs_sb_info {
> > extern struct erofs_sb_info sbi;
> > struct erofs_inode {
> > - struct list_head i_hash, i_subdirs;
> > + struct list_head i_hash, i_subdirs, i_xattrs;
> > unsigned int i_count;
> > struct erofs_inode *i_parent;
> > diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
> > new file mode 100644
> > index 0000000..dff9fd6
> > --- /dev/null
> > +++ b/include/erofs/xattr.h
> > @@ -0,0 +1,22 @@
> > +// SPDX-License-Identifier: GPL-2.0+
> > +/*
> > + * include/erofs/xattr.h
> > + *
> > + */
> > +
> > +#ifndef __EROFS_XATTR_H
> > +#define __EROFS_XATTR_H
> > +
> > +#define XATTR_COUNT(_size) ({\
> > + u32 __size = le16_to_cpu(_size); \
> > + ((__size) == 0) ? 0 : \
> > + (_size - sizeof(struct erofs_xattr_ibody_header)) / \
> > + sizeof(struct erofs_xattr_entry) + 1; })
> > +
> > +
> > +int cust_xattr(struct list_head *hlist);
> > +int read_xattr_from_src(const char *path, struct list_head *hlist);
> > +int xattr_entry_size(struct list_head *hlist);
> > +char *xattr_data(struct list_head *hlist, int size);
> > +
> > +#endif
> > diff --git a/lib/Makefile.am b/lib/Makefile.am
> > index dea82f7..cbe3243 100644
> > --- a/lib/Makefile.am
> > +++ b/lib/Makefile.am
> > @@ -2,7 +2,8 @@
> > # Makefile.am
> > noinst_LTLIBRARIES = liberofs.la
> > -liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c
> > +liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c \
> > + xattr.c
> > liberofs_la_CFLAGS = -Wall -Werror -I$(top_srcdir)/include
> > if ENABLE_LZ4
> > liberofs_la_CFLAGS += ${LZ4_CFLAGS}
> > diff --git a/lib/inode.c b/lib/inode.c
> > index 8b38270..615f117 100644
> > --- a/lib/inode.c
> > +++ b/lib/inode.c
> > @@ -18,6 +18,7 @@
> > #include "erofs/cache.h"
> > #include "erofs/io.h"
> > #include "erofs/compress.h"
> > +#include "erofs/xattr.h"
> > struct erofs_sb_info sbi;
> > @@ -364,8 +365,10 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
> > /* let's support v1 currently */
> > struct erofs_inode_v1 v1 = {0};
> > int ret;
> > + uint16_t count = XATTR_COUNT(inode->xattr_isize);
> > v1.i_advise = cpu_to_le16(0 | (inode->data_mapping_mode << 1));
> > + v1.i_xattr_icount = cpu_to_le16(count);
> > v1.i_mode = cpu_to_le16(inode->i_mode);
> > v1.i_nlink = cpu_to_le16(inode->i_nlink);
> > v1.i_size = cpu_to_le32((u32)inode->i_size);
> > @@ -399,6 +402,20 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
> > return false;
> > off += inode->inode_isize;
> > + if (inode->xattr_isize) {
> > + char *pbuf = xattr_data(&inode->i_xattrs, inode->xattr_isize);
> > +
> > + if (IS_ERR(pbuf))
> > + return false;
> > +
> > + ret = dev_write(pbuf, off, inode->xattr_isize);
> > + free(pbuf);
> > + if (ret)
> > + return false;
> > +
> > + off += inode->xattr_isize;
> > + }
> > +
> > if (inode->extent_isize) {
> > /* write compression metadata */
> > off = Z_EROFS_VLE_EXTENT_ALIGN(off);
> > @@ -452,6 +469,7 @@ int erofs_prepare_inode_buffer(struct erofs_inode *inode)
> > DBG_BUGON(inode->bh || inode->bh_inline);
> > + inode->xattr_isize = xattr_entry_size(&inode->i_xattrs);
> > inodesize = inode->inode_isize + inode->xattr_isize +
> > inode->extent_isize;
> > @@ -612,6 +630,7 @@ struct erofs_inode *erofs_new_inode(void)
> > inode->i_count = 1;
> > init_list_head(&inode->i_subdirs);
> > + init_list_head(&inode->i_xattrs);
> > inode->xattr_isize = 0;
> > inode->extent_isize = 0;
> > @@ -699,6 +718,10 @@ struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir)
> > struct dirent *dp;
> > struct erofs_dentry *d;
> > + ret = read_xattr_from_src(dir->i_srcpath, &dir->i_xattrs);
> > + if (ret)
> > + return ERR_PTR(ret);
> > +
> > if (!S_ISDIR(dir->i_mode)) {
> > if (S_ISLNK(dir->i_mode)) {
> > char *const symlink = malloc(dir->i_size);
> > diff --git a/lib/xattr.c b/lib/xattr.c
> > new file mode 100644
> > index 0000000..6278abc
> > --- /dev/null
> > +++ b/lib/xattr.c
> > @@ -0,0 +1,319 @@
> > +// SPDX-License-Identifier: GPL-2.0+
> > +/*
> > + * lib/xattr.c
> > + *
> > + */
> > +
> > +#include <stdlib.h>
> > +#include <sys/types.h>
> > +#include <sys/xattr.h>
> > +#include <linux/xattr.h>
> > +#include <errno.h>
> > +#include <string.h>
> > +
> > +#include "erofs/defs.h"
> > +#include "erofs/print.h"
> > +#include "erofs/list.h"
> > +#include "erofs/internal.h"
> > +#include "erofs/hashtable.h"
> > +#include "err.h"
> > +#define EROFS_XATTR_HASH_TABLE_BITS 16
> > +
> > +struct xattr_item {
> > + const char *buf;
> > + unsigned int keylen;
> > + unsigned int vallen;
> > + unsigned int count;
> > + u8 index;
> > + struct hlist_node node;
> > +};
> > +
> > +struct xattr_list {
> > + struct xattr_item *item;
> > + struct list_head list;
> > +};
> > +
> > +DECLARE_HASHTABLE(my_hash_table, EROFS_XATTR_HASH_TABLE_BITS);
> > +
> > +struct xattr_prefix {
> > + const char *prefix;
> > + uint16_t prefix_len;
> > + u8 index;
> > +} prefix[] = {
> > + {
> > + XATTR_USER_PREFIX,
> > + XATTR_USER_PREFIX_LEN,
> > + EROFS_XATTR_INDEX_USER
> > + },
> > + {
> > + XATTR_SECURITY_PREFIX,
> > + XATTR_SYSTEM_PREFIX_LEN,
> > + EROFS_XATTR_INDEX_SECURITY
> > + },
> > + {
> > + XATTR_TRUSTED_PREFIX,
> > + XATTR_TRUSTED_PREFIX_LEN,
> > + EROFS_XATTR_INDEX_TRUSTED
> > + },
> > + {NULL, 0, 0},
> > +};
> > +
> > +static inline void hxattr_add(struct hlist_node *node, uint16_t key)
> > +{
> > + hash_add(my_hash_table, node, key);
> > +}
> > +
> > +static inline void hxattr_del(struct hlist_node *node)
> > +{
> > + hash_del(node);
> > +}
> > +
> > +static struct xattr_item *hxattr_match(const char *buf, int len, u8 index)
> > +{
> > + struct xattr_item *item;
> > + uint16_t mkey = crc16(buf, len);
> > +
> > + hash_for_each_possible(my_hash_table, item, node, mkey) {
> > + if (index == item->index &&
> > + len == (item->keylen + item->vallen) &&
> > + !memcmp(buf, item->buf, len)) {
> > + return item;
> > + }
> > + }
> > +
> > + return ERR_PTR(-ENOENT);
> > +}
> > +
> > +static bool match_index(const char *key, u8 *index, uint16_t *len)
> > +{
> > + struct xattr_prefix *p = prefix;
> > +
> > + while (p->prefix) {
> > + if (strncmp(p->prefix, key, p->prefix_len)) {
> > + *len = p->prefix_len;
> > + *index = p->index;
> > + return true;
> > + }
> > + p++;
> > + }
> > +
> > + return false;
> > +}
> > +
> > +static struct xattr_item *new_xattr(const char *buf, u8 index,
> > + int keylen, int vallen)
> > +{
> > + struct xattr_item *item = malloc(sizeof(*item));
> > +
> > + if (!item)
> > + return ERR_PTR(-ENOMEM);
> > +
> > + memset(item, 0, sizeof(*item));
> > + INIT_HLIST_NODE(&item->node);
> > + item->buf = buf;
> > + item->keylen = keylen;
> > + item->vallen = vallen;
> > + item->count = 1;
> > + item->index = index;
> > + if (!item->index)
> > + return ERR_PTR(-EPERM);
> > +
> > + return item;
> > +}
> > +
> > +static int xattr_add(struct list_head *hlist, struct xattr_item *item)
> > +{
> > + struct xattr_list *mlist = malloc(sizeof(*mlist));
> > +
> > + if (!mlist)
> > + return -ENOMEM;
> > +
> > + init_list_head(&mlist->list);
> > + mlist->item = item;
> > + list_add(&mlist->list, hlist);
> > + return 0;
> > +}
> > +
> > +static struct xattr_item *list_xattr_value(const char *path, const char *key)
> > +{
> > + ssize_t keylen, vallen;
> > + char *kxattr;
> > + struct xattr_item *item;
> > + u8 index;
> > + uint16_t prelen, suflen;
> > +
> > + /* Output attribute key.*/
> > + erofs_info("path:%s key: [%s] ", path, key);
> > +
> > + keylen = strlen(key);
> > + if (!match_index(key, &index, &prelen))
> > + return ERR_PTR(-ENODATA);
> > +
> > + BUG_ON(keylen < prelen);
> > + /* Determine length of the value.*/
> > + vallen = lgetxattr(path, key, NULL, 0);
> > + if (vallen == -1)
> > + return ERR_PTR(-errno);
> > +
> > + /*
> > + * Allocate value buffer.
> > + * One extra byte is needed to append 0x00.
> > + */
> > + suflen = keylen - prelen;
> > + kxattr = malloc(suflen + vallen + 1);
> > + if (!kxattr)
> > + return ERR_PTR(-ENOMEM);
> > +
> > + if (vallen == 0)
> > + goto value_0;
> > +
> > + /* Copy value to buffer.*/
> > + vallen = lgetxattr(path, key, kxattr + suflen, vallen);
> > + if (vallen == -1) {
> > + free(kxattr);
> > + return ERR_PTR(-errno);
> > + }
> > +
> > +value_0:
> > + memcpy(kxattr, key + prelen, suflen);
> > + /* Output attribute value.*/
> > + kxattr[suflen + vallen] = '\0';
> > + erofs_info("value: [%s]", kxattr + suflen);
> > +
> > + /* kxattr is used at xattr_add(), neednt free if SUCCESS */
> > + item = hxattr_match(kxattr, suflen + vallen, index);
> > + if (!IS_ERR(item)) {
> > + item->count++;
> > + free(kxattr);
> > + return item;
> > + }
> > +
> > + item = new_xattr(kxattr, index, suflen, vallen);
> > + if (IS_ERR(item))
> > + free(kxattr);
> > +
> > + return item;
> > +}
> > +
> > +int read_xattr_from_src(const char *path, struct list_head *hlist)
> > +{
> > + int ret = 0;
> > + char *kbuf, *key;
> > + ssize_t buflen = llistxattr(path, NULL, 0);
> > +
> > + if (buflen == -1)
> > + return -errno;
> > + else if (buflen == 0)
> > + return 0;
> > +
> > + /* Allocate the buffer.*/
> > + kbuf = malloc(buflen);
> > + if (!kbuf)
> > + return -errno;
> > +
> > + /* Copy the list of attribute keys to the buffer.*/
> > + buflen = llistxattr(path, kbuf, buflen);
> > + if (buflen == -1) {
> > + ret = -errno;
> > + goto exit_err;
> > + }
> > +
> > + /*
> > + * Loop over the list of zero terminated strings with the
> > + * attribute keys. Use the remaining buffer length to determine
> > + * the end of the list.
> > + */
> > + key = kbuf;
> > + while (buflen > 0) {
> > + size_t keylen = strlen(key) + 1;
> > + struct xattr_item *item = list_xattr_value(path, key);
> > +
> > + if (!item) {
> > + ret = -errno;
> > + goto exit_err;
> > + }
> > +
> > + if (!hash_hashed(&item->node)) {
> > + uint16_t mkey;
> > +
> > + mkey = crc16(item->buf, item->keylen + item->vallen);
> > + hxattr_add(&item->node, mkey);
> > + }
> > +
> > + if (hlist) {
> > + ret = xattr_add(hlist, item);
> > + if (ret < 0)
> > + goto exit_err;
> > + }
> > +
> > + buflen -= keylen;
> > + key += keylen;
> > + }
> > +
> > +exit_err:
> > + free(kbuf);
> > + return ret;
> > +
> > +}
> > +
> > +int xattr_entry_size(struct list_head *hlist)
> > +{
> > + int sum = 0;
> > + struct xattr_list *lst;
> > +
> > + if (list_empty(hlist))
> > + return 0;
> > +
> > + list_for_each_entry(lst, hlist, list) {
> > + struct xattr_item *item = lst->item;
> > +
> > + sum += sizeof(struct erofs_xattr_entry);
> > + sum += item->keylen + item->vallen;
> > + sum = EROFS_XATTR_ALIGN(sum);
> > + }
> > +
> > + sum += sizeof(struct erofs_xattr_ibody_header);
> > +
> > + return EROFS_XATTR_ALIGN(sum);
> > +}
> > +
> > +char *xattr_data(struct list_head *hlist, int xattr_size)
> > +{
> > + struct xattr_list *lst;
> > + char *buf, *pbuf;
> > + unsigned int size = 0;
> > + struct erofs_xattr_ibody_header header = {
> > + .h_checksum = 0,
> > + .h_shared_count = 0,
> > + };
> > +
> > + erofs_info("xattr_size=%d", xattr_size);
> > + buf = malloc(xattr_size);
> > + if (!buf)
> > + return ERR_PTR(-ENOMEM);
> > +
> > + memset(buf, 0, xattr_size);
> > + pbuf = buf + sizeof(struct erofs_xattr_ibody_header);
> > +
> > + list_for_each_entry(lst, hlist, list) {
> > + struct erofs_xattr_entry entry;
> > + struct xattr_item *item = lst->item;
> > +
> > + entry.e_name_index = item->index;
> > + entry.e_name_len = item->keylen;
> > + entry.e_value_size = cpu_to_le16(item->vallen);
> > +
> > + BUG_ON(size > xattr_size);
> > + memcpy(pbuf + size, &entry, sizeof(entry));
> > +
> > + size += sizeof(struct erofs_xattr_entry);
> > + memcpy(pbuf + size, item->buf, item->keylen + item->vallen);
> > + size += item->keylen + item->vallen;
> > + size = EROFS_XATTR_ALIGN(size);
> > + }
> > +
> > + memcpy(buf, &header, sizeof(header));
> > +
> > + return buf;
> > +}
> > +
> >
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v2] erofs-utils: introduce preliminary xattr support
2019-08-05 17:30 ` Gao Xiang
@ 2019-08-11 17:10 ` Gao Xiang
2019-10-05 14:20 ` [PATCH v3 1/2] erofs-utils: introduce inline " Gao Xiang via Linux-erofs
0 siblings, 1 reply; 12+ messages in thread
From: Gao Xiang @ 2019-08-11 17:10 UTC (permalink / raw)
From: "htyuxe+dhbrei4sq0df8@grr.la" <htyuxe+dhbrei4sq0df8@grr.la>
Load xattrs from source files and pack them into target image.
Signed-off-by: htyuxe+dhbrei4sq0df8 at grr.la <htyuxe+dhbrei4sq0df8 at grr.la>
Signed-off-by: Li Guifu <blucerlee at gmail.com>
Signed-off-by: Gao Xiang <xiang at kernel.org>
---
This is a modified version of xattr implementation from anonymous person.
Guifu, please also take some time to test the functionality of this patch,
therefore I can merge this to dev branch ASAP.
configure.ac | 1 +
include/erofs/defs.h | 6 +
include/erofs/hashtable.h | 451 ++++++++++++++++++++++++++++++++++++++
include/erofs/internal.h | 2 +-
include/erofs/xattr.h | 44 ++++
lib/Makefile.am | 3 +-
lib/inode.c | 23 ++
lib/xattr.c | 289 ++++++++++++++++++++++++
8 files changed, 817 insertions(+), 2 deletions(-)
create mode 100644 include/erofs/hashtable.h
create mode 100644 include/erofs/xattr.h
create mode 100644 lib/xattr.c
diff --git a/configure.ac b/configure.ac
index fcdf30a..9174711 100644
--- a/configure.ac
+++ b/configure.ac
@@ -75,6 +75,7 @@ AC_CHECK_HEADERS(m4_flatten([
linux/falloc.h
linux/fs.h
linux/types.h
+ linux/xattr.h
limits.h
stddef.h
stdint.h
diff --git a/include/erofs/defs.h b/include/erofs/defs.h
index 0d9910c..b325d01 100644
--- a/include/erofs/defs.h
+++ b/include/erofs/defs.h
@@ -147,6 +147,12 @@ typedef int64_t s64;
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#ifdef __SIZEOF_LONG__
+#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
+#define BITS_PER_LONG __WORDSIZE
+#endif
+
#define BUG_ON(cond) assert(!(cond))
#ifdef NDEBUG
diff --git a/include/erofs/hashtable.h b/include/erofs/hashtable.h
new file mode 100644
index 0000000..231ce99
--- /dev/null
+++ b/include/erofs/hashtable.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * erofs_utils/include/erofs/hashtable.h
+ * (code originally taken from include/linux/hash{,table}.h)
+ */
+#ifndef __EROFS_HASHTABLE_H
+#define __EROFS_HASHTABLE_H
+
+#include "defs.h"
+
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
+ * fs/inode.c. It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
+#else
+#error Wordsize not 32 or 64
+#endif
+
+/*
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits. Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties. (See Knuth vol 3, section 6.4, exercise 9.)
+ *
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void hlist_add_behind(struct hlist_node *n,
+ struct hlist_node *prev)
+{
+ n->next = prev->next;
+ prev->next = n;
+ n->pprev = &prev->next;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+/* after that we'll appear to be on some hlist and hlist_del will work */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+ n->pprev = &n->next;
+}
+
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+ struct hlist_head *new)
+{
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos; pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ pos = n)
+
+#define hlist_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @pos:the type * to use as a loop cursor.
+ * @head:the head for your list.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_continue
+ * iterate over a hlist continuing after current point
+ * @pos:the type * to use as a loop cursor.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(pos, member) \
+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_from
+ * iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_safe
+ * iterate over list of given type safe against removal of list entry
+ * @pos:the type * to use as a loop cursor.
+ * @n:another &struct hlist_node to use as temporary storage
+ * @head:the head for your list.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
+static inline u32 __hash_32(u32 val)
+{
+ return val * GOLDEN_RATIO_32;
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
+ /* High bits are more random, so use them. */
+ return __hash_32(val) >> (32 - bits);
+}
+
+static __always_inline u32 hash_64(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+ /* 64x64-bit multiply is efficient on all 64-bit processors */
+ return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+ /* Hash 64 bits using only 32x32-bit multiply. */
+ return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
+}
+
+/**
+ * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
+ * @n - parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+ (n) & (1ULL << 60) ? 60 : \
+ (n) & (1ULL << 59) ? 59 : \
+ (n) & (1ULL << 58) ? 58 : \
+ (n) & (1ULL << 57) ? 57 : \
+ (n) & (1ULL << 56) ? 56 : \
+ (n) & (1ULL << 55) ? 55 : \
+ (n) & (1ULL << 54) ? 54 : \
+ (n) & (1ULL << 53) ? 53 : \
+ (n) & (1ULL << 52) ? 52 : \
+ (n) & (1ULL << 51) ? 51 : \
+ (n) & (1ULL << 50) ? 50 : \
+ (n) & (1ULL << 49) ? 49 : \
+ (n) & (1ULL << 48) ? 48 : \
+ (n) & (1ULL << 47) ? 47 : \
+ (n) & (1ULL << 46) ? 46 : \
+ (n) & (1ULL << 45) ? 45 : \
+ (n) & (1ULL << 44) ? 44 : \
+ (n) & (1ULL << 43) ? 43 : \
+ (n) & (1ULL << 42) ? 42 : \
+ (n) & (1ULL << 41) ? 41 : \
+ (n) & (1ULL << 40) ? 40 : \
+ (n) & (1ULL << 39) ? 39 : \
+ (n) & (1ULL << 38) ? 38 : \
+ (n) & (1ULL << 37) ? 37 : \
+ (n) & (1ULL << 36) ? 36 : \
+ (n) & (1ULL << 35) ? 35 : \
+ (n) & (1ULL << 34) ? 34 : \
+ (n) & (1ULL << 33) ? 33 : \
+ (n) & (1ULL << 32) ? 32 : \
+ (n) & (1ULL << 31) ? 31 : \
+ (n) & (1ULL << 30) ? 30 : \
+ (n) & (1ULL << 29) ? 29 : \
+ (n) & (1ULL << 28) ? 28 : \
+ (n) & (1ULL << 27) ? 27 : \
+ (n) & (1ULL << 26) ? 26 : \
+ (n) & (1ULL << 25) ? 25 : \
+ (n) & (1ULL << 24) ? 24 : \
+ (n) & (1ULL << 23) ? 23 : \
+ (n) & (1ULL << 22) ? 22 : \
+ (n) & (1ULL << 21) ? 21 : \
+ (n) & (1ULL << 20) ? 20 : \
+ (n) & (1ULL << 19) ? 19 : \
+ (n) & (1ULL << 18) ? 18 : \
+ (n) & (1ULL << 17) ? 17 : \
+ (n) & (1ULL << 16) ? 16 : \
+ (n) & (1ULL << 15) ? 15 : \
+ (n) & (1ULL << 14) ? 14 : \
+ (n) & (1ULL << 13) ? 13 : \
+ (n) & (1ULL << 12) ? 12 : \
+ (n) & (1ULL << 11) ? 11 : \
+ (n) & (1ULL << 10) ? 10 : \
+ (n) & (1ULL << 9) ? 9 : \
+ (n) & (1ULL << 8) ? 8 : \
+ (n) & (1ULL << 7) ? 7 : \
+ (n) & (1ULL << 6) ? 6 : \
+ (n) & (1ULL << 5) ? 5 : \
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+ (n) & (1ULL << 1) ? 1 : 0 \
+)
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels */
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ *
+ * Calculates the size of the hashtable from the given parameter, otherwise
+ * same as hash_init_size.
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_hashed - check whether an object is in any hashtable
+ * @node: the &struct hlist_node of the object to be checked
+ */
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * hash_empty - check whether a hashtable is empty
+ * @hashtable: hashtable to check
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
+ * hash_for_each - iterate over a hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry(obj, &name[bkt], member)
+
+/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @tmp: a &struct used for temporary storage
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+/**
+ * hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+#endif
diff --git a/include/erofs/internal.h b/include/erofs/internal.h
index b7ce6f8..33a72b5 100644
--- a/include/erofs/internal.h
+++ b/include/erofs/internal.h
@@ -59,7 +59,7 @@ struct erofs_sb_info {
extern struct erofs_sb_info sbi;
struct erofs_inode {
- struct list_head i_hash, i_subdirs;
+ struct list_head i_hash, i_subdirs, i_xattrs;
unsigned int i_count;
struct erofs_inode *i_parent;
diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
new file mode 100644
index 0000000..5fd6a59
--- /dev/null
+++ b/include/erofs/xattr.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * erofs_utils/include/erofs/xattr.h
+ */
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
+#include "internal.h"
+
+#define EROFS_INODE_XATTR_ICOUNT(_size) ({\
+ u32 __size = le16_to_cpu(_size); \
+ ((__size) == 0) ? 0 : \
+ (_size - sizeof(struct erofs_xattr_ibody_header)) / \
+ sizeof(struct erofs_xattr_entry) + 1; })
+
+#ifndef XATTR_USER_PREFIX
+#define XATTR_USER_PREFIX "user."
+#endif
+#ifndef XATTR_USER_PREFIX_LEN
+#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
+#endif
+#ifndef XATTR_SECURITY_PREFIX
+#define XATTR_SECURITY_PREFIX "security."
+#endif
+#ifndef XATTR_SECURITY_PREFIX_LEN
+#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
+#endif
+#ifndef XATTR_TRUSTED_PREFIX
+#define XATTR_TRUSTED_PREFIX "trusted."
+#endif
+#ifndef XATTR_TRUSTED_PREFIX_LEN
+#define XATTR_TRUSTED_PREFIX_LEN (sizeof(XATTR_TRUSTED_PREFIX) - 1)
+#endif
+#ifndef XATTR_NAME_POSIX_ACL_ACCESS
+#define XATTR_NAME_POSIX_ACL_ACCESS "system.posix_acl_access"
+#endif
+#ifndef XATTR_NAME_POSIX_ACL_DEFAULT
+#define XATTR_NAME_POSIX_ACL_DEFAULT "system.posix_acl_default"
+#endif
+
+int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs);
+char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size);
+
+#endif
diff --git a/lib/Makefile.am b/lib/Makefile.am
index dea82f7..1ff81f9 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -2,7 +2,8 @@
# Makefile.am
noinst_LTLIBRARIES = liberofs.la
-liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c
+liberofs_la_SOURCES = config.c io.c cache.c inode.c xattr.c \
+ compress.c compressor.c
liberofs_la_CFLAGS = -Wall -Werror -I$(top_srcdir)/include
if ENABLE_LZ4
liberofs_la_CFLAGS += ${LZ4_CFLAGS}
diff --git a/lib/inode.c b/lib/inode.c
index 581f263..6521a28 100644
--- a/lib/inode.c
+++ b/lib/inode.c
@@ -18,6 +18,7 @@
#include "erofs/cache.h"
#include "erofs/io.h"
#include "erofs/compress.h"
+#include "erofs/xattr.h"
struct erofs_sb_info sbi;
@@ -363,9 +364,11 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
/* let's support v1 currently */
struct erofs_inode_v1 v1 = {0};
+ const u16 icount = EROFS_INODE_XATTR_ICOUNT(inode->xattr_isize);
int ret;
v1.i_advise = cpu_to_le16(0 | (inode->data_mapping_mode << 1));
+ v1.i_xattr_icount = cpu_to_le16(icount);
v1.i_mode = cpu_to_le16(inode->i_mode);
v1.i_nlink = cpu_to_le16(inode->i_nlink);
v1.i_size = cpu_to_le32((u32)inode->i_size);
@@ -398,6 +401,20 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
return false;
off += inode->inode_isize;
+ if (inode->xattr_isize) {
+ char *xattrs = erofs_export_xattr_ibody(&inode->i_xattrs,
+ inode->xattr_isize);
+ if (IS_ERR(xattrs))
+ return false;
+
+ ret = dev_write(xattrs, off, inode->xattr_isize);
+ free(xattrs);
+ if (ret)
+ return false;
+
+ off += inode->xattr_isize;
+ }
+
if (inode->extent_isize) {
/* write compression metadata */
off = Z_EROFS_VLE_EXTENT_ALIGN(off);
@@ -612,6 +629,7 @@ struct erofs_inode *erofs_new_inode(void)
inode->i_count = 1;
init_list_head(&inode->i_subdirs);
+ init_list_head(&inode->i_xattrs);
inode->xattr_isize = 0;
inode->extent_isize = 0;
@@ -699,6 +717,11 @@ struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir)
struct dirent *dp;
struct erofs_dentry *d;
+ ret = erofs_prepare_xattr_ibody(dir->i_srcpath, &dir->i_xattrs);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ dir->xattr_isize = ret;
+
if (!S_ISDIR(dir->i_mode)) {
if (S_ISLNK(dir->i_mode)) {
char *const symlink = malloc(dir->i_size);
diff --git a/lib/xattr.c b/lib/xattr.c
new file mode 100644
index 0000000..1ab959b
--- /dev/null
+++ b/lib/xattr.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * erofs_utils/lib/xattr.c
+ */
+#include <stdlib.h>
+#include <sys/xattr.h>
+#ifdef HAVE_LINUX_XATTR_H
+#include <linux/xattr.h>
+#endif
+#include "erofs/print.h"
+#include "erofs/hashtable.h"
+#include "erofs/xattr.h"
+
+#define EA_HASHTABLE_BITS 16
+
+struct xattr_item {
+ const char *kvbuf;
+ unsigned int hash[2], len[2], count;
+ u8 prefix;
+ struct hlist_node node;
+};
+
+struct inode_xattr_node {
+ struct list_head list;
+ struct xattr_item *item;
+};
+
+static DECLARE_HASHTABLE(ea_hashtable, EA_HASHTABLE_BITS);
+
+static struct xattr_prefix {
+ const char *prefix;
+ u16 prefix_len;
+} prefix[] = {
+ [EROFS_XATTR_INDEX_USER] = {
+ XATTR_USER_PREFIX,
+ XATTR_USER_PREFIX_LEN
+ }, [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = {
+ XATTR_NAME_POSIX_ACL_ACCESS,
+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
+ }, [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = {
+ XATTR_NAME_POSIX_ACL_DEFAULT,
+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1
+ }, [EROFS_XATTR_INDEX_TRUSTED] = {
+ XATTR_TRUSTED_PREFIX,
+ XATTR_TRUSTED_PREFIX_LEN
+ }, [EROFS_XATTR_INDEX_SECURITY] = {
+ XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN
+ }
+};
+
+static unsigned int BKDRHash(char *str, unsigned int len)
+{
+ const unsigned int seed = 131313;
+ unsigned int hash = 0;
+
+ while (len) {
+ hash = hash * seed + (*str++);
+ --len;
+ }
+ return hash;
+}
+
+static unsigned int xattr_item_hash(u8 prefix, char *buf,
+ unsigned int len[2], unsigned int hash[2])
+{
+ hash[0] = BKDRHash(buf, len[0]); /* key */
+ hash[1] = BKDRHash(buf + len[0], len[1]); /* value */
+
+ return prefix ^ hash[0] ^ hash[1];
+}
+
+static struct xattr_item *xattr_item_get(u8 prefix, char *kvbuf,
+ unsigned int len[2])
+{
+ struct xattr_item *item;
+ unsigned int hash[2], hkey;
+
+ hkey = xattr_item_hash(prefix, kvbuf, len, hash);
+
+ hash_for_each_possible(ea_hashtable, item, node, hkey) {
+ if (prefix == item->prefix &&
+ item->len[0] == len[0] && item->len[1] == len[1] &&
+ item->hash[0] == hash[0] && item->hash[1] == hash[1] &&
+ !memcmp(kvbuf, item->kvbuf, len[0] + len[1])) {
+ free(kvbuf);
+ ++item->count;
+ return item;
+ }
+ }
+
+ item = malloc(sizeof(*item));
+ if (!item) {
+ free(kvbuf);
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_HLIST_NODE(&item->node);
+ item->count = 1;
+ item->kvbuf = kvbuf;
+ item->len[0] = len[0];
+ item->len[1] = len[1];
+ item->hash[0] = hash[0];
+ item->hash[1] = hash[1];
+ item->prefix = prefix;
+ hash_add(ea_hashtable, &item->node, hkey);
+ return item;
+}
+
+static bool match_prefix(const char *key, u8 *index, u16 *len)
+{
+ struct xattr_prefix *p;
+
+ for (p = prefix; p < prefix + ARRAY_SIZE(prefix); ++p) {
+ if (p->prefix && !strncmp(p->prefix, key, p->prefix_len)) {
+ *len = p->prefix_len;
+ *index = p - prefix;
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct xattr_item *parse_one_xattr(const char *path, const char *key,
+ unsigned int keylen)
+{
+ ssize_t ret;
+ u8 prefix;
+ u16 prefixlen;
+ unsigned int len[2];
+ char *kvbuf;
+
+ erofs_dbg("parse xattr [%s] of %s", path, key);
+
+ if (!match_prefix(key, &prefix, &prefixlen))
+ return ERR_PTR(-ENODATA);
+
+ DBG_BUGON(keylen < prefixlen);
+
+ /* determine length of the value */
+ ret = lgetxattr(path, key, NULL, 0);
+ if (ret < 0)
+ return ERR_PTR(-errno);
+ len[1] = ret;
+
+ /* allocate key-value buffer */
+ len[0] = keylen - prefixlen;
+
+ kvbuf = malloc(len[0] + len[1]);
+ if (!kvbuf)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(kvbuf, key + prefixlen, len[0]);
+ if (len[1]) {
+ /* copy value to buffer */
+ ret = lgetxattr(path, key, kvbuf + len[0], len[1]);
+ if (ret < 0) {
+ free(kvbuf);
+ return ERR_PTR(-errno);
+ }
+ if (len[1] != ret) {
+ erofs_err("xattr value changed just now (%u-> %ld)",
+ len[1], ret);
+ len[1] = ret;
+ }
+ }
+ return xattr_item_get(prefix, kvbuf, len);
+}
+
+static int inode_xattr_add(struct list_head *hlist, struct xattr_item *item)
+{
+ struct inode_xattr_node *node = malloc(sizeof(*node));
+
+ if (!node)
+ return -ENOMEM;
+ init_list_head(&node->list);
+ node->item = item;
+ list_add(&node->list, hlist);
+ return 0;
+}
+
+static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
+{
+ int ret = 0;
+ char *keylst, *key;
+ ssize_t kllen = llistxattr(path, NULL, 0);
+
+ if (kllen < 0 && errno != ENODATA)
+ return -errno;
+ if (kllen <= 1)
+ return 0;
+
+ keylst = malloc(kllen);
+ if (!keylst)
+ return -errno;
+
+ /* copy the list of attribute keys to the buffer.*/
+ kllen = llistxattr(path, keylst, kllen);
+ if (kllen < 0) {
+ ret = -errno;
+ goto err;
+ }
+
+ /*
+ * loop over the list of zero terminated strings with the
+ * attribute keys. Use the remaining buffer length to determine
+ * the end of the list.
+ */
+ key = keylst;
+ while (kllen > 0) {
+ unsigned int keylen = strlen(key);
+ struct xattr_item *item = parse_one_xattr(path, key, keylen);
+
+ if (IS_ERR(item)) {
+ ret = PTR_ERR(item);
+ goto err;
+ }
+
+ if (ixattrs) {
+ ret = inode_xattr_add(ixattrs, item);
+ if (ret < 0)
+ goto err;
+ }
+ kllen -= keylen + 1;
+ key += keylen + 1;
+ }
+err:
+ free(keylst);
+ return ret;
+
+}
+
+int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs)
+{
+ int ret;
+ struct inode_xattr_node *node;
+
+ ret = read_xattrs_from_file(path, ixattrs);
+ if (ret < 0)
+ return ret;
+
+ if (list_empty(ixattrs))
+ return 0;
+
+ /* get xattr ibody size */
+ ret = sizeof(struct erofs_xattr_ibody_header);
+ list_for_each_entry(node, ixattrs, list) {
+ const struct xattr_item *item = node->item;
+
+ ret += sizeof(struct erofs_xattr_entry);
+ ret = EROFS_XATTR_ALIGN(ret + item->len[0] + item->len[1]);
+ }
+ return ret;
+}
+
+char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
+{
+ char *buf;
+ struct inode_xattr_node *node;
+ struct erofs_xattr_ibody_header *header;
+ unsigned int p;
+
+ buf = calloc(1, size);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ header = (struct erofs_xattr_ibody_header *)buf;
+ header->h_shared_count = 0;
+
+ p = sizeof(struct erofs_xattr_ibody_header);
+ list_for_each_entry(node, ixattrs, list) {
+ struct xattr_item *const item = node->item;
+ const struct erofs_xattr_entry entry = {
+ .e_name_index = item->prefix,
+ .e_name_len = item->len[0],
+ .e_value_size = cpu_to_le16(item->len[1])
+ };
+
+ memcpy(buf + p, &entry, sizeof(entry));
+ p += sizeof(struct erofs_xattr_entry);
+ memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]);
+ p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]);
+
+ list_del(&node->list);
+ free(node);
+ }
+ DBG_BUGON(p > size);
+ return buf;
+}
+
--
2.17.1
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v3 1/2] erofs-utils: introduce inline xattr support
2019-08-11 17:10 ` [PATCH v2] erofs-utils: introduce preliminary " Gao Xiang
@ 2019-10-05 14:20 ` Gao Xiang via Linux-erofs
2019-10-05 14:20 ` [PATCH 2/2] erofs-utils: introduce shared " Gao Xiang via Linux-erofs
2019-10-05 16:44 ` [PATCH v3 1/2] erofs-utils: introduce inline " Li Guifu
0 siblings, 2 replies; 12+ messages in thread
From: Gao Xiang via Linux-erofs @ 2019-10-05 14:20 UTC (permalink / raw)
To: Li Guifu, Chao Yu, linux-erofs; +Cc: htyuxe+dhbrei4sq0df8
From: "htyuxe+dhbrei4sq0df8@grr.la" <htyuxe+dhbrei4sq0df8@grr.la>
Load xattrs from source files and pack them into target image.
Signed-off-by: htyuxe+dhbrei4sq0df8@grr.la <htyuxe+dhbrei4sq0df8@grr.la>
Signed-off-by: Li Guifu <blucerlee@gmail.com>
Signed-off-by: Gao Xiang <hsiangkao@aol.com>
---
This is a cleanup xattr implementation for new erofs-utils
compared with the old internal commercial mess, which will be
included in erofs-utils v1.0 eventually.
v3:
- adapt shared xattr patch;
- fix alignment after xattr applied.
configure.ac | 1 +
include/erofs/defs.h | 6 +
include/erofs/hashtable.h | 462 ++++++++++++++++++++++++++++++++++++++
include/erofs/internal.h | 2 +-
include/erofs/xattr.h | 48 ++++
lib/Makefile.am | 3 +-
lib/inode.c | 31 ++-
lib/xattr.c | 296 ++++++++++++++++++++++++
8 files changed, 845 insertions(+), 4 deletions(-)
create mode 100644 include/erofs/hashtable.h
create mode 100644 include/erofs/xattr.h
create mode 100644 lib/xattr.c
diff --git a/configure.ac b/configure.ac
index 07e034e..8c31cb7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -75,6 +75,7 @@ AC_CHECK_HEADERS(m4_flatten([
linux/falloc.h
linux/fs.h
linux/types.h
+ linux/xattr.h
limits.h
stddef.h
stdint.h
diff --git a/include/erofs/defs.h b/include/erofs/defs.h
index 15db4e3..c4cdb2d 100644
--- a/include/erofs/defs.h
+++ b/include/erofs/defs.h
@@ -151,6 +151,12 @@ typedef int64_t s64;
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#ifdef __SIZEOF_LONG__
+#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
+#define BITS_PER_LONG __WORDSIZE
+#endif
+
#define BUG_ON(cond) assert(!(cond))
#ifdef NDEBUG
diff --git a/include/erofs/hashtable.h b/include/erofs/hashtable.h
new file mode 100644
index 0000000..ab57b56
--- /dev/null
+++ b/include/erofs/hashtable.h
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * erofs_utils/include/erofs/hashtable.h
+ *
+ * Original code taken from 'linux/include/linux/hash{,table}.h'
+ */
+#ifndef __EROFS_HASHTABLE_H
+#define __EROFS_HASHTABLE_H
+
+/*
+ * Fast hashing routine for ints, longs and pointers.
+ * (C) 2002 Nadia Yvette Chambers, IBM
+ */
+
+/*
+ * Statically sized hash table implementation
+ * (C) 2012 Sasha Levin <levinsasha928@gmail.com>
+ */
+
+#include "defs.h"
+
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
+ * fs/inode.c. It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
+#else
+#error Wordsize not 32 or 64
+#endif
+
+/*
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits. Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties. (See Knuth vol 3, section 6.4, exercise 9.)
+ *
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void hlist_add_behind(struct hlist_node *n,
+ struct hlist_node *prev)
+{
+ n->next = prev->next;
+ prev->next = n;
+ n->pprev = &prev->next;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+/* after that we'll appear to be on some hlist and hlist_del will work */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+ n->pprev = &n->next;
+}
+
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+ struct hlist_head *new)
+{
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos; pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ pos = n)
+
+#define hlist_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @pos:the type * to use as a loop cursor.
+ * @head:the head for your list.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_continue
+ * iterate over a hlist continuing after current point
+ * @pos:the type * to use as a loop cursor.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(pos, member) \
+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_from
+ * iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_safe
+ * iterate over list of given type safe against removal of list entry
+ * @pos:the type * to use as a loop cursor.
+ * @n:another &struct hlist_node to use as temporary storage
+ * @head:the head for your list.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
+static inline u32 __hash_32(u32 val)
+{
+ return val * GOLDEN_RATIO_32;
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
+ /* High bits are more random, so use them. */
+ return __hash_32(val) >> (32 - bits);
+}
+
+static __always_inline u32 hash_64(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+ /* 64x64-bit multiply is efficient on all 64-bit processors */
+ return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+ /* Hash 64 bits using only 32x32-bit multiply. */
+ return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
+}
+
+/**
+ * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
+ * @n - parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+ (n) & (1ULL << 60) ? 60 : \
+ (n) & (1ULL << 59) ? 59 : \
+ (n) & (1ULL << 58) ? 58 : \
+ (n) & (1ULL << 57) ? 57 : \
+ (n) & (1ULL << 56) ? 56 : \
+ (n) & (1ULL << 55) ? 55 : \
+ (n) & (1ULL << 54) ? 54 : \
+ (n) & (1ULL << 53) ? 53 : \
+ (n) & (1ULL << 52) ? 52 : \
+ (n) & (1ULL << 51) ? 51 : \
+ (n) & (1ULL << 50) ? 50 : \
+ (n) & (1ULL << 49) ? 49 : \
+ (n) & (1ULL << 48) ? 48 : \
+ (n) & (1ULL << 47) ? 47 : \
+ (n) & (1ULL << 46) ? 46 : \
+ (n) & (1ULL << 45) ? 45 : \
+ (n) & (1ULL << 44) ? 44 : \
+ (n) & (1ULL << 43) ? 43 : \
+ (n) & (1ULL << 42) ? 42 : \
+ (n) & (1ULL << 41) ? 41 : \
+ (n) & (1ULL << 40) ? 40 : \
+ (n) & (1ULL << 39) ? 39 : \
+ (n) & (1ULL << 38) ? 38 : \
+ (n) & (1ULL << 37) ? 37 : \
+ (n) & (1ULL << 36) ? 36 : \
+ (n) & (1ULL << 35) ? 35 : \
+ (n) & (1ULL << 34) ? 34 : \
+ (n) & (1ULL << 33) ? 33 : \
+ (n) & (1ULL << 32) ? 32 : \
+ (n) & (1ULL << 31) ? 31 : \
+ (n) & (1ULL << 30) ? 30 : \
+ (n) & (1ULL << 29) ? 29 : \
+ (n) & (1ULL << 28) ? 28 : \
+ (n) & (1ULL << 27) ? 27 : \
+ (n) & (1ULL << 26) ? 26 : \
+ (n) & (1ULL << 25) ? 25 : \
+ (n) & (1ULL << 24) ? 24 : \
+ (n) & (1ULL << 23) ? 23 : \
+ (n) & (1ULL << 22) ? 22 : \
+ (n) & (1ULL << 21) ? 21 : \
+ (n) & (1ULL << 20) ? 20 : \
+ (n) & (1ULL << 19) ? 19 : \
+ (n) & (1ULL << 18) ? 18 : \
+ (n) & (1ULL << 17) ? 17 : \
+ (n) & (1ULL << 16) ? 16 : \
+ (n) & (1ULL << 15) ? 15 : \
+ (n) & (1ULL << 14) ? 14 : \
+ (n) & (1ULL << 13) ? 13 : \
+ (n) & (1ULL << 12) ? 12 : \
+ (n) & (1ULL << 11) ? 11 : \
+ (n) & (1ULL << 10) ? 10 : \
+ (n) & (1ULL << 9) ? 9 : \
+ (n) & (1ULL << 8) ? 8 : \
+ (n) & (1ULL << 7) ? 7 : \
+ (n) & (1ULL << 6) ? 6 : \
+ (n) & (1ULL << 5) ? 5 : \
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+ (n) & (1ULL << 1) ? 1 : 0 \
+)
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels*/
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ *
+ * Calculates the size of the hashtable from the given parameter, otherwise
+ * same as hash_init_size.
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_hashed - check whether an object is in any hashtable
+ * @node: the &struct hlist_node of the object to be checked
+ */
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * hash_empty - check whether a hashtable is empty
+ * @hashtable: hashtable to check
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
+ * hash_for_each - iterate over a hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry(obj, &name[bkt], member)
+
+/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @tmp: a &struct used for temporary storage
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+/**
+ * hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+#endif
diff --git a/include/erofs/internal.h b/include/erofs/internal.h
index 5384946..25ce7b5 100644
--- a/include/erofs/internal.h
+++ b/include/erofs/internal.h
@@ -61,7 +61,7 @@ struct erofs_sb_info {
extern struct erofs_sb_info sbi;
struct erofs_inode {
- struct list_head i_hash, i_subdirs;
+ struct list_head i_hash, i_subdirs, i_xattrs;
unsigned int i_count;
struct erofs_inode *i_parent;
diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
new file mode 100644
index 0000000..29df025
--- /dev/null
+++ b/include/erofs/xattr.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * erofs_utils/include/erofs/xattr.h
+ *
+ * Originally contributed by an anonymous person,
+ * heavily changed by Li Guifu <blucerlee@gmail.com>
+ * and Gao Xiang <xiang@kernel.org>
+ */
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
+#include "internal.h"
+
+#define EROFS_INODE_XATTR_ICOUNT(_size) ({\
+ u32 __size = le16_to_cpu(_size); \
+ ((__size) == 0) ? 0 : \
+ (_size - sizeof(struct erofs_xattr_ibody_header)) / \
+ sizeof(struct erofs_xattr_entry) + 1; })
+
+#ifndef XATTR_USER_PREFIX
+#define XATTR_USER_PREFIX "user."
+#endif
+#ifndef XATTR_USER_PREFIX_LEN
+#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
+#endif
+#ifndef XATTR_SECURITY_PREFIX
+#define XATTR_SECURITY_PREFIX "security."
+#endif
+#ifndef XATTR_SECURITY_PREFIX_LEN
+#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
+#endif
+#ifndef XATTR_TRUSTED_PREFIX
+#define XATTR_TRUSTED_PREFIX "trusted."
+#endif
+#ifndef XATTR_TRUSTED_PREFIX_LEN
+#define XATTR_TRUSTED_PREFIX_LEN (sizeof(XATTR_TRUSTED_PREFIX) - 1)
+#endif
+#ifndef XATTR_NAME_POSIX_ACL_ACCESS
+#define XATTR_NAME_POSIX_ACL_ACCESS "system.posix_acl_access"
+#endif
+#ifndef XATTR_NAME_POSIX_ACL_DEFAULT
+#define XATTR_NAME_POSIX_ACL_DEFAULT "system.posix_acl_default"
+#endif
+
+int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs);
+char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size);
+
+#endif
diff --git a/lib/Makefile.am b/lib/Makefile.am
index dea82f7..1ff81f9 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -2,7 +2,8 @@
# Makefile.am
noinst_LTLIBRARIES = liberofs.la
-liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c
+liberofs_la_SOURCES = config.c io.c cache.c inode.c xattr.c \
+ compress.c compressor.c
liberofs_la_CFLAGS = -Wall -Werror -I$(top_srcdir)/include
if ENABLE_LZ4
liberofs_la_CFLAGS += ${LZ4_CFLAGS}
diff --git a/lib/inode.c b/lib/inode.c
index d1f294a..a23bf8f 100644
--- a/lib/inode.c
+++ b/lib/inode.c
@@ -20,6 +20,7 @@
#include "erofs/cache.h"
#include "erofs/io.h"
#include "erofs/compress.h"
+#include "erofs/xattr.h"
struct erofs_sb_info sbi;
@@ -365,6 +366,7 @@ fail:
static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
{
struct erofs_inode *const inode = bh->fsprivate;
+ const u16 icount = EROFS_INODE_XATTR_ICOUNT(inode->xattr_isize);
erofs_off_t off = erofs_btell(bh, false);
union {
struct erofs_inode_compact dic;
@@ -375,6 +377,7 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
switch (inode->inode_isize) {
case sizeof(struct erofs_inode_compact):
u.dic.i_format = cpu_to_le16(0 | (inode->datalayout << 1));
+ u.dic.i_xattr_icount = cpu_to_le16(icount);
u.dic.i_mode = cpu_to_le16(inode->i_mode);
u.dic.i_nlink = cpu_to_le16(inode->i_nlink);
u.dic.i_size = cpu_to_le32((u32)inode->i_size);
@@ -404,6 +407,7 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
break;
case sizeof(struct erofs_inode_extended):
u.die.i_format = cpu_to_le16(1 | (inode->datalayout << 1));
+ u.die.i_xattr_icount = cpu_to_le16(icount);
u.die.i_mode = cpu_to_le16(inode->i_mode);
u.die.i_nlink = cpu_to_le32(inode->i_nlink);
u.die.i_size = cpu_to_le64(inode->i_size);
@@ -445,6 +449,20 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
return false;
off += inode->inode_isize;
+ if (inode->xattr_isize) {
+ char *xattrs = erofs_export_xattr_ibody(&inode->i_xattrs,
+ inode->xattr_isize);
+ if (IS_ERR(xattrs))
+ return false;
+
+ ret = dev_write(xattrs, off, inode->xattr_isize);
+ free(xattrs);
+ if (ret)
+ return false;
+
+ off += inode->xattr_isize;
+ }
+
if (inode->extent_isize) {
/* write compression metadata */
off = Z_EROFS_VLE_EXTENT_ALIGN(off);
@@ -499,8 +517,10 @@ int erofs_prepare_inode_buffer(struct erofs_inode *inode)
DBG_BUGON(inode->bh || inode->bh_inline);
- inodesize = inode->inode_isize + inode->xattr_isize +
- inode->extent_isize;
+ inodesize = inode->inode_isize + inode->xattr_isize;
+ if (inode->extent_isize)
+ inodesize = Z_EROFS_VLE_EXTENT_ALIGN(inodesize) +
+ inode->extent_isize;
if (is_inode_layout_compression(inode))
goto noinline;
@@ -707,6 +727,8 @@ struct erofs_inode *erofs_new_inode(void)
inode->i_count = 1;
init_list_head(&inode->i_subdirs);
+ init_list_head(&inode->i_xattrs);
+
inode->idata_size = 0;
inode->xattr_isize = 0;
inode->extent_isize = 0;
@@ -795,6 +817,11 @@ struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir)
struct dirent *dp;
struct erofs_dentry *d;
+ ret = erofs_prepare_xattr_ibody(dir->i_srcpath, &dir->i_xattrs);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ dir->xattr_isize = ret;
+
if (!S_ISDIR(dir->i_mode)) {
if (S_ISLNK(dir->i_mode)) {
char *const symlink = malloc(dir->i_size);
diff --git a/lib/xattr.c b/lib/xattr.c
new file mode 100644
index 0000000..8156f3e
--- /dev/null
+++ b/lib/xattr.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * erofs_utils/lib/xattr.c
+ *
+ * Originally contributed by an anonymous person,
+ * heavily changed by Li Guifu <blucerlee@gmail.com>
+ * and Gao Xiang <hsiangkao@aol.com>
+ */
+#include <stdlib.h>
+#include <sys/xattr.h>
+#ifdef HAVE_LINUX_XATTR_H
+#include <linux/xattr.h>
+#endif
+#include "erofs/print.h"
+#include "erofs/hashtable.h"
+#include "erofs/xattr.h"
+
+#define EA_HASHTABLE_BITS 16
+
+struct xattr_item {
+ const char *kvbuf;
+ unsigned int hash[2], len[2], count;
+ u8 prefix;
+ struct hlist_node node;
+};
+
+struct inode_xattr_node {
+ struct list_head list;
+ struct xattr_item *item;
+};
+
+static DECLARE_HASHTABLE(ea_hashtable, EA_HASHTABLE_BITS);
+
+static struct xattr_prefix {
+ const char *prefix;
+ u16 prefix_len;
+} prefix[] = {
+ [EROFS_XATTR_INDEX_USER] = {
+ XATTR_USER_PREFIX,
+ XATTR_USER_PREFIX_LEN
+ }, [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = {
+ XATTR_NAME_POSIX_ACL_ACCESS,
+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
+ }, [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = {
+ XATTR_NAME_POSIX_ACL_DEFAULT,
+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1
+ }, [EROFS_XATTR_INDEX_TRUSTED] = {
+ XATTR_TRUSTED_PREFIX,
+ XATTR_TRUSTED_PREFIX_LEN
+ }, [EROFS_XATTR_INDEX_SECURITY] = {
+ XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN
+ }
+};
+
+static unsigned int BKDRHash(char *str, unsigned int len)
+{
+ const unsigned int seed = 131313;
+ unsigned int hash = 0;
+
+ while (len) {
+ hash = hash * seed + (*str++);
+ --len;
+ }
+ return hash;
+}
+
+static unsigned int xattr_item_hash(u8 prefix, char *buf,
+ unsigned int len[2], unsigned int hash[2])
+{
+ hash[0] = BKDRHash(buf, len[0]); /* key */
+ hash[1] = BKDRHash(buf + len[0], len[1]); /* value */
+
+ return prefix ^ hash[0] ^ hash[1];
+}
+
+static struct xattr_item *xattr_item_get(u8 prefix, char *kvbuf,
+ unsigned int len[2])
+{
+ struct xattr_item *item;
+ unsigned int hash[2], hkey;
+
+ hkey = xattr_item_hash(prefix, kvbuf, len, hash);
+
+ hash_for_each_possible(ea_hashtable, item, node, hkey) {
+ if (prefix == item->prefix &&
+ item->len[0] == len[0] && item->len[1] == len[1] &&
+ item->hash[0] == hash[0] && item->hash[1] == hash[1] &&
+ !memcmp(kvbuf, item->kvbuf, len[0] + len[1])) {
+ free(kvbuf);
+ ++item->count;
+ return item;
+ }
+ }
+
+ item = malloc(sizeof(*item));
+ if (!item) {
+ free(kvbuf);
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_HLIST_NODE(&item->node);
+ item->count = 1;
+ item->kvbuf = kvbuf;
+ item->len[0] = len[0];
+ item->len[1] = len[1];
+ item->hash[0] = hash[0];
+ item->hash[1] = hash[1];
+ item->prefix = prefix;
+ hash_add(ea_hashtable, &item->node, hkey);
+ return item;
+}
+
+static bool match_prefix(const char *key, u8 *index, u16 *len)
+{
+ struct xattr_prefix *p;
+
+ for (p = prefix; p < prefix + ARRAY_SIZE(prefix); ++p) {
+ if (p->prefix && !strncmp(p->prefix, key, p->prefix_len)) {
+ *len = p->prefix_len;
+ *index = p - prefix;
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct xattr_item *parse_one_xattr(const char *path, const char *key,
+ unsigned int keylen)
+{
+ ssize_t ret;
+ u8 prefix;
+ u16 prefixlen;
+ unsigned int len[2];
+ char *kvbuf;
+
+ erofs_dbg("parse xattr [%s] of %s", path, key);
+
+ if (!match_prefix(key, &prefix, &prefixlen))
+ return ERR_PTR(-ENODATA);
+
+ DBG_BUGON(keylen < prefixlen);
+
+ /* determine length of the value */
+ ret = lgetxattr(path, key, NULL, 0);
+ if (ret < 0)
+ return ERR_PTR(-errno);
+ len[1] = ret;
+
+ /* allocate key-value buffer */
+ len[0] = keylen - prefixlen;
+
+ kvbuf = malloc(len[0] + len[1]);
+ if (!kvbuf)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(kvbuf, key + prefixlen, len[0]);
+ if (len[1]) {
+ /* copy value to buffer */
+ ret = lgetxattr(path, key, kvbuf + len[0], len[1]);
+ if (ret < 0) {
+ free(kvbuf);
+ return ERR_PTR(-errno);
+ }
+ if (len[1] != ret) {
+ erofs_err("size of xattr value got changed just now (%u-> %ld)",
+ len[1], ret);
+ len[1] = ret;
+ }
+ }
+ return xattr_item_get(prefix, kvbuf, len);
+}
+
+static int inode_xattr_add(struct list_head *hlist, struct xattr_item *item)
+{
+ struct inode_xattr_node *node = malloc(sizeof(*node));
+
+ if (!node)
+ return -ENOMEM;
+ init_list_head(&node->list);
+ node->item = item;
+ list_add(&node->list, hlist);
+ return 0;
+}
+
+static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
+{
+ int ret = 0;
+ char *keylst, *key;
+ ssize_t kllen = llistxattr(path, NULL, 0);
+
+ if (kllen < 0 && errno != ENODATA) {
+ erofs_err("llistxattr to get the size of names for %s failed",
+ path);
+ return -errno;
+ }
+ if (kllen <= 1)
+ return 0;
+
+ keylst = malloc(kllen);
+ if (!keylst)
+ return -ENOMEM;
+
+ /* copy the list of attribute keys to the buffer.*/
+ kllen = llistxattr(path, keylst, kllen);
+ if (kllen < 0) {
+ erofs_err("llistxattr to get names for %s failed", path);
+ ret = -errno;
+ goto err;
+ }
+
+ /*
+ * loop over the list of zero terminated strings with the
+ * attribute keys. Use the remaining buffer length to determine
+ * the end of the list.
+ */
+ key = keylst;
+ while (kllen > 0) {
+ unsigned int keylen = strlen(key);
+ struct xattr_item *item = parse_one_xattr(path, key, keylen);
+
+ if (IS_ERR(item)) {
+ ret = PTR_ERR(item);
+ goto err;
+ }
+
+ if (ixattrs) {
+ ret = inode_xattr_add(ixattrs, item);
+ if (ret < 0)
+ goto err;
+ }
+ kllen -= keylen + 1;
+ key += keylen + 1;
+ }
+err:
+ free(keylst);
+ return ret;
+
+}
+
+int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs)
+{
+ int ret;
+ struct inode_xattr_node *node;
+
+ ret = read_xattrs_from_file(path, ixattrs);
+ if (ret < 0)
+ return ret;
+
+ if (list_empty(ixattrs))
+ return 0;
+
+ /* get xattr ibody size */
+ ret = sizeof(struct erofs_xattr_ibody_header);
+ list_for_each_entry(node, ixattrs, list) {
+ const struct xattr_item *item = node->item;
+
+ ret += sizeof(struct erofs_xattr_entry);
+ ret = EROFS_XATTR_ALIGN(ret + item->len[0] + item->len[1]);
+ }
+ return ret;
+}
+
+char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
+{
+ struct inode_xattr_node *node, *n;
+ struct erofs_xattr_ibody_header *header;
+ unsigned int p;
+ char *buf = calloc(1, size);
+
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ header = (struct erofs_xattr_ibody_header *)buf;
+ header->h_shared_count = 0;
+
+ p = sizeof(struct erofs_xattr_ibody_header);
+ list_for_each_entry_safe(node, n, ixattrs, list) {
+ struct xattr_item *const item = node->item;
+ const struct erofs_xattr_entry entry = {
+ .e_name_index = item->prefix,
+ .e_name_len = item->len[0],
+ .e_value_size = cpu_to_le16(item->len[1])
+ };
+
+ memcpy(buf + p, &entry, sizeof(entry));
+ p += sizeof(struct erofs_xattr_entry);
+ memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]);
+ p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]);
+
+ list_del(&node->list);
+ free(node);
+ }
+ DBG_BUGON(p > size);
+ return buf;
+}
+
--
2.17.1
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 2/2] erofs-utils: introduce shared xattr support
2019-10-05 14:20 ` [PATCH v3 1/2] erofs-utils: introduce inline " Gao Xiang via Linux-erofs
@ 2019-10-05 14:20 ` Gao Xiang via Linux-erofs
2019-10-05 16:43 ` Li Guifu
2019-10-05 16:44 ` [PATCH v3 1/2] erofs-utils: introduce inline " Li Guifu
1 sibling, 1 reply; 12+ messages in thread
From: Gao Xiang via Linux-erofs @ 2019-10-05 14:20 UTC (permalink / raw)
To: Li Guifu, Chao Yu, linux-erofs
From: Li Guifu <blucerlee@gmail.com>
Large xattrs or xattrs shared by a lot of files
can be stored in shared xattrs rather than
inlined right after inode.
Signed-off-by: Li Guifu <blucerlee@gmail.com>
Signed-off-by: Gao Xiang <hsiangkao@aol.com>
---
include/erofs/config.h | 2 +
include/erofs/xattr.h | 1 +
lib/config.c | 1 +
lib/xattr.c | 193 ++++++++++++++++++++++++++++++++++++++++-
mkfs/main.c | 10 ++-
5 files changed, 205 insertions(+), 2 deletions(-)
diff --git a/include/erofs/config.h b/include/erofs/config.h
index 8b09430..fde936c 100644
--- a/include/erofs/config.h
+++ b/include/erofs/config.h
@@ -28,6 +28,8 @@ struct erofs_configure {
char *c_compr_alg_master;
int c_compr_level_master;
int c_force_inodeversion;
+ /* < 0, xattr disabled and INT_MAX, always use inline xattrs */
+ int c_inline_xattr_tolerance;
};
extern struct erofs_configure cfg;
diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
index 29df025..3dff1ea 100644
--- a/include/erofs/xattr.h
+++ b/include/erofs/xattr.h
@@ -44,5 +44,6 @@
int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs);
char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size);
+int erofs_build_shared_xattrs_from_path(const char *path);
#endif
diff --git a/lib/config.c b/lib/config.c
index 110c8b6..dc10754 100644
--- a/lib/config.c
+++ b/lib/config.c
@@ -23,6 +23,7 @@ void erofs_init_configure(void)
cfg.c_compr_level_master = -1;
sbi.feature_incompat = EROFS_FEATURE_INCOMPAT_LZ4_0PADDING;
cfg.c_force_inodeversion = 0;
+ cfg.c_inline_xattr_tolerance = 2;
}
void erofs_show_config(void)
diff --git a/lib/xattr.c b/lib/xattr.c
index 8156f3e..781d210 100644
--- a/lib/xattr.c
+++ b/lib/xattr.c
@@ -6,20 +6,26 @@
* heavily changed by Li Guifu <blucerlee@gmail.com>
* and Gao Xiang <hsiangkao@aol.com>
*/
+#define _GNU_SOURCE
+#include <limits.h>
#include <stdlib.h>
#include <sys/xattr.h>
#ifdef HAVE_LINUX_XATTR_H
#include <linux/xattr.h>
#endif
+#include <sys/stat.h>
+#include <dirent.h>
#include "erofs/print.h"
#include "erofs/hashtable.h"
#include "erofs/xattr.h"
+#include "erofs/cache.h"
#define EA_HASHTABLE_BITS 16
struct xattr_item {
const char *kvbuf;
unsigned int hash[2], len[2], count;
+ int shared_xattr_id;
u8 prefix;
struct hlist_node node;
};
@@ -31,6 +37,9 @@ struct inode_xattr_node {
static DECLARE_HASHTABLE(ea_hashtable, EA_HASHTABLE_BITS);
+static LIST_HEAD(shared_xattrs_list);
+static unsigned int shared_xattrs_count, shared_xattrs_size;
+
static struct xattr_prefix {
const char *prefix;
u16 prefix_len;
@@ -105,6 +114,7 @@ static struct xattr_item *xattr_item_get(u8 prefix, char *kvbuf,
item->len[1] = len[1];
item->hash[0] = hash[0];
item->hash[1] = hash[1];
+ item->shared_xattr_id = -1;
item->prefix = prefix;
hash_add(ea_hashtable, &item->node, hkey);
return item;
@@ -152,7 +162,6 @@ static struct xattr_item *parse_one_xattr(const char *path, const char *key,
kvbuf = malloc(len[0] + len[1]);
if (!kvbuf)
return ERR_PTR(-ENOMEM);
-
memcpy(kvbuf, key + prefixlen, len[0]);
if (len[1]) {
/* copy value to buffer */
@@ -182,6 +191,23 @@ static int inode_xattr_add(struct list_head *hlist, struct xattr_item *item)
return 0;
}
+static int shared_xattr_add(struct xattr_item *item)
+{
+ struct inode_xattr_node *node = malloc(sizeof(*node));
+
+ if (!node)
+ return -ENOMEM;
+
+ init_list_head(&node->list);
+ node->item = item;
+ list_add(&node->list, &shared_xattrs_list);
+
+ shared_xattrs_size += sizeof(struct erofs_xattr_entry);
+ shared_xattrs_size = EROFS_XATTR_ALIGN(shared_xattrs_size +
+ item->len[0] + item->len[1]);
+ return ++shared_xattrs_count;
+}
+
static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
{
int ret = 0;
@@ -227,6 +253,11 @@ static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
ret = inode_xattr_add(ixattrs, item);
if (ret < 0)
goto err;
+ } else if (item->count == cfg.c_inline_xattr_tolerance + 1) {
+ ret = shared_xattr_add(item);
+ if (ret < 0)
+ goto err;
+ ret = 0;
}
kllen -= keylen + 1;
key += keylen + 1;
@@ -242,6 +273,10 @@ int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs)
int ret;
struct inode_xattr_node *node;
+ /* check if xattr is disabled */
+ if (cfg.c_inline_xattr_tolerance < 0)
+ return 0;
+
ret = read_xattrs_from_file(path, ixattrs);
if (ret < 0)
return ret;
@@ -254,16 +289,155 @@ int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs)
list_for_each_entry(node, ixattrs, list) {
const struct xattr_item *item = node->item;
+ if (item->shared_xattr_id >= 0) {
+ ret += sizeof(__le32);
+ continue;
+ }
ret += sizeof(struct erofs_xattr_entry);
ret = EROFS_XATTR_ALIGN(ret + item->len[0] + item->len[1]);
}
return ret;
}
+static int erofs_count_all_xattrs_from_path(const char *path)
+{
+ int ret;
+ DIR *_dir;
+ struct stat64 st;
+
+ _dir = opendir(path);
+ if (!_dir) {
+ erofs_err("%s, failed to opendir at %s: %s",
+ __func__, path, erofs_strerror(errno));
+ return -errno;
+ }
+
+ ret = 0;
+ while (1) {
+ struct dirent *dp;
+ char buf[PATH_MAX];
+
+ /*
+ * set errno to 0 before calling readdir() in order to
+ * distinguish the end of the stream from an error.
+ */
+ errno = 0;
+ dp = readdir(_dir);
+ if (!dp)
+ break;
+
+ if (is_dot_dotdot(dp->d_name) ||
+ !strncmp(dp->d_name, "lost+found", strlen("lost+found")))
+ continue;
+
+ ret = snprintf(buf, PATH_MAX, "%s/%s", path, dp->d_name);
+
+ if (ret < 0 || ret >= PATH_MAX) {
+ /* fail on an overlong path instead of silently truncating it */
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = read_xattrs_from_file(buf, NULL);
+ if (ret)
+ goto fail;
+
+ ret = lstat64(buf, &st);
+ if (ret) {
+ ret = -errno;
+ goto fail;
+ }
+
+ if (!S_ISDIR(st.st_mode))
+ continue;
+
+ ret = erofs_count_all_xattrs_from_path(buf);
+ if (ret)
+ goto fail;
+ }
+
+ if (errno)
+ ret = -errno;
+
+fail:
+ closedir(_dir);
+ return ret;
+}
+
+int erofs_build_shared_xattrs_from_path(const char *path)
+{
+ int ret;
+ struct erofs_buffer_head *bh;
+ struct inode_xattr_node *node, *n;
+ char *buf;
+ unsigned int p;
+ erofs_off_t off;
+
+ /* check if xattr or shared xattr is disabled */
+ if (cfg.c_inline_xattr_tolerance < 0 ||
+ cfg.c_inline_xattr_tolerance == INT_MAX)
+ return 0;
+
+ if (shared_xattrs_size || shared_xattrs_count) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
+
+ ret = erofs_count_all_xattrs_from_path(path);
+ if (ret)
+ return ret;
+
+ if (!shared_xattrs_size)
+ return 0;
+
+ buf = malloc(shared_xattrs_size);
+ if (!buf)
+ return -ENOMEM;
+
+ bh = erofs_balloc(XATTR, shared_xattrs_size, 0, 0);
+ if (IS_ERR(bh)) {
+ free(buf);
+ return PTR_ERR(bh);
+ }
+ bh->op = &erofs_skip_write_bhops;
+
+ erofs_mapbh(bh->block, true);
+ off = erofs_btell(bh, false);
+
+ sbi.xattr_blkaddr = off / EROFS_BLKSIZ;
+ off %= EROFS_BLKSIZ;
+ p = 0;
+
+ list_for_each_entry_safe(node, n, &shared_xattrs_list, list) {
+ struct xattr_item *const item = node->item;
+ const struct erofs_xattr_entry entry = {
+ .e_name_index = item->prefix,
+ .e_name_len = item->len[0],
+ .e_value_size = cpu_to_le16(item->len[1])
+ };
+
+ list_del(&node->list);
+
+ item->shared_xattr_id = (off + p) /
+ sizeof(struct erofs_xattr_entry);
+
+ memcpy(buf + p, &entry, sizeof(entry));
+ p += sizeof(struct erofs_xattr_entry);
+ memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]);
+ p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]);
+ free(node);
+ }
+
+ bh->fsprivate = buf;
+ bh->op = &erofs_buf_write_bhops;
+ return 0;
+}
+
char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
{
struct inode_xattr_node *node, *n;
struct erofs_xattr_ibody_header *header;
+ LIST_HEAD(ilst);
unsigned int p;
char *buf = calloc(1, size);
@@ -276,6 +450,23 @@ char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
p = sizeof(struct erofs_xattr_ibody_header);
list_for_each_entry_safe(node, n, ixattrs, list) {
struct xattr_item *const item = node->item;
+
+ list_del(&node->list);
+
+ /* move inline xattrs to the onstack list */
+ if (item->shared_xattr_id < 0) {
+ list_add(&node->list, &ilst);
+ continue;
+ }
+
+ *(__le32 *)(buf + p) = cpu_to_le32(item->shared_xattr_id);
+ p += sizeof(__le32);
+ ++header->h_shared_count;
+ free(node);
+ }
+
+ list_for_each_entry_safe(node, n, &ilst, list) {
+ struct xattr_item *const item = node->item;
const struct erofs_xattr_entry entry = {
.e_name_index = item->prefix,
.e_name_len = item->len[0],
diff --git a/mkfs/main.c b/mkfs/main.c
index 4b279c0..978c5b4 100644
--- a/mkfs/main.c
+++ b/mkfs/main.c
@@ -19,6 +19,7 @@
#include "erofs/inode.h"
#include "erofs/io.h"
#include "erofs/compress.h"
+#include "erofs/xattr.h"
#define EROFS_SUPER_END (EROFS_SUPER_OFFSET + sizeof(struct erofs_super_block))
@@ -169,7 +170,7 @@ int erofs_mkfs_update_super_block(struct erofs_buffer_head *bh,
.build_time_nsec = cpu_to_le32(sbi.build_time_nsec),
.blocks = 0,
.meta_blkaddr = sbi.meta_blkaddr,
- .xattr_blkaddr = 0,
+ .xattr_blkaddr = sbi.xattr_blkaddr,
.feature_incompat = cpu_to_le32(sbi.feature_incompat),
};
const unsigned int sb_blksize =
@@ -259,6 +260,13 @@ int main(int argc, char **argv)
erofs_inode_manager_init();
+ err = erofs_build_shared_xattrs_from_path(cfg.c_src_path);
+ if (err) {
+ erofs_err("Failed to build shared xattrs: %s",
+ erofs_strerror(err));
+ goto exit;
+ }
+
root_inode = erofs_mkfs_build_tree_from_path(NULL, cfg.c_src_path);
if (IS_ERR(root_inode)) {
err = PTR_ERR(root_inode);
--
2.17.1
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH 2/2] erofs-utils: introduce shared xattr support
2019-10-05 14:20 ` [PATCH 2/2] erofs-utils: introduce shared " Gao Xiang via Linux-erofs
@ 2019-10-05 16:43 ` Li Guifu
2019-10-06 5:01 ` Gao Xiang via Linux-erofs
0 siblings, 1 reply; 12+ messages in thread
From: Li Guifu @ 2019-10-05 16:43 UTC (permalink / raw)
To: Gao Xiang, Li Guifu, Chao Yu, linux-erofs
> From: Li Guifu <blucerlee@gmail.com>
>
> Large xattrs or xattrs shared by a lot of files
> can be stored in shared xattrs rather than
> inlined right after inode.
>
> Signed-off-by: Li Guifu <blucerlee@gmail.com>
> Signed-off-by: Gao Xiang <hsiangkao@aol.com>
Dear Gao Xiang,
Should we add a configure parameter for
cfg.c_inline_xattr_tolerance, which is a custom threshold
for shared xattrs?
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 1/2] erofs-utils: introduce inline xattr support
2019-10-05 14:20 ` [PATCH v3 1/2] erofs-utils: introduce inline " Gao Xiang via Linux-erofs
2019-10-05 14:20 ` [PATCH 2/2] erofs-utils: introduce shared " Gao Xiang via Linux-erofs
@ 2019-10-05 16:44 ` Li Guifu
1 sibling, 0 replies; 12+ messages in thread
From: Li Guifu @ 2019-10-05 16:44 UTC (permalink / raw)
To: Gao Xiang, Li Guifu, Chao Yu, linux-erofs; +Cc: htyuxe+dhbrei4sq0df8
> From: "htyuxe+dhbrei4sq0df8@grr.la" <htyuxe+dhbrei4sq0df8@grr.la>
>
> Load xattrs from source files and pack them into target image.
>
> Signed-off-by: htyuxe+dhbrei4sq0df8@grr.la <htyuxe+dhbrei4sq0df8@grr.la>
> Signed-off-by: Li Guifu <blucerlee@gmail.com>
> Signed-off-by: Gao Xiang <hsiangkao@aol.com>
> ---
It looks good
Tested-by: Li Guifu <blucerlee@gmail.com>
Thanks,
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/2] erofs-utils: introduce shared xattr support
2019-10-05 16:43 ` Li Guifu
@ 2019-10-06 5:01 ` Gao Xiang via Linux-erofs
2019-10-14 11:42 ` [PATCH v4 1/2] erofs-utils: introduce inline " Gao Xiang via Linux-erofs
0 siblings, 1 reply; 12+ messages in thread
From: Gao Xiang via Linux-erofs @ 2019-10-06 5:01 UTC (permalink / raw)
To: Li Guifu; +Cc: linux-erofs
Hi Guifu,
On Sun, Oct 06, 2019 at 12:43:21AM +0800, Li Guifu wrote:
> > From: Li Guifu <blucerlee@gmail.com>
> >
> > Large xattrs or xattrs shared by a lot of files
> > can be stored in shared xattrs rather than
> > inlined right after inode.
> >
> > Signed-off-by: Li Guifu <blucerlee@gmail.com>
> > Signed-off-by: Gao Xiang <hsiangkao@aol.com>
>
> Dear Gao Xiang,
> Should It need to add a configure parameter to
> cfg.c_inline_xattr_tolerance which is a custome threshold
> of shared xattr ?
Make sense. I planned to add a brand new command argument but
I need to refer other mkfs first and see if some common name for
this. (If you have time, please help find as well...)
Thanks,
Gao Xiang
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v4 1/2] erofs-utils: introduce inline xattr support
2019-10-06 5:01 ` Gao Xiang via Linux-erofs
@ 2019-10-14 11:42 ` Gao Xiang via Linux-erofs
2019-10-14 11:42 ` [PATCH v4 2/2] erofs-utils: introduce shared " Gao Xiang via Linux-erofs
0 siblings, 1 reply; 12+ messages in thread
From: Gao Xiang via Linux-erofs @ 2019-10-14 11:42 UTC (permalink / raw)
To: Li Guifu, linux-erofs; +Cc: Miao Xie, htyuxe+dhbrei4sq0df8
From: "htyuxe+dhbrei4sq0df8@grr.la" <htyuxe+dhbrei4sq0df8@grr.la>
Load xattrs from source files and pack them into target image.
Signed-off-by: htyuxe+dhbrei4sq0df8@grr.la <htyuxe+dhbrei4sq0df8@grr.la>
Signed-off-by: Li Guifu <blucerlee@gmail.com>
Signed-off-by: Gao Xiang <hsiangkao@aol.com>
---
changes since v3:
- add a option "-x" to indicate minimal refcount of a shared xattr or
xattr disabled;
- introduce put_xattritem() for unused xattr cleanup.
configure.ac | 1 +
include/erofs/config.h | 2 +
include/erofs/defs.h | 6 +
include/erofs/hashtable.h | 462 ++++++++++++++++++++++++++++++++++++++
include/erofs/internal.h | 2 +-
include/erofs/xattr.h | 48 ++++
lib/Makefile.am | 3 +-
lib/config.c | 1 +
lib/inode.c | 31 ++-
lib/xattr.c | 309 +++++++++++++++++++++++++
mkfs/main.c | 12 +-
11 files changed, 872 insertions(+), 5 deletions(-)
create mode 100644 include/erofs/hashtable.h
create mode 100644 include/erofs/xattr.h
create mode 100644 lib/xattr.c
diff --git a/configure.ac b/configure.ac
index 07e034e..8c31cb7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -75,6 +75,7 @@ AC_CHECK_HEADERS(m4_flatten([
linux/falloc.h
linux/fs.h
linux/types.h
+ linux/xattr.h
limits.h
stddef.h
stdint.h
diff --git a/include/erofs/config.h b/include/erofs/config.h
index 9711638..2be05ee 100644
--- a/include/erofs/config.h
+++ b/include/erofs/config.h
@@ -28,6 +28,8 @@ struct erofs_configure {
char *c_compr_alg_master;
int c_compr_level_master;
int c_force_inodeversion;
+ /* < 0: xattr disabled; == INT_MAX: always use inline xattrs */
+ int c_inline_xattr_tolerance;
u64 c_unix_timestamp;
};
diff --git a/include/erofs/defs.h b/include/erofs/defs.h
index db51350..aa127d0 100644
--- a/include/erofs/defs.h
+++ b/include/erofs/defs.h
@@ -152,6 +152,12 @@ typedef int64_t s64;
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#ifdef __SIZEOF_LONG__
+#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
+#define BITS_PER_LONG __WORDSIZE
+#endif
+
#define BUG_ON(cond) assert(!(cond))
#ifdef NDEBUG
diff --git a/include/erofs/hashtable.h b/include/erofs/hashtable.h
new file mode 100644
index 0000000..ab57b56
--- /dev/null
+++ b/include/erofs/hashtable.h
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * erofs_utils/include/erofs/hashtable.h
+ *
+ * Original code taken from 'linux/include/linux/hash{,table}.h'
+ */
+#ifndef __EROFS_HASHTABLE_H
+#define __EROFS_HASHTABLE_H
+
+/*
+ * Fast hashing routine for ints, longs and pointers.
+ * (C) 2002 Nadia Yvette Chambers, IBM
+ */
+
+/*
+ * Statically sized hash table implementation
+ * (C) 2012 Sasha Levin <levinsasha928@gmail.com>
+ */
+
+#include "defs.h"
+
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
+ * fs/inode.c. It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
+#else
+#error Wordsize not 32 or 64
+#endif
+
+/*
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits. Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties. (See Knuth vol 3, section 6.4, exercise 9.)
+ *
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void hlist_add_behind(struct hlist_node *n,
+ struct hlist_node *prev)
+{
+ n->next = prev->next;
+ prev->next = n;
+ n->pprev = &prev->next;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+/* after that we'll appear to be on some hlist and hlist_del will work */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+ n->pprev = &n->next;
+}
+
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+ struct hlist_head *new)
+{
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos; pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ pos = n)
+
+#define hlist_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @pos:the type * to use as a loop cursor.
+ * @head:the head for your list.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_continue
+ * iterate over a hlist continuing after current point
+ * @pos:the type * to use as a loop cursor.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(pos, member) \
+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_from
+ * iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_safe
+ * iterate over list of given type safe against removal of list entry
+ * @pos:the type * to use as a loop cursor.
+ * @n:another &struct hlist_node to use as temporary storage
+ * @head:the head for your list.
+ * @member:the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
+static inline u32 __hash_32(u32 val)
+{
+ return val * GOLDEN_RATIO_32;
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
+ /* High bits are more random, so use them. */
+ return __hash_32(val) >> (32 - bits);
+}
+
+static __always_inline u32 hash_64(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+ /* 64x64-bit multiply is efficient on all 64-bit processors */
+ return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+ /* Hash 64 bits using only 32x32-bit multiply. */
+ return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
+}
+
+/**
+ * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
+ * @n - parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+ (n) & (1ULL << 60) ? 60 : \
+ (n) & (1ULL << 59) ? 59 : \
+ (n) & (1ULL << 58) ? 58 : \
+ (n) & (1ULL << 57) ? 57 : \
+ (n) & (1ULL << 56) ? 56 : \
+ (n) & (1ULL << 55) ? 55 : \
+ (n) & (1ULL << 54) ? 54 : \
+ (n) & (1ULL << 53) ? 53 : \
+ (n) & (1ULL << 52) ? 52 : \
+ (n) & (1ULL << 51) ? 51 : \
+ (n) & (1ULL << 50) ? 50 : \
+ (n) & (1ULL << 49) ? 49 : \
+ (n) & (1ULL << 48) ? 48 : \
+ (n) & (1ULL << 47) ? 47 : \
+ (n) & (1ULL << 46) ? 46 : \
+ (n) & (1ULL << 45) ? 45 : \
+ (n) & (1ULL << 44) ? 44 : \
+ (n) & (1ULL << 43) ? 43 : \
+ (n) & (1ULL << 42) ? 42 : \
+ (n) & (1ULL << 41) ? 41 : \
+ (n) & (1ULL << 40) ? 40 : \
+ (n) & (1ULL << 39) ? 39 : \
+ (n) & (1ULL << 38) ? 38 : \
+ (n) & (1ULL << 37) ? 37 : \
+ (n) & (1ULL << 36) ? 36 : \
+ (n) & (1ULL << 35) ? 35 : \
+ (n) & (1ULL << 34) ? 34 : \
+ (n) & (1ULL << 33) ? 33 : \
+ (n) & (1ULL << 32) ? 32 : \
+ (n) & (1ULL << 31) ? 31 : \
+ (n) & (1ULL << 30) ? 30 : \
+ (n) & (1ULL << 29) ? 29 : \
+ (n) & (1ULL << 28) ? 28 : \
+ (n) & (1ULL << 27) ? 27 : \
+ (n) & (1ULL << 26) ? 26 : \
+ (n) & (1ULL << 25) ? 25 : \
+ (n) & (1ULL << 24) ? 24 : \
+ (n) & (1ULL << 23) ? 23 : \
+ (n) & (1ULL << 22) ? 22 : \
+ (n) & (1ULL << 21) ? 21 : \
+ (n) & (1ULL << 20) ? 20 : \
+ (n) & (1ULL << 19) ? 19 : \
+ (n) & (1ULL << 18) ? 18 : \
+ (n) & (1ULL << 17) ? 17 : \
+ (n) & (1ULL << 16) ? 16 : \
+ (n) & (1ULL << 15) ? 15 : \
+ (n) & (1ULL << 14) ? 14 : \
+ (n) & (1ULL << 13) ? 13 : \
+ (n) & (1ULL << 12) ? 12 : \
+ (n) & (1ULL << 11) ? 11 : \
+ (n) & (1ULL << 10) ? 10 : \
+ (n) & (1ULL << 9) ? 9 : \
+ (n) & (1ULL << 8) ? 8 : \
+ (n) & (1ULL << 7) ? 7 : \
+ (n) & (1ULL << 6) ? 6 : \
+ (n) & (1ULL << 5) ? 5 : \
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+ (n) & (1ULL << 1) ? 1 : 0 \
+)
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels*/
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ *
+ * Calculates the size of the hashtable from the given parameter, otherwise
+ * same as hash_init_size.
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_hashed - check whether an object is in any hashtable
+ * @node: the &struct hlist_node of the object to be checked
+ */
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * hash_empty - check whether a hashtable is empty
+ * @hashtable: hashtable to check
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
+ * hash_for_each - iterate over a hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry(obj, &name[bkt], member)
+
+/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @tmp: a &struct used for temporary storage
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+/**
+ * hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+#endif
diff --git a/include/erofs/internal.h b/include/erofs/internal.h
index 5384946..25ce7b5 100644
--- a/include/erofs/internal.h
+++ b/include/erofs/internal.h
@@ -61,7 +61,7 @@ struct erofs_sb_info {
extern struct erofs_sb_info sbi;
struct erofs_inode {
- struct list_head i_hash, i_subdirs;
+ struct list_head i_hash, i_subdirs, i_xattrs;
unsigned int i_count;
struct erofs_inode *i_parent;
diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
new file mode 100644
index 0000000..29df025
--- /dev/null
+++ b/include/erofs/xattr.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * erofs_utils/include/erofs/xattr.h
+ *
+ * Originally contributed by an anonymous person,
+ * heavily changed by Li Guifu <blucerlee@gmail.com>
+ * and Gao Xiang <xiang@kernel.org>
+ */
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
+#include "internal.h"
+
+#define EROFS_INODE_XATTR_ICOUNT(_size) ({\
+ u32 __size = le16_to_cpu(_size); \
+ ((__size) == 0) ? 0 : \
+ (_size - sizeof(struct erofs_xattr_ibody_header)) / \
+ sizeof(struct erofs_xattr_entry) + 1; })
+
+#ifndef XATTR_USER_PREFIX
+#define XATTR_USER_PREFIX "user."
+#endif
+#ifndef XATTR_USER_PREFIX_LEN
+#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
+#endif
+#ifndef XATTR_SECURITY_PREFIX
+#define XATTR_SECURITY_PREFIX "security."
+#endif
+#ifndef XATTR_SECURITY_PREFIX_LEN
+#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
+#endif
+#ifndef XATTR_TRUSTED_PREFIX
+#define XATTR_TRUSTED_PREFIX "trusted."
+#endif
+#ifndef XATTR_TRUSTED_PREFIX_LEN
+#define XATTR_TRUSTED_PREFIX_LEN (sizeof(XATTR_TRUSTED_PREFIX) - 1)
+#endif
+#ifndef XATTR_NAME_POSIX_ACL_ACCESS
+#define XATTR_NAME_POSIX_ACL_ACCESS "system.posix_acl_access"
+#endif
+#ifndef XATTR_NAME_POSIX_ACL_DEFAULT
+#define XATTR_NAME_POSIX_ACL_DEFAULT "system.posix_acl_default"
+#endif
+
+int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs);
+char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size);
+
+#endif
diff --git a/lib/Makefile.am b/lib/Makefile.am
index dea82f7..1ff81f9 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -2,7 +2,8 @@
# Makefile.am
noinst_LTLIBRARIES = liberofs.la
-liberofs_la_SOURCES = config.c io.c cache.c inode.c compress.c compressor.c
+liberofs_la_SOURCES = config.c io.c cache.c inode.c xattr.c \
+ compress.c compressor.c
liberofs_la_CFLAGS = -Wall -Werror -I$(top_srcdir)/include
if ENABLE_LZ4
liberofs_la_CFLAGS += ${LZ4_CFLAGS}
diff --git a/lib/config.c b/lib/config.c
index 46625d7..cb42706 100644
--- a/lib/config.c
+++ b/lib/config.c
@@ -21,6 +21,7 @@ void erofs_init_configure(void)
cfg.c_dry_run = false;
cfg.c_compr_level_master = -1;
cfg.c_force_inodeversion = 0;
+ cfg.c_inline_xattr_tolerance = 1;
cfg.c_unix_timestamp = -1;
}
diff --git a/lib/inode.c b/lib/inode.c
index 395caae..b7121e0 100644
--- a/lib/inode.c
+++ b/lib/inode.c
@@ -20,6 +20,7 @@
#include "erofs/cache.h"
#include "erofs/io.h"
#include "erofs/compress.h"
+#include "erofs/xattr.h"
struct erofs_sb_info sbi;
@@ -365,6 +366,7 @@ fail:
static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
{
struct erofs_inode *const inode = bh->fsprivate;
+ const u16 icount = EROFS_INODE_XATTR_ICOUNT(inode->xattr_isize);
erofs_off_t off = erofs_btell(bh, false);
union {
struct erofs_inode_compact dic;
@@ -375,6 +377,7 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
switch (inode->inode_isize) {
case sizeof(struct erofs_inode_compact):
u.dic.i_format = cpu_to_le16(0 | (inode->datalayout << 1));
+ u.dic.i_xattr_icount = cpu_to_le16(icount);
u.dic.i_mode = cpu_to_le16(inode->i_mode);
u.dic.i_nlink = cpu_to_le16(inode->i_nlink);
u.dic.i_size = cpu_to_le32((u32)inode->i_size);
@@ -404,6 +407,7 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
break;
case sizeof(struct erofs_inode_extended):
u.die.i_format = cpu_to_le16(1 | (inode->datalayout << 1));
+ u.die.i_xattr_icount = cpu_to_le16(icount);
u.die.i_mode = cpu_to_le16(inode->i_mode);
u.die.i_nlink = cpu_to_le32(inode->i_nlink);
u.die.i_size = cpu_to_le64(inode->i_size);
@@ -445,6 +449,20 @@ static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
return false;
off += inode->inode_isize;
+ if (inode->xattr_isize) {
+ char *xattrs = erofs_export_xattr_ibody(&inode->i_xattrs,
+ inode->xattr_isize);
+ if (IS_ERR(xattrs))
+ return false;
+
+ ret = dev_write(xattrs, off, inode->xattr_isize);
+ free(xattrs);
+ if (ret)
+ return false;
+
+ off += inode->xattr_isize;
+ }
+
if (inode->extent_isize) {
/* write compression metadata */
off = Z_EROFS_VLE_EXTENT_ALIGN(off);
@@ -499,8 +517,10 @@ int erofs_prepare_inode_buffer(struct erofs_inode *inode)
DBG_BUGON(inode->bh || inode->bh_inline);
- inodesize = inode->inode_isize + inode->xattr_isize +
- inode->extent_isize;
+ inodesize = inode->inode_isize + inode->xattr_isize;
+ if (inode->extent_isize)
+ inodesize = Z_EROFS_VLE_EXTENT_ALIGN(inodesize) +
+ inode->extent_isize;
if (is_inode_layout_compression(inode))
goto noinline;
@@ -707,6 +727,8 @@ struct erofs_inode *erofs_new_inode(void)
inode->i_count = 1;
init_list_head(&inode->i_subdirs);
+ init_list_head(&inode->i_xattrs);
+
inode->idata_size = 0;
inode->xattr_isize = 0;
inode->extent_isize = 0;
@@ -795,6 +817,11 @@ struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir)
struct dirent *dp;
struct erofs_dentry *d;
+ ret = erofs_prepare_xattr_ibody(dir->i_srcpath, &dir->i_xattrs);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ dir->xattr_isize = ret;
+
if (!S_ISDIR(dir->i_mode)) {
if (S_ISLNK(dir->i_mode)) {
char *const symlink = malloc(dir->i_size);
diff --git a/lib/xattr.c b/lib/xattr.c
new file mode 100644
index 0000000..d07d325
--- /dev/null
+++ b/lib/xattr.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * erofs_utils/lib/xattr.c
+ *
+ * Originally contributed by an anonymous person,
+ * heavily changed by Li Guifu <blucerlee@gmail.com>
+ * and Gao Xiang <hsiangkao@aol.com>
+ */
+#include <stdlib.h>
+#include <sys/xattr.h>
+#ifdef HAVE_LINUX_XATTR_H
+#include <linux/xattr.h>
+#endif
+#include "erofs/print.h"
+#include "erofs/hashtable.h"
+#include "erofs/xattr.h"
+
+#define EA_HASHTABLE_BITS 16
+
+struct xattr_item {
+ const char *kvbuf;
+ unsigned int hash[2], len[2], count;
+ u8 prefix;
+ struct hlist_node node;
+};
+
+struct inode_xattr_node {
+ struct list_head list;
+ struct xattr_item *item;
+};
+
+static DECLARE_HASHTABLE(ea_hashtable, EA_HASHTABLE_BITS);
+
+static struct xattr_prefix {
+ const char *prefix;
+ u16 prefix_len;
+} xattr_types[] = {
+ [EROFS_XATTR_INDEX_USER] = {
+ XATTR_USER_PREFIX,
+ XATTR_USER_PREFIX_LEN
+ }, [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = {
+ XATTR_NAME_POSIX_ACL_ACCESS,
+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
+ }, [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = {
+ XATTR_NAME_POSIX_ACL_DEFAULT,
+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1
+ }, [EROFS_XATTR_INDEX_TRUSTED] = {
+ XATTR_TRUSTED_PREFIX,
+ XATTR_TRUSTED_PREFIX_LEN
+ }, [EROFS_XATTR_INDEX_SECURITY] = {
+ XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN
+ }
+};
+
+static unsigned int BKDRHash(char *str, unsigned int len)
+{
+ const unsigned int seed = 131313;
+ unsigned int hash = 0;
+
+ while (len) {
+ hash = hash * seed + (*str++);
+ --len;
+ }
+ return hash;
+}
+
+static unsigned int xattr_item_hash(u8 prefix, char *buf,
+ unsigned int len[2], unsigned int hash[2])
+{
+ hash[0] = BKDRHash(buf, len[0]); /* key */
+ hash[1] = BKDRHash(buf + len[0], len[1]); /* value */
+
+ return prefix ^ hash[0] ^ hash[1];
+}
+
+static unsigned int put_xattritem(struct xattr_item *item)
+{
+ if (item->count > 1)
+ return --item->count;
+ free(item);
+ return 0;
+}
+
+static struct xattr_item *get_xattritem(u8 prefix, char *kvbuf,
+ unsigned int len[2])
+{
+ struct xattr_item *item;
+ unsigned int hash[2], hkey;
+
+ hkey = xattr_item_hash(prefix, kvbuf, len, hash);
+
+ hash_for_each_possible(ea_hashtable, item, node, hkey) {
+ if (prefix == item->prefix &&
+ item->len[0] == len[0] && item->len[1] == len[1] &&
+ item->hash[0] == hash[0] && item->hash[1] == hash[1] &&
+ !memcmp(kvbuf, item->kvbuf, len[0] + len[1])) {
+ free(kvbuf);
+ ++item->count;
+ return item;
+ }
+ }
+
+ item = malloc(sizeof(*item));
+ if (!item) {
+ free(kvbuf);
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_HLIST_NODE(&item->node);
+ item->count = 1;
+ item->kvbuf = kvbuf;
+ item->len[0] = len[0];
+ item->len[1] = len[1];
+ item->hash[0] = hash[0];
+ item->hash[1] = hash[1];
+ item->prefix = prefix;
+ hash_add(ea_hashtable, &item->node, hkey);
+ return item;
+}
+
+static bool match_prefix(const char *key, u8 *index, u16 *len)
+{
+ struct xattr_prefix *p;
+
+ for (p = xattr_types; p < xattr_types + ARRAY_SIZE(xattr_types); ++p) {
+ if (p->prefix && !strncmp(p->prefix, key, p->prefix_len)) {
+ *len = p->prefix_len;
+ *index = p - xattr_types;
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct xattr_item *parse_one_xattr(const char *path, const char *key,
+ unsigned int keylen)
+{
+ ssize_t ret;
+ u8 prefix;
+ u16 prefixlen;
+ unsigned int len[2];
+ char *kvbuf;
+
+ erofs_dbg("parse xattr [%s] of %s", path, key);
+
+ if (!match_prefix(key, &prefix, &prefixlen))
+ return ERR_PTR(-ENODATA);
+
+ DBG_BUGON(keylen < prefixlen);
+
+ /* determine length of the value */
+ ret = lgetxattr(path, key, NULL, 0);
+ if (ret < 0)
+ return ERR_PTR(-errno);
+ len[1] = ret;
+
+ /* allocate key-value buffer */
+ len[0] = keylen - prefixlen;
+
+ kvbuf = malloc(len[0] + len[1]);
+ if (!kvbuf)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(kvbuf, key + prefixlen, len[0]);
+ if (len[1]) {
+ /* copy value to buffer */
+ ret = lgetxattr(path, key, kvbuf + len[0], len[1]);
+ if (ret < 0) {
+ free(kvbuf);
+ return ERR_PTR(-errno);
+ }
+ if (len[1] != ret) {
+ erofs_err("size of xattr value got changed just now (%u-> %ld)",
+ len[1], (long)ret);
+ len[1] = ret;
+ }
+ }
+ return get_xattritem(prefix, kvbuf, len);
+}
+
+static int inode_xattr_add(struct list_head *hlist, struct xattr_item *item)
+{
+ struct inode_xattr_node *node = malloc(sizeof(*node));
+
+ if (!node)
+ return -ENOMEM;
+ init_list_head(&node->list);
+ node->item = item;
+ list_add(&node->list, hlist);
+ return 0;
+}
+
+static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
+{
+ int ret = 0;
+ char *keylst, *key;
+ ssize_t kllen = llistxattr(path, NULL, 0);
+
+ if (kllen < 0 && errno != ENODATA) {
+ erofs_err("llistxattr to get the size of names for %s failed",
+ path);
+ return -errno;
+ }
+ if (kllen <= 1)
+ return 0;
+
+ keylst = malloc(kllen);
+ if (!keylst)
+ return -ENOMEM;
+
+ /* copy the list of attribute keys to the buffer.*/
+ kllen = llistxattr(path, keylst, kllen);
+ if (kllen < 0) {
+ erofs_err("llistxattr to get names for %s failed", path);
+ ret = -errno;
+ goto err;
+ }
+
+ /*
+ * loop over the list of zero terminated strings with the
+ * attribute keys. Use the remaining buffer length to determine
+ * the end of the list.
+ */
+ key = keylst;
+ while (kllen > 0) {
+ unsigned int keylen = strlen(key);
+ struct xattr_item *item = parse_one_xattr(path, key, keylen);
+
+ if (IS_ERR(item)) {
+ ret = PTR_ERR(item);
+ goto err;
+ }
+
+ if (ixattrs) {
+ ret = inode_xattr_add(ixattrs, item);
+ if (ret < 0)
+ goto err;
+ }
+ kllen -= keylen + 1;
+ key += keylen + 1;
+ }
+err:
+ free(keylst);
+ return ret;
+
+}
+
+int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs)
+{
+ int ret;
+ struct inode_xattr_node *node;
+
+ /* check if xattr is disabled */
+ if (cfg.c_inline_xattr_tolerance < 0)
+ return 0;
+
+ ret = read_xattrs_from_file(path, ixattrs);
+ if (ret < 0)
+ return ret;
+
+ if (list_empty(ixattrs))
+ return 0;
+
+ /* get xattr ibody size */
+ ret = sizeof(struct erofs_xattr_ibody_header);
+ list_for_each_entry(node, ixattrs, list) {
+ const struct xattr_item *item = node->item;
+
+ ret += sizeof(struct erofs_xattr_entry);
+ ret = EROFS_XATTR_ALIGN(ret + item->len[0] + item->len[1]);
+ }
+ return ret;
+}
+
+char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
+{
+ struct inode_xattr_node *node, *n;
+ struct erofs_xattr_ibody_header *header;
+ unsigned int p;
+ char *buf = calloc(1, size);
+
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ header = (struct erofs_xattr_ibody_header *)buf;
+ header->h_shared_count = 0;
+
+ p = sizeof(struct erofs_xattr_ibody_header);
+ list_for_each_entry_safe(node, n, ixattrs, list) {
+ struct xattr_item *const item = node->item;
+ const struct erofs_xattr_entry entry = {
+ .e_name_index = item->prefix,
+ .e_name_len = item->len[0],
+ .e_value_size = cpu_to_le16(item->len[1])
+ };
+
+ memcpy(buf + p, &entry, sizeof(entry));
+ p += sizeof(struct erofs_xattr_entry);
+ memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]);
+ p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]);
+
+ list_del(&node->list);
+ free(node);
+ put_xattritem(item);
+ }
+ DBG_BUGON(p > size);
+ return buf;
+}
+
diff --git a/mkfs/main.c b/mkfs/main.c
index 536b784..0df2a96 100644
--- a/mkfs/main.c
+++ b/mkfs/main.c
@@ -28,6 +28,7 @@ static void usage(void)
fprintf(stderr, "Generate erofs image from DIRECTORY to FILE, and [options] are:\n");
fprintf(stderr, " -zX[,Y] X=compressor (Y=compression level, optional)\n");
fprintf(stderr, " -d# set output message level to # (maximum 9)\n");
+ fprintf(stderr, " -x# set xattr tolerance to # (< 0, disable xattrs; default 1)\n");
fprintf(stderr, " -EX[,...] X=extended options\n");
fprintf(stderr, " -T# set a fixed UNIX timestamp # to all files\n");
}
@@ -94,7 +95,7 @@ static int mkfs_parse_options_cfg(int argc, char *argv[])
char *endptr;
int opt, i;
- while ((opt = getopt(argc, argv, "d:z:E:T:")) != -1) {
+ while ((opt = getopt(argc, argv, "d:x:z:E:T:")) != -1) {
switch (opt) {
case 'z':
if (!optarg) {
@@ -122,6 +123,15 @@ static int mkfs_parse_options_cfg(int argc, char *argv[])
cfg.c_dbg_lvl = i;
break;
+ case 'x':
+ i = strtol(optarg, &endptr, 0);
+ if (*endptr != '\0') {
+ erofs_err("invalid xattr tolerance %s", optarg);
+ return -EINVAL;
+ }
+ cfg.c_inline_xattr_tolerance = i;
+ break;
+
case 'E':
opt = parse_extended_opts(optarg);
if (opt)
--
2.17.1
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v4 2/2] erofs-utils: introduce shared xattr support
2019-10-14 11:42 ` [PATCH v4 1/2] erofs-utils: introduce inline " Gao Xiang via Linux-erofs
@ 2019-10-14 11:42 ` Gao Xiang via Linux-erofs
2019-10-14 23:53 ` [PATCH v5 " Gao Xiang via Linux-erofs
0 siblings, 1 reply; 12+ messages in thread
From: Gao Xiang via Linux-erofs @ 2019-10-14 11:42 UTC (permalink / raw)
To: Li Guifu, linux-erofs; +Cc: Miao Xie
From: Li Guifu <blucerlee@gmail.com>
Large xattrs or xattrs shared by a lot of files
can be stored in shared xattrs rather than
inlined right after the inode.
Signed-off-by: Li Guifu <blucerlee@gmail.com>
Signed-off-by: Gao Xiang <hsiangkao@aol.com>
---
include/erofs/defs.h | 2 +-
include/erofs/xattr.h | 1 +
lib/config.c | 2 +-
lib/inode.c | 1 -
lib/xattr.c | 208 +++++++++++++++++++++++++++++++++++++++++-
mkfs/main.c | 12 ++-
6 files changed, 220 insertions(+), 6 deletions(-)
diff --git a/include/erofs/defs.h b/include/erofs/defs.h
index aa127d0..c67035d 100644
--- a/include/erofs/defs.h
+++ b/include/erofs/defs.h
@@ -14,7 +14,7 @@
#include <stdint.h>
#include <assert.h>
#include <inttypes.h>
-
+#include <limits.h>
#include <stdbool.h>
#ifdef HAVE_CONFIG_H
diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
index 29df025..3dff1ea 100644
--- a/include/erofs/xattr.h
+++ b/include/erofs/xattr.h
@@ -44,5 +44,6 @@
int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs);
char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size);
+int erofs_build_shared_xattrs_from_path(const char *path);
#endif
diff --git a/lib/config.c b/lib/config.c
index cb42706..cbbecce 100644
--- a/lib/config.c
+++ b/lib/config.c
@@ -21,7 +21,7 @@ void erofs_init_configure(void)
cfg.c_dry_run = false;
cfg.c_compr_level_master = -1;
cfg.c_force_inodeversion = 0;
- cfg.c_inline_xattr_tolerance = 1;
+ cfg.c_inline_xattr_tolerance = 2;
cfg.c_unix_timestamp = -1;
}
diff --git a/lib/inode.c b/lib/inode.c
index b7121e0..86c465e 100644
--- a/lib/inode.c
+++ b/lib/inode.c
@@ -8,7 +8,6 @@
* with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
*/
#define _GNU_SOURCE
-#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
diff --git a/lib/xattr.c b/lib/xattr.c
index d07d325..18a8c82 100644
--- a/lib/xattr.c
+++ b/lib/xattr.c
@@ -6,20 +6,25 @@
* heavily changed by Li Guifu <blucerlee@gmail.com>
* and Gao Xiang <hsiangkao@aol.com>
*/
+#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/xattr.h>
#ifdef HAVE_LINUX_XATTR_H
#include <linux/xattr.h>
#endif
+#include <sys/stat.h>
+#include <dirent.h>
#include "erofs/print.h"
#include "erofs/hashtable.h"
#include "erofs/xattr.h"
+#include "erofs/cache.h"
#define EA_HASHTABLE_BITS 16
struct xattr_item {
const char *kvbuf;
unsigned int hash[2], len[2], count;
+ int shared_xattr_id;
u8 prefix;
struct hlist_node node;
};
@@ -31,6 +36,9 @@ struct inode_xattr_node {
static DECLARE_HASHTABLE(ea_hashtable, EA_HASHTABLE_BITS);
+static LIST_HEAD(shared_xattrs_list);
+static unsigned int shared_xattrs_count, shared_xattrs_size;
+
static struct xattr_prefix {
const char *prefix;
u16 prefix_len;
@@ -113,6 +121,7 @@ static struct xattr_item *get_xattritem(u8 prefix, char *kvbuf,
item->len[1] = len[1];
item->hash[0] = hash[0];
item->hash[1] = hash[1];
+ item->shared_xattr_id = -1;
item->prefix = prefix;
hash_add(ea_hashtable, &item->node, hkey);
return item;
@@ -160,7 +169,6 @@ static struct xattr_item *parse_one_xattr(const char *path, const char *key,
kvbuf = malloc(len[0] + len[1]);
if (!kvbuf)
return ERR_PTR(-ENOMEM);
-
memcpy(kvbuf, key + prefixlen, len[0]);
if (len[1]) {
/* copy value to buffer */
@@ -190,6 +198,23 @@ static int inode_xattr_add(struct list_head *hlist, struct xattr_item *item)
return 0;
}
+static int shared_xattr_add(struct xattr_item *item)
+{
+ struct inode_xattr_node *node = malloc(sizeof(*node));
+
+ if (!node)
+ return -ENOMEM;
+
+ init_list_head(&node->list);
+ node->item = item;
+ list_add(&node->list, &shared_xattrs_list);
+
+ shared_xattrs_size += sizeof(struct erofs_xattr_entry);
+ shared_xattrs_size = EROFS_XATTR_ALIGN(shared_xattrs_size +
+ item->len[0] + item->len[1]);
+ return ++shared_xattrs_count;
+}
+
static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
{
int ret = 0;
@@ -235,6 +260,11 @@ static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
ret = inode_xattr_add(ixattrs, item);
if (ret < 0)
goto err;
+ } else if (item->count == cfg.c_inline_xattr_tolerance + 1) {
+ ret = shared_xattr_add(item);
+ if (ret < 0)
+ goto err;
+ ret = 0;
}
kllen -= keylen + 1;
key += keylen + 1;
@@ -266,16 +296,174 @@ int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs)
list_for_each_entry(node, ixattrs, list) {
const struct xattr_item *item = node->item;
+ if (item->shared_xattr_id >= 0) {
+ ret += sizeof(__le32);
+ continue;
+ }
ret += sizeof(struct erofs_xattr_entry);
ret = EROFS_XATTR_ALIGN(ret + item->len[0] + item->len[1]);
}
return ret;
}
+static int erofs_count_all_xattrs_from_path(const char *path)
+{
+ int ret;
+ DIR *_dir;
+ struct stat64 st;
+
+ _dir = opendir(path);
+ if (!_dir) {
+ erofs_err("%s, failed to opendir at %s: %s",
+ __func__, path, erofs_strerror(errno));
+ return -errno;
+ }
+
+ ret = 0;
+ while (1) {
+ struct dirent *dp;
+ char buf[PATH_MAX];
+
+ /*
+ * set errno to 0 before calling readdir() in order to
+ * distinguish end of stream from an error.
+ */
+ errno = 0;
+ dp = readdir(_dir);
+ if (!dp)
+ break;
+
+ if (is_dot_dotdot(dp->d_name) ||
+ !strncmp(dp->d_name, "lost+found", strlen("lost+found")))
+ continue;
+
+ ret = snprintf(buf, PATH_MAX, "%s/%s", path, dp->d_name);
+
+ if (ret < 0 || ret >= PATH_MAX) {
+ /* ignore the too long path */
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = read_xattrs_from_file(buf, NULL);
+ if (ret)
+ goto fail;
+
+ ret = lstat64(buf, &st);
+ if (ret) {
+ ret = -errno;
+ goto fail;
+ }
+
+ if (!S_ISDIR(st.st_mode))
+ continue;
+
+ ret = erofs_count_all_xattrs_from_path(buf);
+ if (ret)
+ goto fail;
+ }
+
+ if (errno)
+ ret = -errno;
+
+fail:
+ closedir(_dir);
+ return ret;
+}
+
+static void erofs_cleanxattrs(bool sharedxattrs)
+{
+ unsigned int i;
+ struct xattr_item *item;
+
+ hash_for_each(ea_hashtable, i, item, node) {
+ if (sharedxattrs && item->shared_xattr_id >= 0)
+ continue;
+
+ hash_del(&item->node);
+ free(item);
+ }
+
+ if (sharedxattrs)
+ return;
+
+ shared_xattrs_size = shared_xattrs_count = 0;
+}
+
+int erofs_build_shared_xattrs_from_path(const char *path)
+{
+ int ret;
+ struct erofs_buffer_head *bh;
+ struct inode_xattr_node *node, *n;
+ char *buf;
+ unsigned int p;
+ erofs_off_t off;
+
+ /* check if xattr or shared xattr is disabled */
+ if (cfg.c_inline_xattr_tolerance < 0 ||
+ cfg.c_inline_xattr_tolerance == INT_MAX)
+ return 0;
+
+ if (shared_xattrs_size || shared_xattrs_count) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
+
+ ret = erofs_count_all_xattrs_from_path(path);
+ if (ret)
+ return ret;
+
+ if (!shared_xattrs_size)
+ return 0;
+
+ buf = malloc(shared_xattrs_size);
+ if (!buf)
+ return -ENOMEM;
+
+ bh = erofs_balloc(XATTR, shared_xattrs_size, 0, 0);
+ if (IS_ERR(bh)) {
+ free(buf);
+ return PTR_ERR(bh);
+ }
+ bh->op = &erofs_skip_write_bhops;
+
+ erofs_mapbh(bh->block, true);
+ off = erofs_btell(bh, false);
+
+ sbi.xattr_blkaddr = off / EROFS_BLKSIZ;
+ off %= EROFS_BLKSIZ;
+ p = 0;
+
+ list_for_each_entry_safe(node, n, &shared_xattrs_list, list) {
+ struct xattr_item *const item = node->item;
+ const struct erofs_xattr_entry entry = {
+ .e_name_index = item->prefix,
+ .e_name_len = item->len[0],
+ .e_value_size = cpu_to_le16(item->len[1])
+ };
+
+ list_del(&node->list);
+
+ item->shared_xattr_id = (off + p) /
+ sizeof(struct erofs_xattr_entry);
+
+ memcpy(buf + p, &entry, sizeof(entry));
+ p += sizeof(struct erofs_xattr_entry);
+ memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]);
+ p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]);
+ free(node);
+ }
+ bh->fsprivate = buf;
+ bh->op = &erofs_buf_write_bhops;
+ erofs_cleanxattrs(true);
+ return 0;
+}
+
char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
{
struct inode_xattr_node *node, *n;
struct erofs_xattr_ibody_header *header;
+ LIST_HEAD(ilst);
unsigned int p;
char *buf = calloc(1, size);
@@ -288,6 +476,24 @@ char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
p = sizeof(struct erofs_xattr_ibody_header);
list_for_each_entry_safe(node, n, ixattrs, list) {
struct xattr_item *const item = node->item;
+
+ list_del(&node->list);
+
+ /* move inline xattrs to the onstack list */
+ if (item->shared_xattr_id < 0) {
+ list_add(&node->list, &ilst);
+ continue;
+ }
+
+ *(__le32 *)(buf + p) = cpu_to_le32(item->shared_xattr_id);
+ p += sizeof(__le32);
+ ++header->h_shared_count;
+ free(node);
+ put_xattritem(item);
+ }
+
+ list_for_each_entry_safe(node, n, &ilst, list) {
+ struct xattr_item *const item = node->item;
const struct erofs_xattr_entry entry = {
.e_name_index = item->prefix,
.e_name_len = item->len[0],
diff --git a/mkfs/main.c b/mkfs/main.c
index 0df2a96..71c81f5 100644
--- a/mkfs/main.c
+++ b/mkfs/main.c
@@ -19,6 +19,7 @@
#include "erofs/inode.h"
#include "erofs/io.h"
#include "erofs/compress.h"
+#include "erofs/xattr.h"
#define EROFS_SUPER_END (EROFS_SUPER_OFFSET + sizeof(struct erofs_super_block))
@@ -28,7 +29,7 @@ static void usage(void)
fprintf(stderr, "Generate erofs image from DIRECTORY to FILE, and [options] are:\n");
fprintf(stderr, " -zX[,Y] X=compressor (Y=compression level, optional)\n");
fprintf(stderr, " -d# set output message level to # (maximum 9)\n");
- fprintf(stderr, " -x# set xattr tolerance to # (< 0, disable xattrs; default 1)\n");
+ fprintf(stderr, " -x# set xattr tolerance to # (< 0, disable xattrs; default 2)\n");
fprintf(stderr, " -EX[,...] X=extended options\n");
fprintf(stderr, " -T# set a fixed UNIX timestamp # to all files\n");
}
@@ -188,7 +189,7 @@ int erofs_mkfs_update_super_block(struct erofs_buffer_head *bh,
.build_time_nsec = cpu_to_le32(sbi.build_time_nsec),
.blocks = 0,
.meta_blkaddr = sbi.meta_blkaddr,
- .xattr_blkaddr = 0,
+ .xattr_blkaddr = sbi.xattr_blkaddr,
.feature_incompat = cpu_to_le32(sbi.feature_incompat),
};
const unsigned int sb_blksize =
@@ -284,6 +285,13 @@ int main(int argc, char **argv)
erofs_inode_manager_init();
+ err = erofs_build_shared_xattrs_from_path(cfg.c_src_path);
+ if (err) {
+ erofs_err("Failed to build shared xattrs: %s",
+ erofs_strerror(err));
+ goto exit;
+ }
+
root_inode = erofs_mkfs_build_tree_from_path(NULL, cfg.c_src_path);
if (IS_ERR(root_inode)) {
err = PTR_ERR(root_inode);
--
2.17.1
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v5 2/2] erofs-utils: introduce shared xattr support
2019-10-14 11:42 ` [PATCH v4 2/2] erofs-utils: introduce shared " Gao Xiang via Linux-erofs
@ 2019-10-14 23:53 ` Gao Xiang via Linux-erofs
0 siblings, 0 replies; 12+ messages in thread
From: Gao Xiang via Linux-erofs @ 2019-10-14 23:53 UTC (permalink / raw)
To: Li Guifu, linux-erofs; +Cc: Miao Xie
From: Li Guifu <blucerlee@gmail.com>
Large xattrs or xattrs shared by a lot of files
can be stored in shared xattrs rather than
inlined right after the inode.
Signed-off-by: Li Guifu <blucerlee@gmail.com>
Signed-off-by: Gao Xiang <hsiangkao@aol.com>
---
Changes since v4:
- cleanxattrs when !shared_xattrs_size as well
include/erofs/defs.h | 2 +-
include/erofs/xattr.h | 1 +
lib/config.c | 2 +-
lib/inode.c | 1 -
lib/xattr.c | 209 +++++++++++++++++++++++++++++++++++++++++-
mkfs/main.c | 12 ++-
6 files changed, 221 insertions(+), 6 deletions(-)
diff --git a/include/erofs/defs.h b/include/erofs/defs.h
index aa127d0..c67035d 100644
--- a/include/erofs/defs.h
+++ b/include/erofs/defs.h
@@ -14,7 +14,7 @@
#include <stdint.h>
#include <assert.h>
#include <inttypes.h>
-
+#include <limits.h>
#include <stdbool.h>
#ifdef HAVE_CONFIG_H
diff --git a/include/erofs/xattr.h b/include/erofs/xattr.h
index 29df025..3dff1ea 100644
--- a/include/erofs/xattr.h
+++ b/include/erofs/xattr.h
@@ -44,5 +44,6 @@
int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs);
char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size);
+int erofs_build_shared_xattrs_from_path(const char *path);
#endif
diff --git a/lib/config.c b/lib/config.c
index cb42706..cbbecce 100644
--- a/lib/config.c
+++ b/lib/config.c
@@ -21,7 +21,7 @@ void erofs_init_configure(void)
cfg.c_dry_run = false;
cfg.c_compr_level_master = -1;
cfg.c_force_inodeversion = 0;
- cfg.c_inline_xattr_tolerance = 1;
+ cfg.c_inline_xattr_tolerance = 2;
cfg.c_unix_timestamp = -1;
}
diff --git a/lib/inode.c b/lib/inode.c
index b7121e0..86c465e 100644
--- a/lib/inode.c
+++ b/lib/inode.c
@@ -8,7 +8,6 @@
* with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
*/
#define _GNU_SOURCE
-#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
diff --git a/lib/xattr.c b/lib/xattr.c
index d07d325..1564016 100644
--- a/lib/xattr.c
+++ b/lib/xattr.c
@@ -6,20 +6,25 @@
* heavily changed by Li Guifu <blucerlee@gmail.com>
* and Gao Xiang <hsiangkao@aol.com>
*/
+#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/xattr.h>
#ifdef HAVE_LINUX_XATTR_H
#include <linux/xattr.h>
#endif
+#include <sys/stat.h>
+#include <dirent.h>
#include "erofs/print.h"
#include "erofs/hashtable.h"
#include "erofs/xattr.h"
+#include "erofs/cache.h"
#define EA_HASHTABLE_BITS 16
struct xattr_item {
const char *kvbuf;
unsigned int hash[2], len[2], count;
+ int shared_xattr_id;
u8 prefix;
struct hlist_node node;
};
@@ -31,6 +36,9 @@ struct inode_xattr_node {
static DECLARE_HASHTABLE(ea_hashtable, EA_HASHTABLE_BITS);
+static LIST_HEAD(shared_xattrs_list);
+static unsigned int shared_xattrs_count, shared_xattrs_size;
+
static struct xattr_prefix {
const char *prefix;
u16 prefix_len;
@@ -113,6 +121,7 @@ static struct xattr_item *get_xattritem(u8 prefix, char *kvbuf,
item->len[1] = len[1];
item->hash[0] = hash[0];
item->hash[1] = hash[1];
+ item->shared_xattr_id = -1;
item->prefix = prefix;
hash_add(ea_hashtable, &item->node, hkey);
return item;
@@ -160,7 +169,6 @@ static struct xattr_item *parse_one_xattr(const char *path, const char *key,
kvbuf = malloc(len[0] + len[1]);
if (!kvbuf)
return ERR_PTR(-ENOMEM);
-
memcpy(kvbuf, key + prefixlen, len[0]);
if (len[1]) {
/* copy value to buffer */
@@ -190,6 +198,23 @@ static int inode_xattr_add(struct list_head *hlist, struct xattr_item *item)
return 0;
}
+static int shared_xattr_add(struct xattr_item *item)
+{
+ struct inode_xattr_node *node = malloc(sizeof(*node));
+
+ if (!node)
+ return -ENOMEM;
+
+ init_list_head(&node->list);
+ node->item = item;
+ list_add(&node->list, &shared_xattrs_list);
+
+ shared_xattrs_size += sizeof(struct erofs_xattr_entry);
+ shared_xattrs_size = EROFS_XATTR_ALIGN(shared_xattrs_size +
+ item->len[0] + item->len[1]);
+ return ++shared_xattrs_count;
+}
+
static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
{
int ret = 0;
@@ -235,6 +260,11 @@ static int read_xattrs_from_file(const char *path, struct list_head *ixattrs)
ret = inode_xattr_add(ixattrs, item);
if (ret < 0)
goto err;
+ } else if (item->count == cfg.c_inline_xattr_tolerance + 1) {
+ ret = shared_xattr_add(item);
+ if (ret < 0)
+ goto err;
+ ret = 0;
}
kllen -= keylen + 1;
key += keylen + 1;
@@ -266,16 +296,175 @@ int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs)
list_for_each_entry(node, ixattrs, list) {
const struct xattr_item *item = node->item;
+ if (item->shared_xattr_id >= 0) {
+ ret += sizeof(__le32);
+ continue;
+ }
ret += sizeof(struct erofs_xattr_entry);
ret = EROFS_XATTR_ALIGN(ret + item->len[0] + item->len[1]);
}
return ret;
}
+static int erofs_count_all_xattrs_from_path(const char *path)
+{
+ int ret;
+ DIR *_dir;
+ struct stat64 st;
+
+ _dir = opendir(path);
+ if (!_dir) {
+ erofs_err("%s, failed to opendir at %s: %s",
+ __func__, path, erofs_strerror(errno));
+ return -errno;
+ }
+
+ ret = 0;
+ while (1) {
+ struct dirent *dp;
+ char buf[PATH_MAX];
+
+ /*
+ * set errno to 0 before calling readdir() in order to
+ * distinguish end of stream from an error.
+ */
+ errno = 0;
+ dp = readdir(_dir);
+ if (!dp)
+ break;
+
+ if (is_dot_dotdot(dp->d_name) ||
+ !strncmp(dp->d_name, "lost+found", strlen("lost+found")))
+ continue;
+
+ ret = snprintf(buf, PATH_MAX, "%s/%s", path, dp->d_name);
+
+ if (ret < 0 || ret >= PATH_MAX) {
+ /* ignore the too long path */
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = read_xattrs_from_file(buf, NULL);
+ if (ret)
+ goto fail;
+
+ ret = lstat64(buf, &st);
+ if (ret) {
+ ret = -errno;
+ goto fail;
+ }
+
+ if (!S_ISDIR(st.st_mode))
+ continue;
+
+ ret = erofs_count_all_xattrs_from_path(buf);
+ if (ret)
+ goto fail;
+ }
+
+ if (errno)
+ ret = -errno;
+
+fail:
+ closedir(_dir);
+ return ret;
+}
+
+static void erofs_cleanxattrs(bool sharedxattrs)
+{
+ unsigned int i;
+ struct xattr_item *item;
+
+ hash_for_each(ea_hashtable, i, item, node) {
+ if (sharedxattrs && item->shared_xattr_id >= 0)
+ continue;
+
+ hash_del(&item->node);
+ free(item);
+ }
+
+ if (sharedxattrs)
+ return;
+
+ shared_xattrs_size = shared_xattrs_count = 0;
+}
+
+int erofs_build_shared_xattrs_from_path(const char *path)
+{
+ int ret;
+ struct erofs_buffer_head *bh;
+ struct inode_xattr_node *node, *n;
+ char *buf;
+ unsigned int p;
+ erofs_off_t off;
+
+ /* check if xattr or shared xattr is disabled */
+ if (cfg.c_inline_xattr_tolerance < 0 ||
+ cfg.c_inline_xattr_tolerance == INT_MAX)
+ return 0;
+
+ if (shared_xattrs_size || shared_xattrs_count) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
+
+ ret = erofs_count_all_xattrs_from_path(path);
+ if (ret)
+ return ret;
+
+ if (!shared_xattrs_size)
+ goto out;
+
+ buf = malloc(shared_xattrs_size);
+ if (!buf)
+ return -ENOMEM;
+
+ bh = erofs_balloc(XATTR, shared_xattrs_size, 0, 0);
+ if (IS_ERR(bh)) {
+ free(buf);
+ return PTR_ERR(bh);
+ }
+ bh->op = &erofs_skip_write_bhops;
+
+ erofs_mapbh(bh->block, true);
+ off = erofs_btell(bh, false);
+
+ sbi.xattr_blkaddr = off / EROFS_BLKSIZ;
+ off %= EROFS_BLKSIZ;
+ p = 0;
+
+ list_for_each_entry_safe(node, n, &shared_xattrs_list, list) {
+ struct xattr_item *const item = node->item;
+ const struct erofs_xattr_entry entry = {
+ .e_name_index = item->prefix,
+ .e_name_len = item->len[0],
+ .e_value_size = cpu_to_le16(item->len[1])
+ };
+
+ list_del(&node->list);
+
+ item->shared_xattr_id = (off + p) /
+ sizeof(struct erofs_xattr_entry);
+
+ memcpy(buf + p, &entry, sizeof(entry));
+ p += sizeof(struct erofs_xattr_entry);
+ memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]);
+ p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]);
+ free(node);
+ }
+ bh->fsprivate = buf;
+ bh->op = &erofs_buf_write_bhops;
+out:
+ erofs_cleanxattrs(true);
+ return 0;
+}
+
char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
{
struct inode_xattr_node *node, *n;
struct erofs_xattr_ibody_header *header;
+ LIST_HEAD(ilst);
unsigned int p;
char *buf = calloc(1, size);
@@ -288,6 +477,24 @@ char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size)
p = sizeof(struct erofs_xattr_ibody_header);
list_for_each_entry_safe(node, n, ixattrs, list) {
struct xattr_item *const item = node->item;
+
+ list_del(&node->list);
+
+ /* move inline xattrs to the onstack list */
+ if (item->shared_xattr_id < 0) {
+ list_add(&node->list, &ilst);
+ continue;
+ }
+
+ *(__le32 *)(buf + p) = cpu_to_le32(item->shared_xattr_id);
+ p += sizeof(__le32);
+ ++header->h_shared_count;
+ free(node);
+ put_xattritem(item);
+ }
+
+ list_for_each_entry_safe(node, n, &ilst, list) {
+ struct xattr_item *const item = node->item;
const struct erofs_xattr_entry entry = {
.e_name_index = item->prefix,
.e_name_len = item->len[0],
diff --git a/mkfs/main.c b/mkfs/main.c
index 0df2a96..71c81f5 100644
--- a/mkfs/main.c
+++ b/mkfs/main.c
@@ -19,6 +19,7 @@
#include "erofs/inode.h"
#include "erofs/io.h"
#include "erofs/compress.h"
+#include "erofs/xattr.h"
#define EROFS_SUPER_END (EROFS_SUPER_OFFSET + sizeof(struct erofs_super_block))
@@ -28,7 +29,7 @@ static void usage(void)
fprintf(stderr, "Generate erofs image from DIRECTORY to FILE, and [options] are:\n");
fprintf(stderr, " -zX[,Y] X=compressor (Y=compression level, optional)\n");
fprintf(stderr, " -d# set output message level to # (maximum 9)\n");
- fprintf(stderr, " -x# set xattr tolerance to # (< 0, disable xattrs; default 1)\n");
+ fprintf(stderr, " -x# set xattr tolerance to # (< 0, disable xattrs; default 2)\n");
fprintf(stderr, " -EX[,...] X=extended options\n");
fprintf(stderr, " -T# set a fixed UNIX timestamp # to all files\n");
}
@@ -188,7 +189,7 @@ int erofs_mkfs_update_super_block(struct erofs_buffer_head *bh,
.build_time_nsec = cpu_to_le32(sbi.build_time_nsec),
.blocks = 0,
.meta_blkaddr = sbi.meta_blkaddr,
- .xattr_blkaddr = 0,
+ .xattr_blkaddr = sbi.xattr_blkaddr,
.feature_incompat = cpu_to_le32(sbi.feature_incompat),
};
const unsigned int sb_blksize =
@@ -284,6 +285,13 @@ int main(int argc, char **argv)
erofs_inode_manager_init();
+ err = erofs_build_shared_xattrs_from_path(cfg.c_src_path);
+ if (err) {
+ erofs_err("Failed to build shared xattrs: %s",
+ erofs_strerror(err));
+ goto exit;
+ }
+
root_inode = erofs_mkfs_build_tree_from_path(NULL, cfg.c_src_path);
if (IS_ERR(root_inode)) {
err = PTR_ERR(root_inode);
--
2.17.1
^ permalink raw reply related [flat|nested] 12+ messages in thread
end of thread, other threads:[~2019-10-14 23:53 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-07-26 11:50 [PATCH] erofs-utils: introduce xattr support htyuxe+dhbrei4sq0df8
2019-08-05 14:54 ` Li Guifu
2019-08-05 17:30 ` Gao Xiang
2019-08-11 17:10 ` [PATCH v2] erofs-utils: introduce preliminary " Gao Xiang
2019-10-05 14:20 ` [PATCH v3 1/2] erofs-utils: introduce inline " Gao Xiang via Linux-erofs
2019-10-05 14:20 ` [PATCH 2/2] erofs-utils: introduce shared " Gao Xiang via Linux-erofs
2019-10-05 16:43 ` Li Guifu
2019-10-06 5:01 ` Gao Xiang via Linux-erofs
2019-10-14 11:42 ` [PATCH v4 1/2] erofs-utils: introduce inline " Gao Xiang via Linux-erofs
2019-10-14 11:42 ` [PATCH v4 2/2] erofs-utils: introduce shared " Gao Xiang via Linux-erofs
2019-10-14 23:53 ` [PATCH v5 " Gao Xiang via Linux-erofs
2019-10-05 16:44 ` [PATCH v3 1/2] erofs-utils: introduce inline " Li Guifu
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).