* [PATCH 1/2] hpfs: remember free space
@ 2014-01-28 23:10 Mikulas Patocka
  2014-01-28 23:11 ` [PATCH 2/2] hpfs: optimize quad buffer loading Mikulas Patocka
  0 siblings, 1 reply; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-28 23:10 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel

Previously, hpfs scanned all bitmaps each time the user asked for free
space using statfs. This patch changes it so that hpfs scans the bitmaps
only once, remembers the free space, and returns the cached value
instantly on subsequent invocations of statfs.
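
In simplified form, the statfs path becomes (the real change is in the diff
below):

	if (sbi->sb_n_free == (unsigned)-1)		/* not counted yet */
		sbi->sb_n_free = count_bitmaps(s);	/* slow: scan all the bitmaps once */
	buf->f_bfree = sbi->sb_n_free;			/* later calls return the cached value */

The cached counters are kept up to date by the new hpfs_claim_alloc/free
helpers in the allocator and are reset to -1 if an inconsistency is
detected.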

New versions of wine are hammering on the statfs syscall very heavily,
making some games unplayable when they're stored on hpfs, with load times
in minutes.

This should be backported to the stable kernels because it fixes a
user-visible problem (excessive level load times in wine).

Signed-off-by: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
Cc: stable@vger.kernel.org

---
 fs/hpfs/alloc.c   |   66 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 fs/hpfs/hpfs_fn.h |    2 -
 fs/hpfs/super.c   |   29 ++++++++++++++++++-----
 3 files changed, 87 insertions(+), 10 deletions(-)

Index: linux-3.13/fs/hpfs/super.c
===================================================================
--- linux-3.13.orig/fs/hpfs/super.c	2014-01-24 22:46:38.364205365 +0100
+++ linux-3.13/fs/hpfs/super.c	2014-01-28 20:09:56.942744542 +0100
@@ -123,7 +123,7 @@ static void hpfs_put_super(struct super_
 	call_rcu(&hpfs_sb(s)->rcu, lazy_free_sbi);
 }
 
-unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
 {
 	struct quad_buffer_head qbh;
 	unsigned long *bits;
@@ -131,7 +131,7 @@ unsigned hpfs_count_one_bitmap(struct su
 
 	bits = hpfs_map_4sectors(s, secno, &qbh, 0);
 	if (!bits)
-		return 0;
+		return (unsigned)-1;
 	count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
 	hpfs_brelse4(&qbh);
 	return count;
@@ -146,30 +146,45 @@ static unsigned count_bitmaps(struct sup
 		hpfs_prefetch_bitmap(s, n);
 	}
 	for (n = 0; n < n_bands; n++) {
+		unsigned c;
 		hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD);
-		count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
+		c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
+		if (c != (unsigned)-1)
+			count += c;
 	}
 	return count;
 }
 
+unsigned hpfs_get_free_dnodes(struct super_block *s)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free_dnodes == (unsigned)-1) {
+		unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap);
+		if (c == (unsigned)-1)
+			return 0;
+		sbi->sb_n_free_dnodes = c;
+	}
+	return sbi->sb_n_free_dnodes;
+}
+
 static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *s = dentry->d_sb;
 	struct hpfs_sb_info *sbi = hpfs_sb(s);
 	u64 id = huge_encode_dev(s->s_bdev->bd_dev);
+
 	hpfs_lock(s);
 
-	/*if (sbi->sb_n_free == -1) {*/
+	if (sbi->sb_n_free == (unsigned)-1)
 		sbi->sb_n_free = count_bitmaps(s);
-		sbi->sb_n_free_dnodes = hpfs_count_one_bitmap(s, sbi->sb_dmap);
-	/*}*/
+
 	buf->f_type = s->s_magic;
 	buf->f_bsize = 512;
 	buf->f_blocks = sbi->sb_fs_size;
 	buf->f_bfree = sbi->sb_n_free;
 	buf->f_bavail = sbi->sb_n_free;
 	buf->f_files = sbi->sb_dirband_size / 4;
-	buf->f_ffree = sbi->sb_n_free_dnodes;
+	buf->f_ffree = hpfs_get_free_dnodes(s);
 	buf->f_fsid.val[0] = (u32)id;
 	buf->f_fsid.val[1] = (u32)(id >> 32);
 	buf->f_namelen = 254;
Index: linux-3.13/fs/hpfs/alloc.c
===================================================================
--- linux-3.13.orig/fs/hpfs/alloc.c	2014-01-24 22:44:44.350671542 +0100
+++ linux-3.13/fs/hpfs/alloc.c	2014-01-28 20:28:51.438786159 +0100
@@ -8,6 +8,58 @@
 
 #include "hpfs_fn.h"
 
+static void hpfs_claim_alloc(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free != (unsigned)-1) {
+		if (unlikely(!sbi->sb_n_free)) {
+			hpfs_error(s, "free count underflow, allocating sector %08x", sec);
+			sbi->sb_n_free = -1;
+			return;
+		}
+		sbi->sb_n_free--;
+	}
+}
+
+static void hpfs_claim_free(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free != (unsigned)-1) {
+		if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
+			hpfs_error(s, "free count overflow, freeing sector %08x", sec);
+			sbi->sb_n_free = -1;
+			return;
+		}
+		sbi->sb_n_free++;
+	}
+}
+
+static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
+		if (unlikely(!sbi->sb_n_free_dnodes)) {
+			hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
+			sbi->sb_n_free_dnodes = -1;
+			return;
+		}
+		sbi->sb_n_free_dnodes--;
+	}
+}
+
+static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
+		if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
+			hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
+			sbi->sb_n_free_dnodes = -1;
+			return;
+		}
+		sbi->sb_n_free_dnodes++;
+	}
+}
+
 /*
  * Check if a sector is allocated in bitmap
  * This is really slow. Turned on only if chk==2
@@ -203,9 +255,15 @@ secno hpfs_alloc_sector(struct super_blo
 	}
 	sec = 0;
 	ret:
+	if (sec) {
+		i = 0;
+		do
+			hpfs_claim_alloc(s, sec + i);
+		while (unlikely(++i < n));
+	}
 	if (sec && f_p) {
 		for (i = 0; i < forward; i++) {
-			if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
+			if (!hpfs_alloc_if_possible(s, sec + n + i)) {
 				hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
 				sec = 0;
 				break;
@@ -228,6 +286,7 @@ static secno alloc_in_dirband(struct sup
 	nr >>= 2;
 	sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
 	if (!sec) return 0;
+	hpfs_claim_dirband_alloc(s, sec);
 	return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
 }
 
@@ -242,6 +301,7 @@ int hpfs_alloc_if_possible(struct super_
 		bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
 		hpfs_mark_4buffers_dirty(&qbh);
 		hpfs_brelse4(&qbh);
+		hpfs_claim_alloc(s, sec);
 		return 1;
 	}
 	hpfs_brelse4(&qbh);
@@ -275,6 +335,7 @@ void hpfs_free_sectors(struct super_bloc
 		return;
 	}
 	bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
+	hpfs_claim_free(s, sec);
 	if (!--n) {
 		hpfs_mark_4buffers_dirty(&qbh);
 		hpfs_brelse4(&qbh);
@@ -359,6 +420,7 @@ void hpfs_free_dnode(struct super_block
 		bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
 		hpfs_mark_4buffers_dirty(&qbh);
 		hpfs_brelse4(&qbh);
+		hpfs_claim_dirband_free(s, dno);
 	}
 }
 
@@ -366,7 +428,7 @@ struct dnode *hpfs_alloc_dnode(struct su
 			 dnode_secno *dno, struct quad_buffer_head *qbh)
 {
 	struct dnode *d;
-	if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
+	if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
 		if (!(*dno = alloc_in_dirband(s, near)))
 			if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
 	} else {
Index: linux-3.13/fs/hpfs/hpfs_fn.h
===================================================================
--- linux-3.13.orig/fs/hpfs/hpfs_fn.h	2014-01-24 19:37:02.955688316 +0100
+++ linux-3.13/fs/hpfs/hpfs_fn.h	2014-01-28 20:04:41.301674108 +0100
@@ -312,7 +312,7 @@ static inline struct hpfs_sb_info *hpfs_
 __printf(2, 3)
 void hpfs_error(struct super_block *, const char *, ...);
 int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
-unsigned hpfs_count_one_bitmap(struct super_block *, secno);
+unsigned hpfs_get_free_dnodes(struct super_block *);
 
 /*
  * local time (HPFS) to GMT (Unix)

* [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-28 23:10 [PATCH 1/2] hpfs: remember free space Mikulas Patocka
@ 2014-01-28 23:11 ` Mikulas Patocka
  2014-01-28 23:44   ` Linus Torvalds
  0 siblings, 1 reply; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-28 23:11 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel

HPFS needs to load 4 consecutive 512-byte sectors when accessing the
directory nodes or bitmaps. We can't switch to 2048-byte block size
because files are allocated in the units of 512-byte sectors.

Previously, the driver allocated a 2048-byte area with kmalloc, copied
the data from the four buffers into this area, and copied it back later
if it had been modified.

In the current implementation of the buffer cache, buffers are allocated
in the pagecache. That means that 4 consecutive 512-byte buffers are
stored in consecutive areas in the kernel address space. So, we don't need
to allocate extra memory and copy the content of the buffers there.

This patch optimizes the code to avoid copying the buffers. It checks if
the four buffers are stored in contiguous memory - if they are not, it
falls back to allocating a 2048-byte area and copying data there.
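
The core of the change is a pointer check on the four buffer heads
(simplified from the diff below):

	if (qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512 &&
	    qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512 &&
	    qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)
		return qbh->data = qbh->bh[0]->b_data;	/* use the buffers in place */
	/* otherwise fall back to kmalloc(2048) + memcpy as before */

hpfs_brelse4 and hpfs_mark_4buffers_dirty then compare qbh->data with
qbh->bh[0]->b_data to tell whether a separate copy was made.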

Signed-off-by: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>

---
 fs/hpfs/buffer.c |   96 ++++++++++++++++++++++++++++---------------------------
 1 file changed, 50 insertions(+), 46 deletions(-)

Index: linux-3.12.6/fs/hpfs/buffer.c
===================================================================
--- linux-3.12.6.orig/fs/hpfs/buffer.c	2013-10-18 20:24:16.000000000 +0200
+++ linux-3.12.6/fs/hpfs/buffer.c	2013-12-30 22:22:56.078460838 +0100
@@ -86,7 +86,6 @@ void *hpfs_get_sector(struct super_block
 void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
 		   int ahead)
 {
-	struct buffer_head *bh;
 	char *data;
 
 	hpfs_lock_assert(s);
@@ -100,34 +99,32 @@ void *hpfs_map_4sectors(struct super_blo
 
 	hpfs_prefetch_sectors(s, secno, 4 + ahead);
 
+	if (!(qbh->bh[0] = sb_bread(s, secno + 0))) goto bail0;
+	if (!(qbh->bh[1] = sb_bread(s, secno + 1))) goto bail1;
+	if (!(qbh->bh[2] = sb_bread(s, secno + 2))) goto bail2;
+	if (!(qbh->bh[3] = sb_bread(s, secno + 3))) goto bail3;
+
+	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
+	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
+	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
+		return qbh->data = qbh->bh[0]->b_data;
+	}
+
 	qbh->data = data = kmalloc(2048, GFP_NOFS);
 	if (!data) {
 		printk("HPFS: hpfs_map_4sectors: out of memory\n");
-		goto bail;
+		goto bail4;
 	}
 
-	qbh->bh[0] = bh = sb_bread(s, secno);
-	if (!bh)
-		goto bail0;
-	memcpy(data, bh->b_data, 512);
-
-	qbh->bh[1] = bh = sb_bread(s, secno + 1);
-	if (!bh)
-		goto bail1;
-	memcpy(data + 512, bh->b_data, 512);
-
-	qbh->bh[2] = bh = sb_bread(s, secno + 2);
-	if (!bh)
-		goto bail2;
-	memcpy(data + 2 * 512, bh->b_data, 512);
-
-	qbh->bh[3] = bh = sb_bread(s, secno + 3);
-	if (!bh)
-		goto bail3;
-	memcpy(data + 3 * 512, bh->b_data, 512);
+	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
+	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
+	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
+	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);
 
 	return data;
 
+ bail4:
+	brelse(qbh->bh[3]);
  bail3:
 	brelse(qbh->bh[2]);
  bail2:
@@ -135,9 +132,6 @@ void *hpfs_map_4sectors(struct super_blo
  bail1:
 	brelse(qbh->bh[0]);
  bail0:
-	kfree(data);
-	printk("HPFS: hpfs_map_4sectors: read error\n");
- bail:
 	return NULL;
 }
 
@@ -155,44 +149,54 @@ void *hpfs_get_4sectors(struct super_blo
 		return NULL;
 	}
 
-	/*return hpfs_map_4sectors(s, secno, qbh, 0);*/
+	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
+	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
+	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
+	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;
+
+	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
+	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
+	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
+		return qbh->data = qbh->bh[0]->b_data;
+	}
+
 	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
 		printk("HPFS: hpfs_get_4sectors: out of memory\n");
-		return NULL;
+		goto bail4;
 	}
-	if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0;
-	if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1;
-	if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2;
-	if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3;
-	memcpy(qbh->data, qbh->bh[0]->b_data, 512);
-	memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512);
-	memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512);
-	memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512);
 	return qbh->data;
 
-	bail3:	brelse(qbh->bh[2]);
-	bail2:	brelse(qbh->bh[1]);
-	bail1:	brelse(qbh->bh[0]);
-	bail0:
+bail4:
+	brelse(qbh->bh[3]);
+bail3:
+	brelse(qbh->bh[2]);
+bail2:
+	brelse(qbh->bh[1]);
+bail1:
+	brelse(qbh->bh[0]);
+bail0:
 	return NULL;
 }
 	
 
 void hpfs_brelse4(struct quad_buffer_head *qbh)
 {
-	brelse(qbh->bh[3]);
-	brelse(qbh->bh[2]);
-	brelse(qbh->bh[1]);
+	if (unlikely(qbh->data != qbh->bh[0]->b_data))
+		kfree(qbh->data);
 	brelse(qbh->bh[0]);
-	kfree(qbh->data);
+	brelse(qbh->bh[1]);
+	brelse(qbh->bh[2]);
+	brelse(qbh->bh[3]);
 }	
 
 void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
 {
-	memcpy(qbh->bh[0]->b_data, qbh->data, 512);
-	memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
-	memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
-	memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
+	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
+		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
+		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
+		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
+		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
+	}
 	mark_buffer_dirty(qbh->bh[0]);
 	mark_buffer_dirty(qbh->bh[1]);
 	mark_buffer_dirty(qbh->bh[2]);

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-28 23:11 ` [PATCH 2/2] hpfs: optimize quad buffer loading Mikulas Patocka
@ 2014-01-28 23:44   ` Linus Torvalds
  2014-01-29  1:01     ` Mikulas Patocka
  0 siblings, 1 reply; 15+ messages in thread
From: Linus Torvalds @ 2014-01-28 23:44 UTC (permalink / raw)
  To: Mikulas Patocka; +Cc: linux-fsdevel

On Tue, Jan 28, 2014 at 3:11 PM, Mikulas Patocka
<mikulas@artax.karlin.mff.cuni.cz> wrote:
> HPFS needs to load 4 consecutive 512-byte sectors when accessing the
> directory nodes or bitmaps. We can't switch to 2048-byte block size
> because files are allocated in the units of 512-byte sectors.

Bah, this is untrue.

Well, it's true that you cannot *switch* to another size, but the
buffer head layer should be perfectly happy with mixed sizes within a
device, even if nobody happens to do it.

Just allocate a whole page, and make *that* page use 2048-byte buffers.

So you should be perfectly able to just do

  struct buffer_head *bh = __bread(dev, nr, 2048);

which gets and reads a single 2048-byte buffer head.
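
IOW (untested sketch, and it assumes the lookup path is fixed to honor the
passed-in size as discussed below), the map function could shrink to roughly

	struct buffer_head *bh;

	if (secno & 3)
		return NULL;				/* secno is 4-sector aligned here */
	bh = __bread(s->s_bdev, secno >> 2, 2048);	/* block number in 2048-byte units */
	if (!bh)
		return NULL;
	qbh->bh[0] = bh;
	qbh->bh[1] = qbh->bh[2] = qbh->bh[3] = NULL;	/* hpfs_brelse4() etc. need a matching tweak */
	return qbh->data = bh->b_data;

with no copying and no separate allocation at all.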

Now, the problem is that because nobody actually does this, I bet we
have bugs in this area, and some path ends up using
bd_inode->i_blkbits instead of the passed-in size. A very quick look
finds __find_get_block -> __find_get_block_slow() looking bad, for
example.

But I also bet that that should be easy to fix. In fact, I think the
only reason we use "i_blkbits" there is because it avoids a division
(and nobody had a *reason* to do it), but since this is the "we have
to do IO" path, just passing in the size and then using a
"sector_div()" is a nobrainer from a performance standpoint, I think.
So fixing that problem looks like a couple of lines.

Now, another issue is that with multiple block sizes, it's up to the
filesystem to then guarantee that there isn't aliasing between two
physical blocks (eg say "2048b sized block at offset 1" vs "512b
buffer-head at offset 4"). But if the aliasing is fairly naturally
avoided at the FS level (and if this is done only for particular parts
of the filesystem, that should be very natural), that shouldn't be a
problem either.

So I'd actually much rather see us taking advantage of multiple buffer
sizes and use a *native* 2k buffer-head when it makes sense, than this
odd "let's allocate them, and then maybe they are all properly aligned
anyway" kind of voodoo programming.

Would you be willing to try?

              Linus

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-28 23:44   ` Linus Torvalds
@ 2014-01-29  1:01     ` Mikulas Patocka
  2014-01-29  1:51       ` Mikulas Patocka
  2014-01-29  2:01       ` Linus Torvalds
  0 siblings, 2 replies; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-29  1:01 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel



On Tue, 28 Jan 2014, Linus Torvalds wrote:

> On Tue, Jan 28, 2014 at 3:11 PM, Mikulas Patocka
> <mikulas@artax.karlin.mff.cuni.cz> wrote:
> > HPFS needs to load 4 consecutive 512-byte sectors when accessing the
> > directory nodes or bitmaps. We can't switch to 2048-byte block size
> > because files are allocated in the units of 512-byte sectors.
> 
> Bah, this is untrue.
> 
> Well, it's true that you cannot *switch* to another size, but the
> buffer head layer should be perfectly happy with mixed sizes within a
> device, even if nobody happens to do it.
> 
> Just allocate a whole page, and make *that* page use 2048-byte buffers.
> 
> So you should be perfectly able to just do
> 
>   struct buffer_head *bh = __bread(dev, nr, 2048);
> 
> which gets and reads a single 2048-byte buffer head.
>
> Now, the problem is that because nobody actually does this, I bet we
> have bugs in this area, and some path ends up using
> bd_inode->i_blkbits instead of the passed-in size. A very quick look
> finds __find_get_block -> __find_get_block_slow() looking bad, for
> example.
> 
> But I also bet that that should be easy to fix. In fact, I think the
> only reason we use "i_blkbits" there is because it avoids a division
> (and nobody had a *reason* to do it), but since this is the "we have
> to do IO" path, just passing in the size and then using a
> "sector_div()" is a nobrainer from a performance standpoint, I think.
> So fixing that problem looks like a couple of lines.
> 
> Now, another issue is that with multiple block sizes, it's up to the
> filesystem to then guarantee that there isn't aliasing between two
> physical blocks (eg say "2048b sized block at offset 1" vs "512b
> buffer-head at offset 4"). But if the aliasing is fairly naturally
> avoided at the FS level (and if this is done only for particular parts
> of the filesystem, that should be very natural), that shouldn't be a
> problem either.
> 
> So I'd actually much rather see us taking advantage of multiple buffer
> sizes and use a *native* 2k buffer-head when it makes sense, than this
> odd "let's allocate them, and then maybe they are all properly aligned
> anyway" kind of voodoo programming.
> 
> Would you be willing to try?
> 
>               Linus
> 

The page cache doesn't handle different-size buffers for one page. HPFS 
has some 2kB structures (dnodes, bitmaps) and some 512-byte structures 
(fnodes, anodes). We can have a 4kB page that contains one 2kB dnode and 
four 512-byte anodes or fnodes. That is impossible to create with 
create_empty_buffers.
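
(For reference: create_empty_buffers(page, blocksize, b_state) attaches
PAGE_SIZE / blocksize equal-sized buffer heads to the page; as far as I can
see there is no variant that takes mixed sizes.)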

(you can't access 512-byte structures using 2kB buffers because that could 
trash file data that lie in the same 2kB area)

__find_get_block_slow walks all the buffers on the page and finds the one 
that matches; that is fixable. But I have no clue how to replace 
create_empty_buffers. How much other code depends on the fact that 
buffers completely cover a page and have the same size?


There is a general problem - HPFS has a very small user base, so no one is 
really stress-testing it. I'm not stress-testing it either - I have some 
data on HPFS, but I'm not running any intensive parallel workload on it. 

So even if we hacked the page cache to support different-size buffers on a 
single page, there would likely be bugs in it and no one would find those 
bugs for a long time because of the tiny HPFS user base. I think it would be 
better to try this approach on some widely used filesystem first, and when 
it is debugged, I can switch HPFS to it.

Mikulas

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-29  1:01     ` Mikulas Patocka
@ 2014-01-29  1:51       ` Mikulas Patocka
  2014-01-29  2:05         ` Linus Torvalds
  2014-01-29  2:01       ` Linus Torvalds
  1 sibling, 1 reply; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-29  1:51 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel



On Wed, 29 Jan 2014, Mikulas Patocka wrote:

> The page cache doesn't handle different-size buffers for one page. HPFS 
> has some 2kB structures (dnodes, bitmaps) and some 512-byte structures 
> (fnodes, anodes). We can have a 4kB page that contains one 2kB dnode and 
> four 512-byte anodes or fnodes. That is impossible to create with 
> create_empty_buffers.
> 
> (you can't access 512-byte structures using 2kB buffers because that could 
> trash file data that lie in the same 2kB area)
> 
> __find_get_block_slow walks all the buffers on the page and finds the one 
> that matches; that is fixable. But I have no clue how to replace 
> create_empty_buffers. How much other code depends on the fact that 
> buffers completely cover a page and have the same size?

Another question is - what would happen with direct access to the block 
device using read or write on nodes in /dev?

Direct access must use buffers - the reason is that some programs (lilo, 
grub, tune2fs) can write to a block device with a mounted filesystem. They 
must write only the buffer they are modifying; if they were using 
full-page writes, they could destroy nearby data.

Now, how could this work with variable-size buffers? What happens if the 
block device is being read or written with some buffer size and the 
filesystem needs a different buffer size at the same place? It doesn't 
seem so easy to solve...

Mikulas

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-29  1:01     ` Mikulas Patocka
  2014-01-29  1:51       ` Mikulas Patocka
@ 2014-01-29  2:01       ` Linus Torvalds
  2014-01-29 15:05         ` Mikulas Patocka
  1 sibling, 1 reply; 15+ messages in thread
From: Linus Torvalds @ 2014-01-29  2:01 UTC (permalink / raw)
  To: Mikulas Patocka; +Cc: linux-fsdevel

On Tue, Jan 28, 2014 at 5:01 PM, Mikulas Patocka
<mikulas@artax.karlin.mff.cuni.cz> wrote:
>
> The page cache doesn't handle different-size buffers for one page.

Correct, but that should not be relevant..

> HPFS
> has some 2kB structures (dnodes, bitmaps) and some 512-byte structures
> (fnodes, anodes). We can have a 4kB page that contains one 2kB dnode and
> four 512-byte anodes or fnodes. That is impossible to create with
> create_empty_buffers.

Damn. You're both right and wrong.

It's true that buffer heads within a page have to be the same size,
but that's not really relevant - you don't work with pages, so you
could have two totally independent 2kB buffer heads allocated to
within one page.

And that's actually how filesystems that virtually map pages do things
- they just fill the page with (equal-sized) buffer heads indexed on
the filesystem inode, and the buffer heads don't have to be related to
each other physically on the disk.

In fact, even the sizes don't even really *have* to be the same (in
theory the list of page buffers could point to five buffers: one 2k
and four 512-byte bhs), but all the helper functions to populate the
buffer head lists etc do assume that.

And way back when, buffer heads had their own hashed lookup, so even
with the bd_dev approach you could have two non-consecutive
independent 2kB bh's in the same page.

So you used to be wrong.

But the reason you're right is that we got rid of the buffer head
hashes, and now use the page-level hashing to look up the page that
the buffer heads are in, which does mean that now you can't really
alias different sizes on different pages any more, or have one page
that contains buffer heads that aren't related to each other
physically on the disk any more.

So yeah, very annoying, we're *so* close to being able to do this, but
because the buffer heads are really no longer "primary" data
structures and don't have any indexing of their own, we can't actually
do it.

               Linus

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-29  1:51       ` Mikulas Patocka
@ 2014-01-29  2:05         ` Linus Torvalds
  2014-01-29 14:50           ` Mikulas Patocka
  0 siblings, 1 reply; 15+ messages in thread
From: Linus Torvalds @ 2014-01-29  2:05 UTC (permalink / raw)
  To: Mikulas Patocka; +Cc: linux-fsdevel

On Tue, Jan 28, 2014 at 5:51 PM, Mikulas Patocka
<mikulas@artax.karlin.mff.cuni.cz> wrote:
>
> Another question is - what would happen with direct access to the block
> device using read or write on nodes in /dev?

We actually don't support coherent direct block access and filesystem
access. We haven't since the page cache days. Back in *really* early
days, everything was about "struct buffer_head", and things were
coherent because all caches were physically indexed by the location on
disk.

But ever since the page cache (which has been around forever),
filesystem caching has been virtually indexed, and not coherent with
the physically indexed block device direct access.

            Linus

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-29  2:05         ` Linus Torvalds
@ 2014-01-29 14:50           ` Mikulas Patocka
  0 siblings, 0 replies; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-29 14:50 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel



On Tue, 28 Jan 2014, Linus Torvalds wrote:

> On Tue, Jan 28, 2014 at 5:51 PM, Mikulas Patocka
> <mikulas@artax.karlin.mff.cuni.cz> wrote:
> >
> > Another question is - what would happen with direct access to the block
> > device using read or write on nodes in /dev?
> 
> We actually don't support coherent direct block access and filesystem
> access. We haven't since the page cache days. Back in *really* early
> days, everything was about "struct buffer_head", and things were
> coherent because all caches were physically indexed by the location on
> disk.
> 
> But ever since the page cache (which has been around forever),
> filesystem caching has been virtually indexed, and not coherent with
> the physically indexed block device direct access.
> 
>             Linus

You have to support it because tune2fs does it.

If you create an ext2 filesystem with 1kB block size, mount it, and run 
some command that modifies the superblock on it (for example tune2fs -c 
123), you can see on the block trace that the kernel writes only 1kB 
superblocks. If it were writing full pages, it could destroy data placed 
next to the superblocks --- for example:
	1. tune2fs reads a full page with some backup superblock
	2. the filesystem writes some file data that are placed next to 
that superblock
	3. tune2fs writes back that full page with the superblock 
modified, overwriting data stored at step 2

--- that's why direct access to block devices must use the buffer cache.

grub is another example: it writes the bootloader to sector 0. If it did a 
full-page read, modify, full-page write, it could destroy file data 
stored in sectors 1-7.

Mikulas

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-29  2:01       ` Linus Torvalds
@ 2014-01-29 15:05         ` Mikulas Patocka
  2014-01-30 17:59           ` Linus Torvalds
  0 siblings, 1 reply; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-29 15:05 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel



On Tue, 28 Jan 2014, Linus Torvalds wrote:

> On Tue, Jan 28, 2014 at 5:01 PM, Mikulas Patocka
> <mikulas@artax.karlin.mff.cuni.cz> wrote:
> >
> > The page cache doesn't handle different-size buffers for one page.
> 
> Correct, but that should not be relevant..
> 
> > HPFS
> > has some 2kB structures (dnodes, bitmaps) and some 512-byte structures
> > (fnodes, anodes). We can have a 4kB page that contains one 2kB dnode and
> > four 512-byte anodes or fnodes. That is impossible to create with
> > create_empty_buffers.
> 
> Damn. You're both right and wrong.
> 
> It's true that buffer heads within a page have to be the same size,
> but that's not really relevant - you don't work with pages, so you
> could have two totally independent 2kB buffer heads allocated to
> within one page.

Suppose that 8 consecutive sectors on the disk contain this data:
dnode (4 sectors)
fnode (1 sector)
file content (3 sectors)
--- now, you can't access that fnode using a 2kB buffer; if you did, and if 
you marked that buffer dirty, you would damage the file content.

So you need different-sized buffers on one page.

> And that's actually how filesystems that virtually map pages do things
> - they just fill the page with (equal-sized) buffer heads indexed on
> the filesystem inode, and the buffer heads don't have to be related to
> each other physically on the disk.
> 
> In fact, even the sizes don't even really *have* to be the same (in
> theory the list of page buffers could point to five buffers: one 2k
> and four 512-byte bhs), but all the helper functions to populate the
> buffer head lists etc do assume that.
> 
> And way back when, buffer heads had their own hashed lookup, so even
> with the bd_dev approach you could have two non-consecutive
> independent 2kB bh's in the same page.
> 
> So you used to be wrong.
> 
> But the reason you're right is that we got rid of the buffer head
> hashes, and now use the page-level hashing to look up the page that
> the buffer heads are in, which does mean that now you can't really
> alias different sizes on different pages any more, or have one page
> that contains buffer heads that aren't related to each other
> physically on the disk any more.

Page-level lookup doesn't seem like a problem to me. All you need to do is 
add a "blocksize" argument to __find_get_block_slow, change
index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
to
index = block >> (PAGE_CACHE_SHIFT - __ffs(blocksize));

and change
else if (bh->b_blocknr == block)
to
else if (bh->b_blocknr == block && bh->b_size == blocksize)

That would be enough to be able to find a buffer with different block 
size on one page.
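
Put together, the interesting part of the lookup would be roughly (untested):

	index = block >> (PAGE_CACHE_SHIFT - __ffs(blocksize));
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;
	...
	bh = head;
	do {
		if (buffer_mapped(bh) && bh->b_blocknr == block &&
		    bh->b_size == blocksize) {
			ret = bh;
			get_bh(bh);
			break;
		}
		bh = bh->b_this_page;
	} while (bh != head);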


The harder problem is how to create such buffers.

And how to synchronize it with concurrent access from userspace using 
def_blk_aops, that would be very hard.

> So yeah, very annoying, we're *so* close to being able to do this, but
> because the buffer heads are really no longer "primary" data
> structures and don't have any indexing of their own, we can't actually
> do it.
> 
>                Linus

Mikulas

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-29 15:05         ` Mikulas Patocka
@ 2014-01-30 17:59           ` Linus Torvalds
  2014-01-31 17:41             ` Mikulas Patocka
  0 siblings, 1 reply; 15+ messages in thread
From: Linus Torvalds @ 2014-01-30 17:59 UTC (permalink / raw)
  To: Mikulas Patocka; +Cc: linux-fsdevel

On Wed, Jan 29, 2014 at 7:05 AM, Mikulas Patocka
<mikulas@artax.karlin.mff.cuni.cz> wrote:
>
> Suppose that 8 consecutive sectors on the disk contain this data:
> dnode (4 sectors)
> fnode (1 sector)
> file content (3 sectors)
> --- now, you can't access that fnode using a 2kB buffer; if you did, and if
> you marked that buffer dirty, you would damage the file content.
>
> So you need different-sized buffers on one page.

No. You're missing the whole point.

"consecutive sectors" does not mean "same page".

The page cache doesn't care. It never has. Non-consecutive sectors are
common for normal file mappings.

The *buffer* cache doesn't really care either, and in fact that
non-consecutive case used to be the common one (very much even for raw
disk accesses, exactly because things *used* to be coherent with a
mounted filesystem - so if there were files that had populated part of
the buffer cache with their non-consecutive sectors, the raw disk
access would just use those non-consecutive sectors).

And all that worked because we'd just look up the buffer head in the
hashes. The page it was on didn't matter.

The problem is that (not *that* long ago, relatively speaking) we have
castrated the buffer cache so much (because almost nobody really uses
it any more) that now it's really a slave of the page cache, and we
got rid of the buffer head hashes entirely. So now we look up the
buffer heads using the page cache, and *that* causes the problems (and
forces us to put those buffer heads in the same page, because we index
by page).

We can actually still just create such non-consecutive buffers and do
IO on them, we just can't look them up any more.

                  Linus

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-30 17:59           ` Linus Torvalds
@ 2014-01-31 17:41             ` Mikulas Patocka
  2014-01-31 17:52               ` Linus Torvalds
  0 siblings, 1 reply; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-31 17:41 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel



On Thu, 30 Jan 2014, Linus Torvalds wrote:

> On Wed, Jan 29, 2014 at 7:05 AM, Mikulas Patocka
> <mikulas@artax.karlin.mff.cuni.cz> wrote:
> >
> > Suppose that 8 consecutive sectors on the disk contain this data:
> > dnode (4 sectors)
> > fnode (1 sector)
> > file content (3 sectors)
> > --- now, you can't access that fnode using a 2kB buffer; if you did, and if
> > you marked that buffer dirty, you would damage the file content.
> >
> > So you need different-sized buffers on one page.
> 
> No. You're missing the whole point.
>
> "consecutive sectors" does not mean "same page".

Huh? Let me say it again: we have an 8-sector disk area that is aligned on 
a page boundary (for example, sector number 24).

There is a dnode on sectors 24-27
There is an fnode on sector 28
There is file content on sectors 29-31

So, I claim this - if you access that fnode using a 2k buffer (so the buffer 
contains not only the fnode, but also the following 3 sectors) and you 
mark that buffer head dirty, you may corrupt the file's data.

If you disagree, say how it is supposed to work.

> The problem is that (not *that* long ago, relatively speaking) we have
> castrated the buffer cache so much (because almost nobody really uses
> it any more) that now it's really a slave of the page cache, and we
> got rid of the buffer head hashes entirely. So now we look up the
> buffer heads using the page cache, and *that* causes the problems (and
> forces us to put those buffer heads in the same page, because we index
> by page).
> 
> We can actually still just create such non-consecutive buffers and do
> IO on them, we just can't look them up any more.
> 
>                   Linus

Each page has a singly linked circular list of buffers. So you could in 
theory put buffers of different sizes on that list. For example, the page 
for sector 24 (page index 3) could have one buffer with block number 6 and 
block size 2048 and four buffers with block numbers 28-31 and block size 
512. It would be possible to find all five of those buffers in 
__find_get_block_slow if you passed the block size as an argument to 
__find_get_block_slow and verified the block size when searching the 
linked list.

In theory you could put buffers with all possible combinations of buffer 
size on that linked list: the page at index 3 could have on its list the 
following 15 buffers:
8 512-byte buffers with block numbers 24-31
4 1024-byte buffers with block numbers 12-15
2 2048-byte buffers with block numbers 6-7
1 4096-byte buffer with block number 3

Mikulas

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-31 17:41             ` Mikulas Patocka
@ 2014-01-31 17:52               ` Linus Torvalds
  2014-01-31 18:10                 ` Mikulas Patocka
  0 siblings, 1 reply; 15+ messages in thread
From: Linus Torvalds @ 2014-01-31 17:52 UTC (permalink / raw)
  To: Mikulas Patocka; +Cc: linux-fsdevel

On Fri, Jan 31, 2014 at 9:41 AM, Mikulas Patocka
<mikulas@artax.karlin.mff.cuni.cz> wrote:
> On Thu, 30 Jan 2014, Linus Torvalds wrote:
>>
>> "consecutive sectors" does not mean "same page".
>
> Huh? Let me say it again: we have an 8-sector disk area that is aligned on
> a page boundary (for example, sector number 24).

Mikulas, stop the idiocy.

I just told you that "consecutive" doesn't matter. And then you start
arguing about consecutive.

Read the damn email before you respond, ok?

          Linus

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-31 17:52               ` Linus Torvalds
@ 2014-01-31 18:10                 ` Mikulas Patocka
  2014-01-31 18:40                   ` Linus Torvalds
  0 siblings, 1 reply; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-31 18:10 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel



On Fri, 31 Jan 2014, Linus Torvalds wrote:

> On Fri, Jan 31, 2014 at 9:41 AM, Mikulas Patocka
> <mikulas@artax.karlin.mff.cuni.cz> wrote:
> > On Thu, 30 Jan 2014, Linus Torvalds wrote:
> >>
> >> "consecutive sectors" does not mean "same page".
> >
> > Huh? Let me say it again: we have an 8-sector disk area that is aligned on
> > a page boundary (for example, sector number 24).\
> 
> Mikulas, stop the idiocy.
> 
> I just told you that "consecutive" doesn't matter. And then you start
> arguing about consecutive.
> 
> Read the damn email before you respond, ok?
> 
>           Linus

I don't know what I said wrong.

Buffer cache is backed by pages from page cache. If we have page size 4k, 
page with index 0 maps sectors 0-7, page with index 1 maps sectors 8-15, 
page with index 2 maps sectors 16-23, page with index 3 maps sectors 24-31 
and so on.

Pages that belong to a file can map non-consecutive areas because the file 
may be scattered on the disk. But pages that are used for the buffer cache 
map a consecutive area.

I don't really understand what you mean.

Mikulas

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-31 18:10                 ` Mikulas Patocka
@ 2014-01-31 18:40                   ` Linus Torvalds
  2014-01-31 19:19                     ` Mikulas Patocka
  0 siblings, 1 reply; 15+ messages in thread
From: Linus Torvalds @ 2014-01-31 18:40 UTC (permalink / raw)
  To: Mikulas Patocka; +Cc: linux-fsdevel

On Fri, Jan 31, 2014 at 10:10 AM, Mikulas Patocka
<mikulas@artax.karlin.mff.cuni.cz> wrote:
>
> Buffer cache is backed by pages from page cache. If we have page size 4k,
> page with index 0 maps sectors 0-7 [..]

Not at all necessarily.

One page might contain sectors 761, 51, 900 and 12-16. The buffer
heads have sector numbers that are *independent* of the page they are
in.

And we *use* that. Every single day. It's how the virtual file mapping
is done. The buffer cache still supports it, and it still works. The
buffer cache also technically supports mixing sizes in the same page
(and it still does *not* have to be about _consecutive_ sectors!), but
I won't actually say that it works, because nobody has ever actually
used that capability.

And I explained how we used to do that EVEN FOR DIRECT BLOCK IO (and
how we had a bh hash chain for lookups).

Christ, just read the email. Stop this "it has to be consecutive
sectors". Because it really doesn't. It really *isn't* (for file
backed pages).

The issue we have these days is that we actually dropped our buffer
cache hash chains, and buffer heads aren't actually independently
indexed any more. So now we rely entirely on the page cache index. So
*lookup* right now depends on one page containing sectors that are
"related" (not necessarily physically on disk, though), but that's a
small implementation detail and isn't even historically true.

Now, it may well not be worth re-introducing the buffer head hash
lists. I'm not saying we should do that. Your ugly patch may be the
smaller pain, because in the end, few enough filesystems actually want
different sector sizes. So I'm really arguing to explain that the
whole "sectors have to be consecutive in a page" is BS.

You seem to be somewhat confused about the buffer cache usage, since
you also thought that we don't alias filesystem data and direct block
device data. We really, really do. The same physical sectors can exist
in both - in different pages, and not coherent with each other.

The buffer cache is actually quite flexible. It's certainly not
perfect, and some filesystems have been migrating away from it due to
overheads (the bh allocations, for example, and many modern
filesystems like doing their IO directly using the bio interface
because it's closer to the disk, and once you do your caching in the
page cache yourself, the fact that buffer heads exist over more than
just the IO can be more of a pain than a gain), but it's actually
*designed* to do all this.

The "how to index it" is actually a fairly well separated issue from
the buffer cache. You can actually use the buffer heads without ever
really indexing them at all (and in many respects, that's how the page
cache uses them) and see them as just an IO entity. That was actually
a historical usage, but these days people would use a bio for that
case.

             Linus

* Re: [PATCH 2/2] hpfs: optimize quad buffer loading
  2014-01-31 18:40                   ` Linus Torvalds
@ 2014-01-31 19:19                     ` Mikulas Patocka
  0 siblings, 0 replies; 15+ messages in thread
From: Mikulas Patocka @ 2014-01-31 19:19 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: linux-fsdevel



On Fri, 31 Jan 2014, Linus Torvalds wrote:

> On Fri, Jan 31, 2014 at 10:10 AM, Mikulas Patocka
> <mikulas@artax.karlin.mff.cuni.cz> wrote:
> >
> > Buffer cache is backed by pages from page cache. If we have page size 4k,
> > page with index 0 maps sectors 0-7 [..]
> 
> Not at all necessarily.
> 
> One page might contain sectors 761, 51, 900 and 12-16. The buffer
> heads have sector numbers that are *independent* of the page they are
> in.
>
> Christ, just read the email. Stop this "it has to be consecutive
> sectors". Because it really doesn't. It really *isn't* (for file
> backed pages).

I understand that. Sure, the mapping is non-consecutive for file-based 
pages.

But the pages that are used for sb_bread, sb_getblk and related functions 
map a consecutive area on the disk. So, if we are talking in this thread 
about the implementation of sb_bread, we can say that the pages used for 
sb_bread map a consecutive area.

> The issue we have these days is that we actually dropped our buffer
> cache hash chains, and buffer heads aren't actually independently
> indexed any more. So now we rely entirely on the page cache index. So
> *lookup* right now depends on one page containing sectors that are
> "related" (not necessarily physically on disk, though), but that's a
> small implementation detail and isn't even historically true.
> 
> Now, it may well not be worth re-introducing the buffer head hash
> lists. I'm not saying we should do that. Your ugly patch may be the
> smaller pain, because in the end, few enough filesystems actually want
> different sector sizes. So I'm really arguing to explain that the
> whole "sectors have to be consecutive in a page" is BS.

Yes, it may be better to apply the patch than to redesign the buffer cache 
for different-sized buffers.

> You seem to be somewhat confused about the buffer cache usage, since
> you also thought that we don't alias filesystem data and direct block
> device data. We really, really do. The same physical sectors can exist
> in both - in different pages, and not coherent with each other.

I understand it - that's why I said that you can't access on-disk 
structures on HPFS using a buffer that is larger than the structure itself 
- because it may alias a file and (as you correctly say) there is no 
coherency.

Mikulas
