From: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
To: Theodore Ts'o <tytso@mit.edu>
Cc: linux-ext4@vger.kernel.org,
	Harshad Shirwadkar <harshadshirwadkar@gmail.com>,
	Wang Shilong <wshilong@ddn.com>,
	Andreas Dilger <adilger.kernel@dilger.ca>, Li Xi <lixi@ddn.com>,
	Ritesh Harjani <ritesh.list@gmail.com>
Subject: [RFCv1 56/72] e2fsck: wait for fix thread to finish before checking
Date: Mon,  7 Nov 2022 17:51:44 +0530
Message-ID: <feeb2dc096e3cf080b91eca836957136f580da3a.1667822611.git.ritesh.list@gmail.com>
In-Reply-To: <cover.1667822611.git.ritesh.list@gmail.com>

From: Wang Shilong <wshilong@ddn.com>

Before proceeding to the next inode, wait for any fix operation that is
already in progress to finish.

With pfsck, each pass1 thread now takes fs_fix_rwlock in read mode
around every inode check (e2fsck_pass1_check_lock/unlock), while the
fix path (e2fsck_pass1_fix_lock) upgrades it to the write lock. A fix
therefore waits for all in-flight inode checks to drain, and new checks
wait for the fix to complete. The locking is only armed when more than
one pass1 thread is configured (fs_need_locking).
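
Roughly, the scheme looks like the standalone sketch below (illustrative
only, not the patch code itself; the short helper names merely stand in
for the new e2fsck_pass1_check_*/fix_* helpers):

/*
 * Minimal model of the check/fix serialization (illustrative only).
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t fix_rwlock = PTHREAD_RWLOCK_INITIALIZER;

/* Each pass1 thread holds the lock in read mode while checking one
 * inode, so all threads can scan concurrently. */
static void check_lock(void)   { pthread_rwlock_rdlock(&fix_rwlock); }
static void check_unlock(void) { pthread_rwlock_unlock(&fix_rwlock); }

/* A fix upgrades the caller's read lock to the write lock: every
 * other checker drains out before the fix proceeds. */
static void fix_lock(void)
{
	pthread_rwlock_unlock(&fix_rwlock);	/* drop our read lock */
	pthread_rwlock_wrlock(&fix_rwlock);	/* wait for all checkers */
}

/* After the fix, fall back to a read lock so checking can resume. */
static void fix_unlock(void)
{
	pthread_rwlock_unlock(&fix_rwlock);
	pthread_rwlock_rdlock(&fix_rwlock);
}

static void *checker(void *arg)
{
	(void)arg;
	check_lock();		/* start checking one "inode" */
	fix_lock();		/* something needs fixing */
	puts("fixing with all other checks quiesced");
	fix_unlock();
	check_unlock();		/* done with this inode */
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, checker, NULL);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}

As in the patch, the read-to-write upgrade is not atomic; that is fine
here because fixes only need to be serialized against checks, not
against each other.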

Signed-off-by: Wang Shilong <wshilong@ddn.com>
[unlock from Jan's orphan inode path as well]
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
---
 e2fsck/e2fsck.c |  3 +++
 e2fsck/e2fsck.h |  5 +++-
 e2fsck/pass1.c  | 70 +++++++++++++++++++++++++++++++++++++++++++------
 e2fsck/util.c   | 56 ++++++++++++++++++++++++++++++++++++---
 4 files changed, 122 insertions(+), 12 deletions(-)

diff --git a/e2fsck/e2fsck.c b/e2fsck/e2fsck.c
index 1e295e3e..a5150dab 100644
--- a/e2fsck/e2fsck.c
+++ b/e2fsck/e2fsck.c
@@ -187,6 +187,9 @@ errcode_t e2fsck_reset_context(e2fsck_t ctx)
 	ctx->fs_fragmented_dir = 0;
 	ctx->large_files = 0;
 	ctx->large_dirs = 0;
+#ifdef HAVE_PTHREAD
+	ctx->fs_need_locking = 0;
+#endif
 
 	for (i=0; i < MAX_EXTENT_DEPTH_COUNT; i++)
 		ctx->extent_depth_count[i] = 0;
diff --git a/e2fsck/e2fsck.h b/e2fsck/e2fsck.h
index e3276924..01bd9d01 100644
--- a/e2fsck/e2fsck.h
+++ b/e2fsck/e2fsck.h
@@ -488,8 +488,9 @@ struct e2fsck_struct {
 
 #ifdef HAVE_PTHREAD
 	__u32			 fs_num_threads;
+	int			 fs_need_locking;
 	/* serialize fix operation for multiple threads */
-	pthread_mutex_t		 fs_fix_mutex;
+	pthread_rwlock_t	 fs_fix_rwlock;
 	/* protect block_found_map, block_dup_map */
 	pthread_rwlock_t	 fs_block_map_rwlock;
 #endif
@@ -553,6 +554,8 @@ extern int e2fsck_strnlen(const char * s, int count);
 
 extern void e2fsck_pass1(e2fsck_t ctx);
 extern void e2fsck_pass1_dupblocks(e2fsck_t ctx, char *block_buf);
+extern void e2fsck_pass1_check_lock(e2fsck_t ctx);
+extern void e2fsck_pass1_check_unlock(e2fsck_t ctx);
 extern void e2fsck_pass2(e2fsck_t ctx);
 extern void e2fsck_pass3(e2fsck_t ctx);
 extern void e2fsck_pass4(e2fsck_t ctx);
diff --git a/e2fsck/pass1.c b/e2fsck/pass1.c
index a5dc6e44..29333acf 100644
--- a/e2fsck/pass1.c
+++ b/e2fsck/pass1.c
@@ -991,8 +991,10 @@ static void finish_processing_inode(e2fsck_t ctx, ext2_ino_t ino,
 #define FINISH_INODE_LOOP(ctx, ino, pctx, failed_csum) \
 	do { \
 		finish_processing_inode((ctx), (ino), (pctx), (failed_csum)); \
-		if ((ctx)->flags & E2F_FLAG_ABORT) \
+		if ((ctx)->flags & E2F_FLAG_ABORT) { \
+			e2fsck_pass1_check_unlock(ctx); \
 			return; \
+		} \
 	} while (0)
 
 static int could_be_block_map(ext2_filsys fs, struct ext2_inode *inode)
@@ -1374,8 +1376,12 @@ static errcode_t e2fsck_pass1_prepare(e2fsck_t ctx)
 		ext2fs_mark_block_bitmap2(ctx->block_found_map,
 					  fs->super->s_mmp_block);
 #ifdef	HAVE_PTHREAD
-	pthread_mutex_init(&ctx->fs_fix_mutex, NULL);
+	pthread_rwlock_init(&ctx->fs_fix_rwlock, NULL);
 	pthread_rwlock_init(&ctx->fs_block_map_rwlock, NULL);
+	if (ctx->fs_num_threads > 1)
+		ctx->fs_need_locking = 1;
+	else
+		ctx->fs_need_locking = 0;
 #endif
 
 	return 0;
@@ -1387,6 +1393,10 @@ static void e2fsck_pass1_post(e2fsck_t ctx)
 	ext2_filsys fs = ctx->fs;
 	char *block_buf;
 
+#ifdef HAVE_PTHREAD
+	ctx->fs_need_locking = 0;
+#endif
+
 	if (e2fsck_should_abort(ctx))
 		return;
 
@@ -1662,6 +1672,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 #endif
 
 	while (1) {
+		e2fsck_pass1_check_lock(ctx);
 		if (ino % (fs->super->s_inodes_per_group * 4) == 1) {
 			if (e2fsck_mmp_update(fs))
 				fatal_error(ctx, 0);
@@ -1672,8 +1683,10 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 		if (ino > ino_threshold)
 			pass1_readahead(ctx, &ra_group, &ino_threshold);
 		ehandler_operation(old_op);
-		if (e2fsck_should_abort(ctx))
+		if (e2fsck_should_abort(ctx)) {
+			e2fsck_pass1_check_unlock(ctx);
 			goto endit;
+		}
 		if (pctx.errcode == EXT2_ET_BAD_BLOCK_IN_INODE_TABLE) {
 			/*
 			 * If badblocks says badblocks is bad, offer to clear
@@ -1694,27 +1707,45 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 					fix_problem(ctx, PR_1_ISCAN_ERROR,
 						    &pctx);
 					ctx->flags |= E2F_FLAG_ABORT;
+					e2fsck_pass1_check_unlock(ctx);
+					goto endit;
 				} else
 					ctx->flags |= E2F_FLAG_RESTART;
-				goto endit;
+				err = ext2fs_inode_scan_goto_blockgroup(scan,
+									0);
+				if (err) {
+					fix_problem(ctx, PR_1_ISCAN_ERROR,
+						    &pctx);
+					ctx->flags |= E2F_FLAG_ABORT;
+					e2fsck_pass1_check_unlock(ctx);
+					goto endit;
+				}
+				e2fsck_pass1_check_unlock(ctx);
+				continue;
 			}
 			if (!ctx->inode_bb_map)
 				alloc_bb_map(ctx);
 			ext2fs_mark_inode_bitmap2(ctx->inode_bb_map, ino);
 			ext2fs_mark_inode_bitmap2(ctx->inode_used_map, ino);
+			e2fsck_pass1_check_unlock(ctx);
 			continue;
 		}
-		if (pctx.errcode == EXT2_ET_SCAN_FINISHED)
+		if (pctx.errcode == EXT2_ET_SCAN_FINISHED) {
+			e2fsck_pass1_check_unlock(ctx);
 			break;
+		}
 		if (pctx.errcode &&
 		    pctx.errcode != EXT2_ET_INODE_CSUM_INVALID &&
 		    pctx.errcode != EXT2_ET_INODE_IS_GARBAGE) {
 			fix_problem(ctx, PR_1_ISCAN_ERROR, &pctx);
 			ctx->flags |= E2F_FLAG_ABORT;
+			e2fsck_pass1_check_unlock(ctx);
 			goto endit;
 		}
-		if (!ino)
+		if (!ino) {
+			e2fsck_pass1_check_unlock(ctx);
 			break;
+		}
 #ifdef HAVE_PTHREAD
 		if (ctx->global_ctx)
 		        ctx->thread_info.et_inode_number++;
@@ -1767,6 +1798,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 				pctx.num = inode->i_links_count;
 				fix_problem(ctx, PR_1_ICOUNT_STORE, &pctx);
 				ctx->flags |= E2F_FLAG_ABORT;
+				e2fsck_pass1_check_unlock(ctx);
 				goto endit;
 			}
 		} else if ((ino >= EXT2_FIRST_INODE(fs->super)) &&
@@ -1781,6 +1813,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 				}
 			}
 			FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+			e2fsck_pass1_check_unlock(ctx);
 			continue;
 		}
 
@@ -1801,6 +1834,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 							       &pctx);
 			if (res < 0) {
 				/* skip FINISH_INODE_LOOP */
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			}
 		}
@@ -1822,6 +1856,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 			} else if (fix_problem(ctx, PR_1_INLINE_DATA_SET, &pctx)) {
 				e2fsck_clear_inode(ctx, ino, inode, 0, "pass1");
 				/* skip FINISH_INODE_LOOP */
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			}
 		}
@@ -1866,6 +1901,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 					if (err) {
 						pctx.errcode = err;
 						ctx->flags |= E2F_FLAG_ABORT;
+						e2fsck_pass1_check_unlock(ctx);
 						goto endit;
 					}
 					inode->i_flags &= ~EXT4_INLINE_DATA_FL;
@@ -1880,6 +1916,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 				/* Some other kind of non-xattr error? */
 				pctx.errcode = err;
 				ctx->flags |= E2F_FLAG_ABORT;
+				e2fsck_pass1_check_unlock(ctx);
 				goto endit;
 			}
 		}
@@ -1917,6 +1954,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 					ext2fs_mark_inode_bitmap2(ctx->inode_used_map,
 								 ino);
 				/* skip FINISH_INODE_LOOP */
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			}
 		}
@@ -1980,6 +2018,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 				pctx.num = 4;
 				fix_problem(ctx, PR_1_ALLOCATE_BBITMAP_ERROR, &pctx);
 				ctx->flags |= E2F_FLAG_ABORT;
+				e2fsck_pass1_check_unlock(ctx);
 				goto endit;
 			}
 			pb.ino = EXT2_BAD_INO;
@@ -1997,16 +2036,19 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 			if (pctx.errcode) {
 				fix_problem(ctx, PR_1_BLOCK_ITERATE, &pctx);
 				ctx->flags |= E2F_FLAG_ABORT;
+				e2fsck_pass1_check_unlock(ctx);
 				goto endit;
 			}
 			if (pb.bbcheck)
 				if (!fix_problem(ctx, PR_1_BBINODE_BAD_METABLOCK_PROMPT, &pctx)) {
 				ctx->flags |= E2F_FLAG_ABORT;
+				e2fsck_pass1_check_unlock(ctx);
 				goto endit;
 			}
 			ext2fs_mark_inode_bitmap2(ctx->inode_used_map, ino);
 			clear_problem_context(&pctx);
 			FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+			e2fsck_pass1_check_unlock(ctx);
 			continue;
 		} else if (ino == EXT2_ROOT_INO) {
 			/*
@@ -2048,6 +2090,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 				}
 				check_blocks(ctx, &pctx, block_buf, NULL);
 				FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			}
 			if ((inode->i_links_count ||
@@ -2075,6 +2118,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 				}
 				check_blocks(ctx, &pctx, block_buf, NULL);
 				FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			}
 			if ((inode->i_links_count ||
@@ -2101,6 +2145,7 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 				}
 				check_blocks(ctx, &pctx, block_buf, NULL);
 				FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			}
 			if ((inode->i_links_count ||
@@ -2139,11 +2184,13 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 			}
 			check_blocks(ctx, &pctx, block_buf, NULL);
 			FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+			e2fsck_pass1_check_unlock(ctx);
 			continue;
 		}
 
 		if (!inode->i_links_count) {
 			FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+			e2fsck_pass1_check_unlock(ctx);
 			continue;
 		}
 		/*
@@ -2253,12 +2300,14 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 			ctx->fs_symlinks_count++;
 			if (inode->i_flags & EXT4_INLINE_DATA_FL) {
 				FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			} else if (ext2fs_is_fast_symlink(inode)) {
 				ctx->fs_fast_symlinks_count++;
 				check_blocks(ctx, &pctx, block_buf,
 					     &ea_ibody_quota);
 				FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+				e2fsck_pass1_check_unlock(ctx);
 				continue;
 			}
 		}
@@ -2306,16 +2355,21 @@ void e2fsck_pass1_run(e2fsck_t ctx)
 
 		FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
 
-		if (ctx->flags & E2F_FLAG_SIGNAL_MASK)
+		if (e2fsck_should_abort(ctx)) {
+			e2fsck_pass1_check_unlock(ctx);
 			goto endit;
+		}
 
 		if (process_inode_count >= ctx->process_inode_size) {
 			process_inodes(ctx, block_buf, inodes_to_process,
 				       &process_inode_count);
 
-			if (e2fsck_should_abort(ctx))
+			if (e2fsck_should_abort(ctx)) {
+				e2fsck_pass1_check_unlock(ctx);
 				goto endit;
+			}
 		}
+		e2fsck_pass1_check_unlock(ctx);
 	}
 	process_inodes(ctx, block_buf, inodes_to_process,
 		       &process_inode_count);
diff --git a/e2fsck/util.c b/e2fsck/util.c
index 5714576a..b7c1e7a5 100644
--- a/e2fsck/util.c
+++ b/e2fsck/util.c
@@ -82,7 +82,8 @@ void fatal_error(e2fsck_t ctx, const char *msg)
 	}
 out:
 	ctx->flags |= E2F_FLAG_ABORT;
-	if (ctx->flags & E2F_FLAG_SETJMP_OK)
+	if (!(ctx->options & E2F_OPT_MULTITHREAD) &&
+	    ctx->flags & E2F_FLAG_SETJMP_OK)
 		longjmp(ctx->abort_loc, 1);
 	if (ctx->logf)
 		fprintf(ctx->logf, "Exit status: %d\n", exit_value);
@@ -580,38 +581,79 @@ void e2fsck_read_inode_full(e2fsck_t ctx, unsigned long ino,
 	if (!global_ctx)			\
 		global_ctx = ctx;		\
 
+/*
+ * Before taking the write lock, the caller must
+ * already hold the read lock.
+ */
 void e2fsck_pass1_fix_lock(e2fsck_t ctx)
 {
+	int err;
+
+	if (!ctx->fs_need_locking)
+		return;
+
 	e2fsck_get_lock_context(ctx);
-	pthread_mutex_lock(&global_ctx->fs_fix_mutex);
+	err = pthread_rwlock_trywrlock(&global_ctx->fs_fix_rwlock);
+	assert(err != 0);
+	pthread_rwlock_unlock(&global_ctx->fs_fix_rwlock);
+	pthread_rwlock_wrlock(&global_ctx->fs_fix_rwlock);
 }
 
 void e2fsck_pass1_fix_unlock(e2fsck_t ctx)
 {
+	if (!ctx->fs_need_locking)
+		return;
 	e2fsck_get_lock_context(ctx);
-	pthread_mutex_unlock(&global_ctx->fs_fix_mutex);
+	/* unlock write lock */
+	pthread_rwlock_unlock(&global_ctx->fs_fix_rwlock);
+	/* get read lock again */
+	pthread_rwlock_rdlock(&global_ctx->fs_fix_rwlock);
+}
+
+void e2fsck_pass1_check_lock(e2fsck_t ctx)
+{
+	if (!ctx->fs_need_locking)
+		return;
+	e2fsck_get_lock_context(ctx);
+	pthread_rwlock_rdlock(&global_ctx->fs_fix_rwlock);
+}
+
+void e2fsck_pass1_check_unlock(e2fsck_t ctx)
+{
+	if (!ctx->fs_need_locking)
+		return;
+	e2fsck_get_lock_context(ctx);
+	pthread_rwlock_unlock(&global_ctx->fs_fix_rwlock);
 }
 
 void e2fsck_pass1_block_map_w_lock(e2fsck_t ctx)
 {
+	if (!ctx->fs_need_locking)
+		return;
 	e2fsck_get_lock_context(ctx);
 	pthread_rwlock_wrlock(&global_ctx->fs_block_map_rwlock);
 }
 
 void e2fsck_pass1_block_map_w_unlock(e2fsck_t ctx)
 {
+	if (!ctx->fs_need_locking)
+		return;
 	e2fsck_get_lock_context(ctx);
 	pthread_rwlock_unlock(&global_ctx->fs_block_map_rwlock);
 }
 
 void e2fsck_pass1_block_map_r_lock(e2fsck_t ctx)
 {
+	if (!ctx->fs_need_locking)
+		return;
 	e2fsck_get_lock_context(ctx);
 	pthread_rwlock_rdlock(&global_ctx->fs_block_map_rwlock);
 }
 
 void e2fsck_pass1_block_map_r_unlock(e2fsck_t ctx)
 {
+	if (!ctx->fs_need_locking)
+		return;
 	e2fsck_get_lock_context(ctx);
 	pthread_rwlock_unlock(&global_ctx->fs_block_map_rwlock);
  }
@@ -624,6 +666,14 @@ void e2fsck_pass1_fix_lock(e2fsck_t ctx)
 void e2fsck_pass1_fix_unlock(e2fsck_t ctx)
 {
 
+}
+void e2fsck_pass1_check_lock(e2fsck_t ctx)
+{
+
+}
+void e2fsck_pass1_check_unlock(e2fsck_t ctx)
+{
+
 }
 void e2fsck_pass1_block_map_w_lock(e2fsck_t ctx)
 {
-- 
2.37.3

