linux-lvm.redhat.com archive mirror
* [linux-lvm] [PATCH] lvs: add -o lv_usable
@ 2020-09-05  9:06 Zhao Heming
  2020-09-05  9:08 ` heming.zhao
  2020-09-05  9:17 ` heming.zhao
  0 siblings, 2 replies; 6+ messages in thread
From: Zhao Heming @ 2020-09-05  9:06 UTC (permalink / raw)
  To: linux-lvm; +Cc: teigland, zkabelac, Zhao Heming

Report whether an LV is usable by upper layers.
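
The field is computed by walking the LV's segments and comparing each
dev's "major:minor" against the devs referenced in the LV's
device-mapper table. For reference, a linear LV's table params start
with "major:minor", which is why the code cuts params at the first
space. A sketch with illustrative numbers (in the test run below
/dev/sdg is 8:96 and /dev/sdi is 8:128):

  # dmsetup table vg1-lv1
  0 196608 linear 8:96 2048
  196608 196608 linear 8:128 2048

Querying the new field (output format as in the test results below):

  # lvs -o name,devices,lv_usable
    LV   Devices      Usable
    lv1  /dev/sdg(0)  usable
    lv1  /dev/sdi(0)  usable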

Signed-off-by: Zhao Heming <heming.zhao@suse.com>
---
 lib/activate/activate.h          |   2 +
 lib/activate/dev_manager.c       |  67 ++++++++++++++++
 lib/metadata/metadata-exported.h |   1 +
 lib/metadata/metadata.c          | 130 +++++++++++++++++++++++++++++++
 lib/report/columns.h             |   1 +
 lib/report/properties.c          |   2 +
 lib/report/report.c              |  13 ++++
 lib/report/values.h              |   1 +
 8 files changed, 217 insertions(+)

diff --git a/lib/activate/activate.h b/lib/activate/activate.h
index e3c1bb35e..25de3d6b1 100644
--- a/lib/activate/activate.h
+++ b/lib/activate/activate.h
@@ -255,6 +255,8 @@ struct dev_usable_check_params {
  */
 int device_is_usable(struct device *dev, struct dev_usable_check_params check);
 
+char *lv_mapping_table(const char *dm_table_dev);
+bool dm_has_lvdev(const char *dm_table_dev, const char *lvdev);
 /*
  * Declaration moved here from fs.h to keep header fs.h hidden
  */
diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c
index a626b000a..c272c20f6 100644
--- a/lib/activate/dev_manager.c
+++ b/lib/activate/dev_manager.c
@@ -778,6 +778,73 @@ int device_is_usable(struct device *dev, struct dev_usable_check_params check)
 	return r;
 }
 
+/*
+ * Return the underlying devs of the given LV's dm table.
+ * NOTE: the caller must free the returned string.
+ */
+char *lv_mapping_table(const char *dm_table_dev)
+{
+	struct dm_task *dmt;
+	uint64_t start, len;
+	char *params, *type = NULL;
+	void *next = NULL;
+	char *ret_str = NULL;
+
+	if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL,
+					dm_table_dev, NULL, NULL, 0, 0, 0, 0, 0))) {
+		log_error("can't get %s device mapping table", dm_table_dev);
+		return NULL;
+	}
+
+	do {
+		next = dm_get_next_target(dmt, next, &start, &len, &type, &params);
+		if (!type || strcmp(type, TARGET_NAME_LINEAR))
+			goto out; /* only linear targets are supported */
+		/* TODO: merge all sub-dm devs into one ret_str */
+		ret_str = strndup(params, strchr(params, ' ') - params);
+		break;
+	} while (next);
+
+out:
+	dm_task_destroy(dmt);
+
+	return ret_str;
+}
+
+/*
+ * Check whether the underlying devs of the first LV (dm_table_dev)
+ * contain the given LV dev (lvdev).
+ */
+bool dm_has_lvdev(const char *dm_table_dev, const char *lvdev)
+{
+	struct dm_task *dmt;
+	uint64_t start, len;
+	char *params, *type = NULL;
+	void *next = NULL;
+	bool ret = false;
+
+	if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL,
+					dm_table_dev, NULL, NULL, 0, 0, 0, 0, 0))) {
+		log_error("can't get %s device mapping table", dm_table_dev);
+		return false;
+	}
+
+	do {
+		next = dm_get_next_target(dmt, next, &start, &len, &type, &params);
+		if (!type || strcmp(type, TARGET_NAME_LINEAR))
+			goto out; /* only linear targets are supported */
+		if (strstr(params, lvdev)) {
+			ret = true;
+			break;
+		}
+	} while (next);
+
+out:
+	dm_task_destroy(dmt);
+
+	return ret;
+}
+
 /*
  * If active LVs were activated by a version of LVM2 before 2.02.00 we must
  * perform additional checks to find them because they do not have the LVM-
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 670656a0f..620216dc0 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -1416,5 +1416,6 @@ int lv_extend_integrity_in_raid(struct logical_volume *lv, struct dm_list *pvh);
 int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_settings **isettings);
 int integrity_mode_set(const char *mode, struct integrity_settings *settings);
 int lv_integrity_mismatches(struct cmd_context *cmd, const struct logical_volume *lv, uint64_t *mismatches);
+bool _lv_is_usable(const struct logical_volume *lv, char *dm_dev);
 
 #endif
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 8b8c491c0..03bc399a8 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -2043,6 +2043,136 @@ static int _lv_mark_if_partial_collect(struct logical_volume *lv, void *data)
 	return 1;
 }
 
+/*
+ * Return whether the LV still works when an underlying dev is removed.
+ *
+ * RAID:
+ * - raid0: if any disk is lost, return false
+ * - raid1,10,4/5,6: the cases below are still considered usable
+ *   (return true):
+ *   - raid1: at least 1 disk alive
+ *   - raid10: at most 1 disk lost
+ *   - raid4/5: at most 1 disk lost
+ *   - raid6: at most 2 disks lost
+ *
+ * LINEAR:
+ * - if any disk is lost, return false
+ *
+ * MIRROR:
+ * - a mirror LV never stays in a 'not available' state, so return true
+ *   directly.
+ * - the failure rule:
+ *   - a 3-way mirror converts to a 2-way mirror
+ *   - a 2-way mirror converts to a linear device
+ *
+ * For all other LV types (e.g. thin, cache, integrity, vdo etc):
+ * - return false if any disk is lost.
+ */
+bool _lv_is_usable(const struct logical_volume *lv, char *dm_dev)
+{
+	int s, missing_pv = 0, exist_pv = 0, un_usable_pv = 0;
+	bool ret = true;
+	struct lv_segment *seg = NULL;
+	struct device *dev;
+	char t_dev[8]; /* strlen("255:255") + 1 == 8 */
+	char lvname[50];
+	char *lv_dev;
+	struct physical_volume *pv;
+
+	/* see the MIRROR rule above: return true directly */
+	if (!dm_dev && seg_is_mirror(first_seg(lv))) {
+		ret = true;
+		goto out;
+	}
+
+	dm_list_iterate_items(seg, &lv->segments) {
+		for (s = 0; s < seg->area_count; ++s) {
+			if (seg_type(seg, s) == AREA_LV) {
+				if (seg_lv(seg, s)->status & PARTIAL_LV) {
+					missing_pv++;
+				} else {
+					/* FIXME: is "vgname" + '-' + "lvname" the right dm name format? */
+					snprintf(lvname, 50, "%s-%s", lv->vg->name, seg_lv(seg, s)->name);
+					lv_dev = lv_mapping_table(lvname);
+					if (lv_dev)
+						_lv_is_usable(seg_lv(seg, s), lv_dev) ?
+							exist_pv++ : un_usable_pv++;
+					else
+						missing_pv++;
+				}
+			} else if (seg_type(seg, s) == AREA_PV) {
+				pv = seg_pv(seg, s);
+				snprintf(t_dev, 8, "%d:%d",
+						pv->dev ? (int) MAJOR(pv->dev->dev) : -1,
+						pv->dev ? (int) MINOR(pv->dev->dev) : -1);
+
+				if (dm_dev) { /* call from recursion */
+					ret = strncmp(t_dev, dm_dev, 8) ? false : true;
+					free(dm_dev);
+					return ret;
+				}
+
+				if (!(pv->dev) && is_missing_pv(pv)) {
+					missing_pv++;
+				} else {
+					if (pv->dev) {
+						dev = seg_dev(seg, s);
+						snprintf(t_dev, 8, "%d:%d", (int) MAJOR(dev->dev), (int) MINOR(dev->dev));
+						/* FIXME: is "vgname" + '-' + "lvname" the right dm name format? */
+						snprintf(lvname, 50, "%s-%s", lv->vg->name, lv->name);
+						dm_has_lvdev(lvname, t_dev) ? exist_pv++ : un_usable_pv++;
+					} else
+						missing_pv++;
+				}
+			}
+		}
+	}
+
+	/* a recursive call must return from here */
+	if (dm_dev) {
+		ret = (un_usable_pv || missing_pv) ? false : true;
+		free(dm_dev);
+		return ret;
+	}
+
+	seg = first_seg(lv);
+	if (seg_is_linear(seg)) {
+		ret = (missing_pv || un_usable_pv) ? false : true;
+		goto out;
+	}
+	if (seg_is_any_raid0(seg)) {
+		ret = (missing_pv || un_usable_pv) ? false : true;
+		goto out;
+	}
+	if (seg_is_raid1(seg)) {
+		ret = exist_pv ? true : false;
+		goto out;
+	}
+	if (seg_is_any_raid10(seg)) {
+		ret = ((missing_pv + un_usable_pv) > 1) ? false : true;
+		goto out;
+	}
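+	/*
+	 * Worked example (illustrative, matching the raid10 test in this
+	 * thread): a 4-leg raid10 with 1 PV lost has
+	 * missing_pv + un_usable_pv == 1, so it stays "usable"; with 2 PVs
+	 * lost it reports "not usable" regardless of which mirror halves
+	 * failed, since only the count of lost devs is checked here.
+	 */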
+	if (seg_is_raid4(seg)) {
+		ret = ((missing_pv + un_usable_pv) > 1) ? false : true;
+		goto out;
+	}
+	if (seg_is_any_raid5(seg)) {
+		ret = ((missing_pv + un_usable_pv) > 1) ? false : true;
+		goto out;
+	}
+	if (seg_is_any_raid6(seg)) {
+		ret = ((missing_pv + un_usable_pv) > 2) ? false : true;
+		goto out;
+	}
+
+	/*
+	 * If we get here, the LV type must be thin, cache, integrity, vdo etc;
+	 * return false if any disk is lost or unusable.
+	 */
+	ret = (missing_pv || un_usable_pv) ? false : true;
+
+out:
+	return ret;
+}
+
 static int _lv_mark_if_partial_single(struct logical_volume *lv, void *data)
 {
 	unsigned s;
diff --git a/lib/report/columns.h b/lib/report/columns.h
index 426a32c50..357c42530 100644
--- a/lib/report/columns.h
+++ b/lib/report/columns.h
@@ -145,6 +145,7 @@ FIELD(LVSSTATUS, lv, STR_LIST, "KCacheSettings", lvid, 18, kernel_cache_settings
 FIELD(LVSSTATUS, lv, STR, "KCachePolicy", lvid, 18, kernel_cache_policy, kernel_cache_policy, "Cache policy used in kernel.", 0)
 FIELD(LVSSTATUS, lv, NUM, "KMFmt", lvid, 0, kernelmetadataformat, kernel_metadata_format, "Cache metadata format used in kernel.", 0)
 FIELD(LVSSTATUS, lv, STR, "Health", lvid, 15, lvhealthstatus, lv_health_status, "LV health status.", 0)
+FIELD(LVSSTATUS, lv, STR, "Usable", lvid, 15, lvusable, lv_usable, "Whether LVM believes the upper layer can successfully do IO to the entire LV.", 0)
 FIELD(LVSSTATUS, lv, STR, "KDiscards", lvid, 0, kdiscards, kernel_discards, "For thin pools, how discards are handled in kernel.", 0)
 FIELD(LVSSTATUS, lv, BIN, "CheckNeeded", lvid, 15, lvcheckneeded, lv_check_needed, "For thin pools and cache volumes, whether metadata check is needed.", 0)
 FIELD(LVSSTATUS, lv, BIN, "MergeFailed", lvid, 15, lvmergefailed, lv_merge_failed, "Set if snapshot merge failed.", 0)
diff --git a/lib/report/properties.c b/lib/report/properties.c
index d4ac8c47e..e3d64a5d6 100644
--- a/lib/report/properties.c
+++ b/lib/report/properties.c
@@ -296,6 +296,8 @@ GET_PV_NUM_PROPERTY_FN(pv_ba_size, SECTOR_SIZE * pv->ba_size)
 #define _lv_device_open_get prop_not_implemented_get
 #define _lv_health_status_set prop_not_implemented_set
 #define _lv_health_status_get prop_not_implemented_get
+#define _lv_usable_set prop_not_implemented_set
+#define _lv_usable_get prop_not_implemented_get
 #define _lv_skip_activation_set prop_not_implemented_set
 #define _lv_skip_activation_get prop_not_implemented_get
 #define _lv_check_needed_set prop_not_implemented_set
diff --git a/lib/report/report.c b/lib/report/report.c
index cd7971562..dad7649aa 100644
--- a/lib/report/report.c
+++ b/lib/report/report.c
@@ -3900,6 +3900,19 @@ static int _lvhealthstatus_disp(struct dm_report *rh, struct dm_pool *mem,
 	return _field_string(rh, field, health);
 }
 
+static int _lvusable_disp(struct dm_report *rh, struct dm_pool *mem,
+				struct dm_report_field *field,
+				const void *data, void *private)
+{
+	const struct lv_with_info_and_seg_status *lvdm = (const struct lv_with_info_and_seg_status *) data;
+	const struct logical_volume *lv = lvdm->lv;
+	const char *usable = _lv_is_usable(lv, NULL) ? "usable" : "not usable";
+
+	return _field_string(rh, field, usable);
+}
+
 static int _lvcheckneeded_disp(struct dm_report *rh, struct dm_pool *mem,
 			       struct dm_report_field *field,
 			       const void *data, void *private)
diff --git a/lib/report/values.h b/lib/report/values.h
index 9b98c229e..53f285db6 100644
--- a/lib/report/values.h
+++ b/lib/report/values.h
@@ -102,6 +102,7 @@ FIELD_RESERVED_VALUE(NAMED | RANGE | FUZZY | DYNAMIC, lv_time_removed, lv_time_r
 FIELD_RESERVED_VALUE(NOFLAG, cache_policy, cache_policy_undef, "", "", "", "undefined")
 FIELD_RESERVED_VALUE(NOFLAG, seg_monitor, seg_monitor_undef, "", "", "", "undefined")
 FIELD_RESERVED_VALUE(NOFLAG, lv_health_status, health_undef, "", "", "", "undefined")
+FIELD_RESERVED_VALUE(NOFLAG, lv_usable, usable_undef, "", "", "", "undefined")
 FIELD_RESERVED_VALUE(NOFLAG, kernel_discards, seg_kernel_discards_undef, "", "", "", "undefined")
 FIELD_RESERVED_VALUE(NOFLAG, vdo_write_policy, vdo_write_policy_undef, "", "", "", "undefined")
 /* TODO the following 2 need STR_LIST support for reserved values
-- 
2.27.0


* Re: [linux-lvm] [PATCH] lvs: add -o lv_usable
  2020-09-05  9:06 [linux-lvm] [PATCH] lvs: add -o lv_usable Zhao Heming
@ 2020-09-05  9:08 ` heming.zhao
  2020-09-07 14:32   ` Zdenek Kabelac
  2020-09-05  9:17 ` heming.zhao
  1 sibling, 1 reply; 6+ messages in thread
From: heming.zhao @ 2020-09-05  9:08 UTC (permalink / raw)
  To: linux-lvm; +Cc: teigland, zkabelac



On 9/5/20 5:06 PM, Zhao Heming wrote:
> Report whether an LV is usable by upper layers.
> 
> Signed-off-by: Zhao Heming <heming.zhao@suse.com>
> ---
>   lib/activate/activate.h          |   2 +
>   lib/activate/dev_manager.c       |  67 ++++++++++++++++
>   lib/metadata/metadata-exported.h |   1 +
>   lib/metadata/metadata.c          | 130 +++++++++++++++++++++++++++++++
>   lib/report/columns.h             |   1 +
>   lib/report/properties.c          |   2 +
>   lib/report/report.c              |  13 ++++
>   lib/report/values.h              |   1 +
>   8 files changed, 217 insertions(+)
> 

My test results:

## linear

[tb-clustermd2 lvm2.sourceware.git]# vgcreate vg1 /dev/sdg /dev/sdi
  Physical volume "/dev/sdg" successfully created.
  Physical volume "/dev/sdi" successfully created.
  Volume group "vg1" successfully created
[tb-clustermd2 lvm2.sourceware.git]# lvcreate -l 100%FREE -n lv1 vg1
  Logical volume "lv1" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg         8:96   0  100M  0 disk
└─vg1-lv1 254:0    0  192M  0 lvm
sdi         8:128  0  100M  0 disk
└─vg1-lv1 254:0    0  192M  0 lvm
vda       253:0    0   40G  0 disk
├─vda1    253:1    0    8M  0 part
├─vda2    253:2    0   38G  0 part /
└─vda3    253:3    0    2G  0 part [SWAP]

------ remove one disk -----------

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg         8:96   0  100M  0 disk
└─vg1-lv1 254:0    0  192M  0 lvm
vda       253:0    0   40G  0 disk
├─vda1    253:1    0    8M  0 part
├─vda2    253:2    0   38G  0 part /
└─vda3    253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid zOsVRm-ojxU-ZbfR-cLcT-MhxR-pfMh-eVfC42.
  WARNING: VG vg1 is missing PV zOsVRm-ojxU-ZbfR-cLcT-MhxR-pfMh-eVfC42 (last written to /dev/sdi).
  LV   Devices      Usable          Health
  lv1  /dev/sdg(0)  not usable      partial
  lv1  [unknown](0) not usable      partial
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid zOsVRm-ojxU-ZbfR-cLcT-MhxR-pfMh-eVfC42.
  WARNING: VG vg1 is missing PV zOsVRm-ojxU-ZbfR-cLcT-MhxR-pfMh-eVfC42 (last written to /dev/sdi).
  LV   Devices      Usable          Health
  lv1  /dev/sdg(0)  not usable      partial
  lv1  [unknown](0) not usable      partial

----- re-insert disk, but disk major:minor changed -----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg         8:96   0  100M  0 disk
└─vg1-lv1 254:0    0  192M  0 lvm
sdh         8:112  0  100M  0 disk
vda       253:0    0   40G  0 disk
├─vda1    253:1    0    8M  0 part
├─vda2    253:2    0   38G  0 part /
└─vda3    253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV   Devices     Usable          Health
  lv1  /dev/sdg(0) not usable
  lv1  /dev/sdh(0) not usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV   Devices     Usable          Health
  lv1  /dev/sdg(0) not usable
  lv1  /dev/sdh(0) not usable

## mirror

[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type mirror -l 100%FREE -n mirrorlv vg1
  Logical volume "mirrorlv" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                       8:96   0  100M  0 disk
└─vg1-mirrorlv_mimage_0 254:1    0   92M  0 lvm
  └─vg1-mirrorlv        254:3    0   92M  0 lvm
sdh                       8:112  0  100M  0 disk
├─vg1-mirrorlv_mlog     254:0    0    4M  0 lvm
│ └─vg1-mirrorlv        254:3    0   92M  0 lvm
└─vg1-mirrorlv_mimage_1 254:2    0   92M  0 lvm
  └─vg1-mirrorlv        254:3    0   92M  0 lvm
vda                     253:0    0   40G  0 disk
├─vda1                  253:1    0    8M  0 part
├─vda2                  253:2    0   38G  0 part /
└─vda3                  253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                  Devices                                   Usable          Health
  mirrorlv            mirrorlv_mimage_0(0),mirrorlv_mimage_1(0) usable
  [mirrorlv_mimage_0] /dev/sdg(0)                               usable
  [mirrorlv_mimage_1] /dev/sdh(0)                               usable
  [mirrorlv_mlog]     /dev/sdh(23)                              usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV       Devices                                   Usable          Health
  mirrorlv mirrorlv_mimage_0(0),mirrorlv_mimage_1(0) usable

---- removed one disk -----

[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut.
  WARNING: VG vg1 is missing PV a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut (last written to /dev/sdg).
  LV                  Devices                                   Usable          Health
  mirrorlv            mirrorlv_mimage_0(0),mirrorlv_mimage_1(0) usable          partial
  [mirrorlv_mimage_0] [unknown](0)                              not usable      partial
  [mirrorlv_mimage_1] /dev/sdh(0)                               usable
  [mirrorlv_mlog]     /dev/sdh(23)                              usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut.
  WARNING: VG vg1 is missing PV a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut (last written to /dev/sdg).
  LV       Devices                                   Usable          Health
  mirrorlv mirrorlv_mimage_0(0),mirrorlv_mimage_1(0) usable          partial

 **** issue IO on mirrorlv; the mirror will be converted to a linear LV ****

[tb-clustermd2 lvm2.sourceware.git]# mkfs.ext4 /dev/vg1/mirrorlv
mke2fs 1.45.6 (20-Mar-2020)
Discarding device blocks: done
Creating filesystem with 94208 1k blocks and 23616 inodes
Filesystem UUID: 5a661fc3-8f2a-4c34-86ed-8413aa0ce03c
Superblock backups stored on blocks:
        8193, 24577, 40961, 57345, 73729

Allocating group tables: done
Writing inode tables: done
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME           MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdh              8:112  0  100M  0 disk
└─vg1-mirrorlv 254:3    0   92M  0 lvm
vda            253:0    0   40G  0 disk
├─vda1         253:1    0    8M  0 part
├─vda2         253:2    0   38G  0 part /
└─vda3         253:3    0    2G  0 part [SWAP]

[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut.
  WARNING: VG vg1 is missing PV a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut (last written to [unknown]).
  LV       Devices     Usable          Health
  mirrorlv /dev/sdh(0) usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut.
  WARNING: VG vg1 is missing PV a76Yn3-AaJE-Hv1e-J7Y6-6LeO-W7qG-wB7Sut (last written to [unknown]).
  LV       Devices     Usable          Health
  mirrorlv /dev/sdh(0) usable


---- re-insert the disk; by this time mirrorlv has been converted to a linear LV ----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME           MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg              8:96   0  100M  0 disk
sdh              8:112  0  100M  0 disk
└─vg1-mirrorlv 254:3    0   92M  0 lvm
vda            253:0    0   40G  0 disk
├─vda1         253:1    0    8M  0 part
├─vda2         253:2    0   38G  0 part /
└─vda3         253:3    0    2G  0 part [SWAP]



## raid0

[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type raid0 -l 100%FREE -n raid0lv vg1
  Using default stripesize 64.00 KiB.
  Logical volume "raid0lv" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
└─vg1-raid0lv_rimage_0 254:0    0   96M  0 lvm
  └─vg1-raid0lv        254:2    0  192M  0 lvm
sdh                      8:112  0  100M  0 disk
└─vg1-raid0lv_rimage_1 254:1    0   96M  0 lvm
  └─vg1-raid0lv        254:2    0  192M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]

[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                 Usable          Health
  raid0lv            raid0lv_rimage_0(0),raid0lv_rimage_1(0) usable
  [raid0lv_rimage_0] /dev/sdg(0)                             usable
  [raid0lv_rimage_1] /dev/sdh(0)                             usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                 Usable          Health
  raid0lv raid0lv_rimage_0(0),raid0lv_rimage_1(0) usable

---- removed one disk -----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
└─vg1-raid0lv_rimage_0 254:0    0   96M  0 lvm
  └─vg1-raid0lv        254:2    0  192M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid lRcHCD-PFYu-XCWZ-GHCM-TFGI-55Mi-5MUzG3.
  WARNING: VG vg1 is missing PV lRcHCD-PFYu-XCWZ-GHCM-TFGI-55Mi-5MUzG3 (last written to /dev/sdh).
  LV                 Devices                                 Usable          Health
  raid0lv            raid0lv_rimage_0(0),raid0lv_rimage_1(0) not usable      partial
  [raid0lv_rimage_0] /dev/sdg(0)                             usable
  [raid0lv_rimage_1] [unknown](0)                            not usable      partial
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid lRcHCD-PFYu-XCWZ-GHCM-TFGI-55Mi-5MUzG3.
  WARNING: VG vg1 is missing PV lRcHCD-PFYu-XCWZ-GHCM-TFGI-55Mi-5MUzG3 (last written to /dev/sdh).
  LV      Devices                                 Usable          Health
  raid0lv raid0lv_rimage_0(0),raid0lv_rimage_1(0) not usable      partial

---- re-insert disk, but major:minor changed ----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
└─vg1-raid0lv_rimage_0 254:0    0   96M  0 lvm
  └─vg1-raid0lv        254:2    0  192M  0 lvm
sdi                      8:128  0  100M  0 disk
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                 Usable          Health
  raid0lv            raid0lv_rimage_0(0),raid0lv_rimage_1(0) not usable
  [raid0lv_rimage_0] /dev/sdg(0)                             usable
  [raid0lv_rimage_1] /dev/sdi(0)                             not usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                 Usable          Health
  raid0lv raid0lv_rimage_0(0),raid0lv_rimage_1(0) not usable

## raid1

[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type raid1 -l 100%FREE -n raid1lv vg1
  Logical volume "raid1lv" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
├─vg1-raid1lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid1lv        254:4    0   92M  0 lvm
└─vg1-raid1lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid1lv        254:4    0   92M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid1lv_rmeta_1  254:2    0    4M  0 lvm
│ └─vg1-raid1lv        254:4    0   92M  0 lvm
└─vg1-raid1lv_rimage_1 254:3    0   92M  0 lvm
  └─vg1-raid1lv        254:4    0   92M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                 Usable          Health
  raid1lv            raid1lv_rimage_0(0),raid1lv_rimage_1(0) usable
  [raid1lv_rimage_0] /dev/sdg(1)                             usable
  [raid1lv_rimage_1] /dev/sdi(1)                             usable
  [raid1lv_rmeta_0]  /dev/sdg(0)                             usable
  [raid1lv_rmeta_1]  /dev/sdi(0)                             usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                 Usable          Health
  raid1lv raid1lv_rimage_0(0),raid1lv_rimage_1(0) usable


---- removed one disk -----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
├─vg1-raid1lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid1lv        254:4    0   92M  0 lvm
└─vg1-raid1lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid1lv        254:4    0   92M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid dLd1d2-Tpt8-iusf-N1lh-R1W6-JOW3-EgOJfA.
  WARNING: VG vg1 is missing PV dLd1d2-Tpt8-iusf-N1lh-R1W6-JOW3-EgOJfA (last written to /dev/sdi).
  LV                 Devices                                 Usable          Health
  raid1lv            raid1lv_rimage_0(0),raid1lv_rimage_1(0) usable          partial
  [raid1lv_rimage_0] /dev/sdg(1)                             usable
  [raid1lv_rimage_1] [unknown](1)                            not usable      partial
  [raid1lv_rmeta_0]  /dev/sdg(0)                             usable
  [raid1lv_rmeta_1]  [unknown](0)                            not usable      partial
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid dLd1d2-Tpt8-iusf-N1lh-R1W6-JOW3-EgOJfA.
  WARNING: VG vg1 is missing PV dLd1d2-Tpt8-iusf-N1lh-R1W6-JOW3-EgOJfA (last written to /dev/sdi).
  LV      Devices                                 Usable          Health
  raid1lv raid1lv_rimage_0(0),raid1lv_rimage_1(0) usable          partial


---- re-insert disk, but disk major:minor changed ----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
├─vg1-raid1lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid1lv        254:4    0   92M  0 lvm
└─vg1-raid1lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid1lv        254:4    0   92M  0 lvm
sdh                      8:112  0  100M  0 disk
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                 Usable          Health
  raid1lv            raid1lv_rimage_0(0),raid1lv_rimage_1(0) usable
  [raid1lv_rimage_0] /dev/sdg(1)                             usable
  [raid1lv_rimage_1] /dev/sdh(1)                             not usable
  [raid1lv_rmeta_0]  /dev/sdg(0)                             usable
  [raid1lv_rmeta_1]  /dev/sdh(0)                             not usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                 Usable          Health
  raid1lv raid1lv_rimage_0(0),raid1lv_rimage_1(0) usable


## raid10

[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type raid10 -l 100%FREE -n raid10lv vg1
  Using default stripesize 64.00 KiB.
  Logical volume "raid10lv" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sdg                       8:96   0  100M  0 disk
├─vg1-raid10lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdh                       8:112  0  100M  0 disk
├─vg1-raid10lv_rmeta_1  254:2    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_1 254:3    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdi                       8:128  0  100M  0 disk
├─vg1-raid10lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdj                       8:144  0  100M  0 disk
├─vg1-raid10lv_rmeta_3  254:6    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_3 254:7    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
vda                     253:0    0   40G  0 disk
├─vda1                  253:1    0    8M  0 part
├─vda2                  253:2    0   38G  0 part /
└─vda3                  253:3    0    2G  0 part [SWAP]

[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                  Devices                                                                             Usable          Health
  raid10lv            raid10lv_rimage_0(0),raid10lv_rimage_1(0),raid10lv_rimage_2(0),raid10lv_rimage_3(0) usable
  [raid10lv_rimage_0] /dev/sdg(1)                                                                         usable
  [raid10lv_rimage_1] /dev/sdh(1)                                                                         usable
  [raid10lv_rimage_2] /dev/sdi(1)                                                                         usable
  [raid10lv_rimage_3] /dev/sdj(1)                                                                         usable
  [raid10lv_rmeta_0]  /dev/sdg(0)                                                                         usable
  [raid10lv_rmeta_1]  /dev/sdh(0)                                                                         usable
  [raid10lv_rmeta_2]  /dev/sdi(0)                                                                         usable
  [raid10lv_rmeta_3]  /dev/sdj(0)                                                                         usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV       Devices                                                                             Usable          Health
  raid10lv raid10lv_rimage_0(0),raid10lv_rimage_1(0),raid10lv_rimage_2(0),raid10lv_rimage_3(0) usable

---- removed one disk -----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                       8:0    0  300M  0 disk
sdb                       8:16   0  300M  0 disk
sdc                       8:32   0  300M  0 disk
sdd                       8:48   0  300M  0 disk
sde                       8:64   0  300M  0 disk
sdf                       8:80   0  300M  0 disk
sdg                       8:96   0  100M  0 disk
├─vg1-raid10lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdi                       8:128  0  100M  0 disk
├─vg1-raid10lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdj                       8:144  0  100M  0 disk
├─vg1-raid10lv_rmeta_3  254:6    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_3 254:7    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
vda                     253:0    0   40G  0 disk
├─vda1                  253:1    0    8M  0 part
├─vda2                  253:2    0   38G  0 part /
└─vda3                  253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid Nq84Xp-dfEB-Tso1-wOws-X0PT-WxGT-vB3EbQ.
  WARNING: VG vg1 is missing PV Nq84Xp-dfEB-Tso1-wOws-X0PT-WxGT-vB3EbQ (last written to /dev/sdh).
  LV                  Devices                                                                             Usable          Health
  raid10lv            raid10lv_rimage_0(0),raid10lv_rimage_1(0),raid10lv_rimage_2(0),raid10lv_rimage_3(0) usable          partial
  [raid10lv_rimage_0] /dev/sdg(1)                                                                         usable
  [raid10lv_rimage_1] [unknown](1)                                                                        not usable      partial
  [raid10lv_rimage_2] /dev/sdi(1)                                                                         usable
  [raid10lv_rimage_3] /dev/sdj(1)                                                                         usable
  [raid10lv_rmeta_0]  /dev/sdg(0)                                                                         usable
  [raid10lv_rmeta_1]  [unknown](0)                                                                        not usable      partial
  [raid10lv_rmeta_2]  /dev/sdi(0)                                                                         usable
  [raid10lv_rmeta_3]  /dev/sdj(0)                                                                         usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid Nq84Xp-dfEB-Tso1-wOws-X0PT-WxGT-vB3EbQ.
  WARNING: VG vg1 is missing PV Nq84Xp-dfEB-Tso1-wOws-X0PT-WxGT-vB3EbQ (last written to /dev/sdh).
  LV       Devices                                                                             Usable          Health
  raid10lv raid10lv_rimage_0(0),raid10lv_rimage_1(0),raid10lv_rimage_2(0),raid10lv_rimage_3(0) usable          partial

---- re-insert disk, but disk major:minor changed ----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                       8:96   0  100M  0 disk
├─vg1-raid10lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdi                       8:128  0  100M  0 disk
├─vg1-raid10lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdj                       8:144  0  100M  0 disk
├─vg1-raid10lv_rmeta_3  254:6    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_3 254:7    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdk                       8:160  0  100M  0 disk
vda                     253:0    0   40G  0 disk
├─vda1                  253:1    0    8M  0 part
├─vda2                  253:2    0   38G  0 part /
└─vda3                  253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                  Devices                                                                             Usable          Health
  raid10lv            raid10lv_rimage_0(0),raid10lv_rimage_1(0),raid10lv_rimage_2(0),raid10lv_rimage_3(0) usable
  [raid10lv_rimage_0] /dev/sdg(1)                                                                         usable
  [raid10lv_rimage_1] /dev/sdk(1)                                                                         not usable
  [raid10lv_rimage_2] /dev/sdi(1)                                                                         usable
  [raid10lv_rimage_3] /dev/sdj(1)                                                                         usable
  [raid10lv_rmeta_0]  /dev/sdg(0)                                                                         usable
  [raid10lv_rmeta_1]  /dev/sdk(0)                                                                         not usable
  [raid10lv_rmeta_2]  /dev/sdi(0)                                                                         usable
  [raid10lv_rmeta_3]  /dev/sdj(0)                                                                         usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV       Devices                                                                             Usable          Health
  raid10lv raid10lv_rimage_0(0),raid10lv_rimage_1(0),raid10lv_rimage_2(0),raid10lv_rimage_3(0) usable

------- removed 2 disks from a freshly created raid10 array ------

[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type raid10 -l 100%FREE -n raid10lv vg1
  Using default stripesize 64.00 KiB.
  Logical volume "raid10lv" created.
[tb-clustermd2 lvm2.sourceware.git]#
[tb-clustermd2 lvm2.sourceware.git]#
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                       8:96   0  100M  0 disk
├─vg1-raid10lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdi                       8:128  0  100M  0 disk
├─vg1-raid10lv_rmeta_1  254:2    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_1 254:3    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdj                       8:144  0  100M  0 disk
├─vg1-raid10lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdk                       8:160  0  100M  0 disk
├─vg1-raid10lv_rmeta_3  254:6    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_3 254:7    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
vda                     253:0    0   40G  0 disk
├─vda1                  253:1    0    8M  0 part
├─vda2                  253:2    0   38G  0 part /
└─vda3                  253:3    0    2G  0 part [SWAP]

  ** remove 2 disks **

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdi                       8:128  0  100M  0 disk
├─vg1-raid10lv_rmeta_1  254:2    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_1 254:3    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
sdk                       8:160  0  100M  0 disk
├─vg1-raid10lv_rmeta_3  254:6    0    4M  0 lvm
│ └─vg1-raid10lv        254:8    0  184M  0 lvm
└─vg1-raid10lv_rimage_3 254:7    0   92M  0 lvm
  └─vg1-raid10lv        254:8    0  184M  0 lvm
vda                     253:0    0   40G  0 disk
├─vda1                  253:1    0    8M  0 part
├─vda2                  253:2    0   38G  0 part /
└─vda3                  253:3    0    2G  0 part [SWAP]

[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid bsIGCe-p0co-Yxlw-hRsw-crms-szAH-HpWkBl.
  WARNING: Couldn't find device with uuid tazWyL-kmWh-Ful2-8h81-knsU-8Smg-H7O8n5.
  WARNING: VG vg1 is missing PV bsIGCe-p0co-Yxlw-hRsw-crms-szAH-HpWkBl (last written to /dev/sdg).
  WARNING: VG vg1 is missing PV tazWyL-kmWh-Ful2-8h81-knsU-8Smg-H7O8n5 (last written to /dev/sdj).
  LV                  Devices                                                                             Usable          Health
  raid10lv            raid10lv_rimage_0(0),raid10lv_rimage_1(0),raid10lv_rimage_2(0),raid10lv_rimage_3(0) not usable      partial
  [raid10lv_rimage_0] [unknown](1)                                                                        not usable      partial
  [raid10lv_rimage_1] /dev/sdi(1)                                                                         usable
  [raid10lv_rimage_2] [unknown](1)                                                                        not usable      partial
  [raid10lv_rimage_3] /dev/sdk(1)                                                                         usable
  [raid10lv_rmeta_0]  [unknown](0)                                                                        not usable      partial
  [raid10lv_rmeta_1]  /dev/sdi(0)                                                                         usable
  [raid10lv_rmeta_2]  [unknown](0)                                                                        not usable      partial
  [raid10lv_rmeta_3]  /dev/sdk(0)                                                                         usable

## raid4

[tb-clustermd2 lvm2.sourceware.git]# vgcreate vg1 /dev/sd{g,h,i}
  Physical volume "/dev/sdg" successfully created.
  Physical volume "/dev/sdh" successfully created.
  Physical volume "/dev/sdi" successfully created.
  Volume group "vg1" successfully created
[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type raid4 -l 100%FREE -n raid4lv vg1
  Using default stripesize 64.00 KiB.
  Logical volume "raid4lv" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
├─vg1-raid4lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
sdh                      8:112  0  100M  0 disk
├─vg1-raid4lv_rmeta_1  254:2    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_1 254:3    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid4lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                                     Usable          Health
  raid4lv            raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) usable
  [raid4lv_rimage_0] /dev/sdg(1)                                                 usable
  [raid4lv_rimage_1] /dev/sdh(1)                                                 usable
  [raid4lv_rimage_2] /dev/sdi(1)                                                 usable
  [raid4lv_rmeta_0]  /dev/sdg(0)                                                 usable
  [raid4lv_rmeta_1]  /dev/sdh(0)                                                 usable
  [raid4lv_rmeta_2]  /dev/sdi(0)                                                 usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                                     Usable          Health
  raid4lv raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) usable

---- removed one disk -----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                      8:0    0  300M  0 disk
sdb                      8:16   0  300M  0 disk
sdc                      8:32   0  300M  0 disk
sdd                      8:48   0  300M  0 disk
sde                      8:64   0  300M  0 disk
sdf                      8:80   0  300M  0 disk
sdg                      8:96   0  100M  0 disk
├─vg1-raid4lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid4lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid F2lYBQ-5w7f-HS03-6V8N-ZdNe-7vpc-19wkuY.
  WARNING: VG vg1 is missing PV F2lYBQ-5w7f-HS03-6V8N-ZdNe-7vpc-19wkuY (last written to /dev/sdh).
  LV                 Devices                                                     Usable          Health
  raid4lv            raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) usable          partial
  [raid4lv_rimage_0] /dev/sdg(1)                                                 usable
  [raid4lv_rimage_1] [unknown](1)                                                not usable      partial
  [raid4lv_rimage_2] /dev/sdi(1)                                                 usable
  [raid4lv_rmeta_0]  /dev/sdg(0)                                                 usable
  [raid4lv_rmeta_1]  [unknown](0)                                                not usable      partial
  [raid4lv_rmeta_2]  /dev/sdi(0)                                                 usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid F2lYBQ-5w7f-HS03-6V8N-ZdNe-7vpc-19wkuY.
  WARNING: VG vg1 is missing PV F2lYBQ-5w7f-HS03-6V8N-ZdNe-7vpc-19wkuY (last written to /dev/sdh).
  LV      Devices                                                     Usable          Health
  raid4lv raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) usable          partial



---- re-insert disk, but disk major:minor changed ----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                      8:0    0  300M  0 disk
sdb                      8:16   0  300M  0 disk
sdc                      8:32   0  300M  0 disk
sdd                      8:48   0  300M  0 disk
sde                      8:64   0  300M  0 disk
sdf                      8:80   0  300M  0 disk
sdg                      8:96   0  100M  0 disk
├─vg1-raid4lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid4lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
sdj                      8:144  0  100M  0 disk
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                                     Usable          Health
  raid4lv            raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) usable
  [raid4lv_rimage_0] /dev/sdg(1)                                                 usable
  [raid4lv_rimage_1] /dev/sdj(1)                                                 not usable
  [raid4lv_rimage_2] /dev/sdi(1)                                                 usable
  [raid4lv_rmeta_0]  /dev/sdg(0)                                                 usable
  [raid4lv_rmeta_1]  /dev/sdj(0)                                                 not usable
  [raid4lv_rmeta_2]  /dev/sdi(0)                                                 usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                                     Usable          Health
  raid4lv raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) usable

  ** remove 2 disks from a freshly created raid4 array **
[tb-clustermd2 lvm2.sourceware.git]# vgcreate vg1 /dev/sdg /dev/sdi /dev/sdj
  Physical volume "/dev/sdg" successfully created.
  Physical volume "/dev/sdi" successfully created.
  Physical volume "/dev/sdj" successfully created.
  Volume group "vg1" successfully created
[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type raid4 -l 100%FREE -n raid4lv vg1
  Using default stripesize 64.00 KiB.
  Logical volume "raid4lv" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
├─vg1-raid4lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid4lv_rmeta_1  254:2    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_1 254:3    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
sdj                      8:144  0  100M  0 disk
├─vg1-raid4lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                      8:0    0  300M  0 disk
sdb                      8:16   0  300M  0 disk
sdc                      8:32   0  300M  0 disk
sdd                      8:48   0  300M  0 disk
sde                      8:64   0  300M  0 disk
sdf                      8:80   0  300M  0 disk
sdg                      8:96   0  100M  0 disk
├─vg1-raid4lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid4lv        254:6    0  184M  0 lvm
└─vg1-raid4lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid4lv        254:6    0  184M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid QVsbZQ-Cy34-HXZO-fYkk-8kH4-N3Vr-katDI2.
  WARNING: Couldn't find device with uuid Dxd3k6-RwBz-OYKG-ZexP-3yqf-iXVw-cqfWvx.
  WARNING: VG vg1 is missing PV QVsbZQ-Cy34-HXZO-fYkk-8kH4-N3Vr-katDI2 (last written to /dev/sdi).
  WARNING: VG vg1 is missing PV Dxd3k6-RwBz-OYKG-ZexP-3yqf-iXVw-cqfWvx (last written to /dev/sdj).
  LV                 Devices                                                     Usable          Health
  raid4lv            raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) not usable      partial
  [raid4lv_rimage_0] /dev/sdg(1)                                                 usable
  [raid4lv_rimage_1] [unknown](1)                                                not usable      partial
  [raid4lv_rimage_2] [unknown](1)                                                not usable      partial
  [raid4lv_rmeta_0]  /dev/sdg(0)                                                 usable
  [raid4lv_rmeta_1]  [unknown](0)                                                not usable      partial
  [raid4lv_rmeta_2]  [unknown](0)                                                not usable      partial
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid QVsbZQ-Cy34-HXZO-fYkk-8kH4-N3Vr-katDI2.
  WARNING: Couldn't find device with uuid Dxd3k6-RwBz-OYKG-ZexP-3yqf-iXVw-cqfWvx.
  WARNING: VG vg1 is missing PV QVsbZQ-Cy34-HXZO-fYkk-8kH4-N3Vr-katDI2 (last written to /dev/sdi).
  WARNING: VG vg1 is missing PV Dxd3k6-RwBz-OYKG-ZexP-3yqf-iXVw-cqfWvx (last written to /dev/sdj).
  LV      Devices                                                     Usable          Health
  raid4lv raid4lv_rimage_0(0),raid4lv_rimage_1(0),raid4lv_rimage_2(0) not usable      partial

## raid5

[tb-clustermd2 lvm2.sourceware.git]# lvcreate --type raid5 -l 100%FREE -n raid5lv vg1
  Using default stripesize 64.00 KiB.
  Logical volume "raid5lv" created.
[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
├─vg1-raid5lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid5lv        254:6    0  184M  0 lvm
└─vg1-raid5lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid5lv        254:6    0  184M  0 lvm
sdh                      8:112  0  100M  0 disk
├─vg1-raid5lv_rmeta_1  254:2    0    4M  0 lvm
│ └─vg1-raid5lv        254:6    0  184M  0 lvm
└─vg1-raid5lv_rimage_1 254:3    0   92M  0 lvm
  └─vg1-raid5lv        254:6    0  184M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid5lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid5lv        254:6    0  184M  0 lvm
└─vg1-raid5lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid5lv        254:6    0  184M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]


[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                                     Usable          Health
  raid5lv            raid5lv_rimage_0(0),raid5lv_rimage_1(0),raid5lv_rimage_2(0) usable
  [raid5lv_rimage_0] /dev/sdg(1)                                                 usable
  [raid5lv_rimage_1] /dev/sdh(1)                                                 usable
  [raid5lv_rimage_2] /dev/sdi(1)                                                 usable
  [raid5lv_rmeta_0]  /dev/sdg(0)                                                 usable
  [raid5lv_rmeta_1]  /dev/sdh(0)                                                 usable
  [raid5lv_rmeta_2]  /dev/sdi(0)                                                 usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                                     Usable          Health
  raid5lv raid5lv_rimage_0(0),raid5lv_rimage_1(0),raid5lv_rimage_2(0) usable

---- removed one disk -----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                      8:0    0  300M  0 disk
sdb                      8:16   0  300M  0 disk
sdc                      8:32   0  300M  0 disk
sdd                      8:48   0  300M  0 disk
sde                      8:64   0  300M  0 disk
sdf                      8:80   0  300M  0 disk
sdg                      8:96   0  100M  0 disk
├─vg1-raid5lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid5lv        254:6    0  184M  0 lvm
└─vg1-raid5lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid5lv        254:6    0  184M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid5lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid5lv        254:6    0  184M  0 lvm
└─vg1-raid5lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid5lv        254:6    0  184M  0 lvm
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid 9WTdNI-YSLs-yWZm-6dnQ-kbLp-cb1n-gKZLCl.
  WARNING: VG vg1 is missing PV 9WTdNI-YSLs-yWZm-6dnQ-kbLp-cb1n-gKZLCl (last written to /dev/sdh).
  LV                 Devices                                                     Usable          Health
  raid5lv            raid5lv_rimage_0(0),raid5lv_rimage_1(0),raid5lv_rimage_2(0) usable          partial
  [raid5lv_rimage_0] /dev/sdg(1)                                                 usable
  [raid5lv_rimage_1] [unknown](1)                                                not usable      partial
  [raid5lv_rimage_2] /dev/sdi(1)                                                 usable
  [raid5lv_rmeta_0]  /dev/sdg(0)                                                 usable
  [raid5lv_rmeta_1]  [unknown](0)                                                not usable      partial
  [raid5lv_rmeta_2]  /dev/sdi(0)                                                 usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  WARNING: Couldn't find device with uuid 9WTdNI-YSLs-yWZm-6dnQ-kbLp-cb1n-gKZLCl.
  WARNING: VG vg1 is missing PV 9WTdNI-YSLs-yWZm-6dnQ-kbLp-cb1n-gKZLCl (last written to /dev/sdh).
  LV      Devices                                                     Usable          Health
  raid5lv raid5lv_rimage_0(0),raid5lv_rimage_1(0),raid5lv_rimage_2(0) usable          partial

---- re-inserted the disk, but its major:minor changed ----

[tb-clustermd2 lvm2.sourceware.git]# lsblk
NAME                   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sdg                      8:96   0  100M  0 disk
├─vg1-raid5lv_rmeta_0  254:0    0    4M  0 lvm
│ └─vg1-raid5lv        254:6    0  184M  0 lvm
└─vg1-raid5lv_rimage_0 254:1    0   92M  0 lvm
  └─vg1-raid5lv        254:6    0  184M  0 lvm
sdi                      8:128  0  100M  0 disk
├─vg1-raid5lv_rmeta_2  254:4    0    4M  0 lvm
│ └─vg1-raid5lv        254:6    0  184M  0 lvm
└─vg1-raid5lv_rimage_2 254:5    0   92M  0 lvm
  └─vg1-raid5lv        254:6    0  184M  0 lvm
sdj                      8:144  0  100M  0 disk
vda                    253:0    0   40G  0 disk
├─vda1                 253:1    0    8M  0 part
├─vda2                 253:2    0   38G  0 part /
└─vda3                 253:3    0    2G  0 part [SWAP]
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -a -o name,devices,lv_usable,lv_health_status
  LV                 Devices                                                     Usable          Health
  raid5lv            raid5lv_rimage_0(0),raid5lv_rimage_1(0),raid5lv_rimage_2(0) usable
  [raid5lv_rimage_0] /dev/sdg(1)                                                 usable
  [raid5lv_rimage_1] /dev/sdj(1)                                                 not usable
  [raid5lv_rimage_2] /dev/sdi(1)                                                 usable
  [raid5lv_rmeta_0]  /dev/sdg(0)                                                 usable
  [raid5lv_rmeta_1]  /dev/sdj(0)                                                 not usable
  [raid5lv_rmeta_2]  /dev/sdi(0)                                                 usable
[tb-clustermd2 lvm2.sourceware.git]# ./tools/lvm lvs -o name,devices,lv_usable,lv_health_status
  LV      Devices                                                     Usable          Health
  raid5lv raid5lv_rimage_0(0),raid5lv_rimage_1(0),raid5lv_rimage_2(0) usable

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [linux-lvm] [PATCH] lvs: add -o lv_usable
  2020-09-05  9:06 [linux-lvm] [PATCH] lvs: add -o lv_usable Zhao Heming
  2020-09-05  9:08 ` heming.zhao
@ 2020-09-05  9:17 ` heming.zhao
  1 sibling, 0 replies; 6+ messages in thread
From: heming.zhao @ 2020-09-05  9:17 UTC (permalink / raw)
  To: linux-lvm; +Cc: teigland, zkabelac



On 9/5/20 5:06 PM, Zhao Heming wrote:
> report LV is usable for upper layer.
> 
> Signed-off-by: Zhao Heming <heming.zhao@suse.com>
> ---
>   lib/activate/activate.h          |   2 +
>   lib/activate/dev_manager.c       |  67 ++++++++++++++++
>   lib/metadata/metadata-exported.h |   1 +
>   lib/metadata/metadata.c          | 130 +++++++++++++++++++++++++++++++
>   lib/report/columns.h             |   1 +
>   lib/report/properties.c          |   2 +
>   lib/report/report.c              |  13 ++++
>   lib/report/values.h              |   1 +
>   8 files changed, 217 insertions(+)
> 

There are some places that need improvement.

1. In lib/activate/dev_manager.c, the functions lv_mapping_table() and
   dm_has_lvdev() are almost the same; they could be merged into one function.

2. The construction of the LV device-mapper name needs improvement.

  In lib/metadata/metadata.c, _lv_is_usable(), I use the ugly code below
  to construct the dm name:

  ```
  /* format is right: "vgname" + '-' + "lvname" ?? */
  snprintf(lvname, 50, "%s-%s", lv->vg->name, seg_lv(seg, s)->name);
  ... ...
  /* format is right: "vgname" + '-' + "lvname" ?? */
  snprintf(lvname, 50, "%s-%s", lv->vg->name, lv->name);
  ```
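
For illustration only - the kernel dm name doubles every '-' that occurs
inside the VG or LV name (libdevmapper's dm_build_dm_name() handles this),
so a plain "%s-%s" is ambiguous for names containing hyphens, and the fixed
50-byte buffer can truncate long names. A minimal standalone mangler:

```c
#include <stdlib.h>
#include <string.h>

/* Build "<vg>-<lv>" with '-' inside either name doubled, e.g. VG "my-vg"
 * and LV "lv0" yield "my--vg-lv0".  Caller frees the returned string. */
static char *build_dm_name(const char *vg, const char *lv)
{
	/* worst case: every char doubled, plus the joining '-' and NUL */
	char *out = malloc(2 * (strlen(vg) + strlen(lv)) + 2);
	char *p = out;
	const char *s;

	if (!out)
		return NULL;
	for (s = vg; *s; s++)
		if ((*p++ = *s) == '-')
			*p++ = '-';	/* escape '-' inside the VG name */
	*p++ = '-';			/* single '-' joins VG and LV */
	for (s = lv; *s; s++)
		if ((*p++ = *s) == '-')
			*p++ = '-';	/* escape '-' inside the LV name */
	*p = '\0';
	return out;
}
```

With vg1 and raid5lv from the tests above this yields "vg1-raid5lv",
matching the lsblk output.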


Thanks

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [linux-lvm] [PATCH] lvs: add -o lv_usable
  2020-09-05  9:08 ` heming.zhao
@ 2020-09-07 14:32   ` Zdenek Kabelac
  2020-09-08  6:40     ` heming.zhao
  0 siblings, 1 reply; 6+ messages in thread
From: Zdenek Kabelac @ 2020-09-07 14:32 UTC (permalink / raw)
  To: LVM general discussion and development, heming.zhao; +Cc: teigland, zkabelac

On 05. 09. 20 at 11:08, heming.zhao@suse.com wrote:
> 
> 
> On 9/5/20 5:06 PM, Zhao Heming wrote:
>> report LV is usable for upper layer.
>>
>> Signed-off-by: Zhao Heming <heming.zhao@suse.com>
>> ---
>>    lib/activate/activate.h          |   2 +
>>    lib/activate/dev_manager.c       |  67 ++++++++++++++++
>>    lib/metadata/metadata-exported.h |   1 +
>>    lib/metadata/metadata.c          | 130 +++++++++++++++++++++++++++++++
>>    lib/report/columns.h             |   1 +
>>    lib/report/properties.c          |   2 +
>>    lib/report/report.c              |  13 ++++
>>    lib/report/values.h              |   1 +
>>    8 files changed, 217 insertions(+)
>>
> 
> my test result:
> 
> ## linear
> 

Hi

We will need to take a closer look - the patchset itself is not going in the
right direction, since all the info about missing or present devices is
already available within the lvm engine, and we want to minimize 'repeated'
queries (ideally all the information should be checked only once; otherwise
the decision state machine produces random, garbage end results, which are
very hard to trace and debug).

So any code that performs the runtime query again cannot be accepted.

The next main rule is that we cache status values whenever possible, so
there should be at most one 'status' query per device per LV. Since lvm2
already knows, from the scanning phase and from metadata parsing, which
device is missing, the logic for evaluating an LV's usability needs to be
based on those values.

But I think we also need some per-segment methods for evaluating usability.
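
For illustration only - a toy sketch of such per-segment methods, using
stand-in structs rather than lvm2's real segtype API:

```c
#include <stdbool.h>

/* Stand-in structs; real lvm2 would hang such methods off segment-type ops. */
struct pv_stub { bool dev_present; bool flagged_missing; };
struct seg_stub {
	unsigned area_count;
	const struct pv_stub *area_pv[8];		/* PV backing each area */
	bool (*is_usable)(const struct seg_stub *seg);	/* per-segment method */
};

static unsigned _missing_areas(const struct seg_stub *seg)
{
	unsigned s, missing = 0;

	for (s = 0; s < seg->area_count; s++) {
		const struct pv_stub *pv = seg->area_pv[s];
		if (!pv || !pv->dev_present || pv->flagged_missing)
			missing++;
	}
	return missing;
}

/* linear/striped: any missing area makes the segment unusable */
static bool _linear_is_usable(const struct seg_stub *seg)
{
	return _missing_areas(seg) == 0;
}

/* raid5: single parity, so one missing image leaves it usable (degraded) */
static bool _raid5_is_usable(const struct seg_stub *seg)
{
	return _missing_areas(seg) <= 1;
}
```

This matches the raid5 test results above, where the LV stays usable with
one rimage/rmeta pair on a missing PV while a linear LV would not be.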

Regards

Zdenek

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [linux-lvm] [PATCH] lvs: add -o lv_usable
  2020-09-07 14:32   ` Zdenek Kabelac
@ 2020-09-08  6:40     ` heming.zhao
  2020-09-08 17:47       ` David Teigland
  0 siblings, 1 reply; 6+ messages in thread
From: heming.zhao @ 2020-09-08  6:40 UTC (permalink / raw)
  To: Zdenek Kabelac, LVM general discussion and development; +Cc: teigland

On 9/7/20 10:32 PM, Zdenek Kabelac wrote:
> On 05. 09. 20 at 11:08, heming.zhao@suse.com wrote:
>>
>>
>> On 9/5/20 5:06 PM, Zhao Heming wrote:
>>> report LV is usable for upper layer.
>>>
>>> Signed-off-by: Zhao Heming <heming.zhao@suse.com>
>>> ---
>>>    lib/activate/activate.h          |   2 +
>>>    lib/activate/dev_manager.c       |  67 ++++++++++++++++
>>>    lib/metadata/metadata-exported.h |   1 +
>>>    lib/metadata/metadata.c          | 130 +++++++++++++++++++++++++++++++
>>>    lib/report/columns.h             |   1 +
>>>    lib/report/properties.c          |   2 +
>>>    lib/report/report.c              |  13 ++++
>>>    lib/report/values.h              |   1 +
>>>    8 files changed, 217 insertions(+)
>>>
>>
>> my test result:
>>
>> ## linear
>>
> 
> Hi
> 
> We will need to take a closer look - the patchset itself is not going in the
> right direction, since all the info about missing or present devices is
> already available within the lvm engine, and we want to minimize 'repeated'
> queries (ideally all the information should be checked only once; otherwise
> the decision state machine produces random, garbage end results, which are
> very hard to trace and debug).
> 
> So any code that performs the runtime query again cannot be accepted.
> 
> The next main rule is that we cache status values whenever possible, so
> there should be at most one 'status' query per device per LV. Since lvm2
> already knows, from the scanning phase and from metadata parsing, which
> device is missing, the logic for evaluating an LV's usability needs to be
> based on those values.
> 
> But I think we also need some per-segment methods for evaluating usability.
> 
> Regards
> 
> Zdenek
> 

Hello Zdenek,

Thank you for your review comments; I understand your point.

Would it be acceptable to add a new status bit in lv->status?
I previously sent it in the patch "[PATCH v2] lib/metadata: add new api lv_is_available()".
The definition is as below (the 'NOT_AVAIL_LV' will be renamed to 'NOT_USABLE_LV'):
```
+#define NOT_AVAIL_LV	UINT64_C(0x0000000080000000)	/* LV - derived flag, not
+							   written out in metadata*/

+#define lv_is_available(lv)	(((lv)->status & NOT_AVAIL_LV) ? 0 : 1)
```

Some description of the new patch:
- it will combine the two patches:
   - [PATCH v2] lib/metadata: add new api lv_is_available()
   - [PATCH] lvs: add -o lv_usable
- it will add the new status bit NOT_USABLE_LV, and this bit will be set in
   _lv_mark_if_partial_single() (see the sketch below).
- the only output related to the new status bit is: lvs -o lv_usable

If the above is acceptable, I will send the v2 patch; if not, I will give up on this 'lv_usable' patch.
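
For illustration only - a minimal sketch of how such a derived bit could be
set during the existing partial-marking walk; the struct is a stand-in, the
PARTIAL_LV value is illustrative, and NOT_USABLE_LV reuses the value above:

```c
#include <stdint.h>

/* Stand-in for lvm2's struct logical_volume. */
struct lv_stub { uint64_t status; };

#define PARTIAL_LV	UINT64_C(0x0000000001000000)	/* existing derived flag */
#define NOT_USABLE_LV	UINT64_C(0x0000000080000000)	/* proposed derived flag */
#define lv_is_usable(lv)	(!((lv)->status & NOT_USABLE_LV))

/*
 * Run in the same walk that marks PARTIAL_LV today, so usability is derived
 * once from cached metadata instead of re-queried from device-mapper.
 * tolerated_failures would come from the segment type (0 linear, 1 raid5).
 */
static void mark_if_partial(struct lv_stub *lv, unsigned missing_areas,
			    unsigned tolerated_failures)
{
	if (!missing_areas)
		return;
	lv->status |= PARTIAL_LV;
	if (missing_areas > tolerated_failures)
		lv->status |= NOT_USABLE_LV;
}
```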

Thanks

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [linux-lvm] [PATCH] lvs: add -o lv_usable
  2020-09-08  6:40     ` heming.zhao
@ 2020-09-08 17:47       ` David Teigland
  0 siblings, 0 replies; 6+ messages in thread
From: David Teigland @ 2020-09-08 17:47 UTC (permalink / raw)
  To: heming.zhao; +Cc: linux-lvm, Zdenek Kabelac

On Tue, Sep 08, 2020 at 02:40:43PM +0800, heming.zhao wrote:
> Would it be acceptable to add a new status bit in lv->status?
> I previously sent it in the patch "[PATCH v2] lib/metadata: add new api lv_is_available()".
> The definition is as below (the 'NOT_AVAIL_LV' will be renamed to 'NOT_USABLE_LV'):
> ```
> +#define NOT_AVAIL_LV	UINT64_C(0x0000000080000000)	/* LV - derived flag, not
> +							   written out in metadata*/
> 
> +#define lv_is_available(lv)	(((lv)->status & NOT_AVAIL_LV) ? 0 : 1)
> ```
> 
> Some description of the new patch:
> - it will combine the two patches:
>   - [PATCH v2] lib/metadata: add new api lv_is_available()
>   - [PATCH] lvs: add -o lv_usable
> - it will add the new status bit NOT_USABLE_LV, and this bit will be set in
>   _lv_mark_if_partial_single().
> - the only output related to the new status bit is: lvs -o lv_usable
> 
> If the above is acceptable, I will send the v2 patch; if not, I will give up on this 'lv_usable' patch.

That sounds better; the old patch was closer to what we need.  It can look
at the PVs listed for the LV in the metadata, and check whether those PVs
have a device on the system (pv->dev is set) and are not flagged missing.
Device-mapper state would not be needed for that (the lv_mapping_table and
dm_has_lvdev functions are not needed).
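
For illustration only - a minimal sketch of that metadata-only check, with
stand-in structs and an illustrative MISSING_PV value rather than lvm2's
real definitions:

```c
#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for lvm2's metadata structs; the flag value is illustrative. */
#define MISSING_PV	UINT64_C(0x0000000000000040)
struct dev_stub;					/* opaque device handle */
struct pv_stub { struct dev_stub *dev; uint64_t status; };

/*
 * A PV usably backs an LV only when a device was found for it on the
 * system (pv->dev is set) and it is not flagged missing in the metadata.
 * No device-mapper table query is involved.
 */
static bool pv_is_present(const struct pv_stub *pv)
{
	return pv->dev != NULL && !(pv->status & MISSING_PV);
}
```

An LV would then count as usable when every PV its metadata references
passes this check, modulo per-segment redundancy as Zdenek noted.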

Reporting info about the active state of LVs is more complex and requires a
different sort of code, as Zdenek mentioned.

Dave

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2020-09-08 17:47 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-05  9:06 [linux-lvm] [PATCH] lvs: add -o lv_usable Zhao Heming
2020-09-05  9:08 ` heming.zhao
2020-09-07 14:32   ` Zdenek Kabelac
2020-09-08  6:40     ` heming.zhao
2020-09-08 17:47       ` David Teigland
2020-09-05  9:17 ` heming.zhao

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).