* [dm-devel] [PATCH] dm-raid: fix out of memory accesses in dm-raid
@ 2022-06-27 13:00 ` Mikulas Patocka
  0 siblings, 0 replies; 6+ messages in thread
From: Mikulas Patocka @ 2022-06-27 13:00 UTC (permalink / raw)
  To: Mike Snitzer, Song Liu
  Cc: Heinz Mauelshagen, Marian Csontos, linux-raid, dm-devel, Ming Lei

dm-raid allocates the array of devices with rs->raid_disks entries and
then accesses it in a loop for rs->md.raid_disks. During reshaping,
rs->md.raid_disks may be greater than rs->raid_disks, so it accesses
entries beyond the end of the array.

We fix this bug by limiting the iteration to rs->raid_disks.
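
For illustration, the mismatch looks roughly like this; the structure below is a
simplified stand-in for the dm-raid types (field and type names abbreviated),
not the literal driver code:

    #include <stdlib.h>

    /* Simplified stand-in for struct raid_set (illustrative only). */
    struct raid_dev { int in_sync; };

    struct raid_set {
            unsigned int md_raid_disks; /* rs->md.raid_disks: disk count during/after reshape */
            unsigned int raid_disks;    /* rs->raid_disks: number of dev[] entries allocated */
            struct raid_dev dev[];      /* flexible array member */
    };

    int main(void)
    {
            unsigned int allocated = 2, target = 4, i;

            /* dev[] is sized by rs->raid_disks ... */
            struct raid_set *rs = calloc(1, sizeof(*rs) + allocated * sizeof(rs->dev[0]));
            rs->raid_disks = allocated;
            rs->md_raid_disks = target;     /* larger while reshaping */

            /* ... but the loops fixed below iterate up to rs->md.raid_disks,
             * touching dev[2] and dev[3], which were never allocated.
             */
            for (i = 0; i < rs->md_raid_disks; i++)
                    (void)rs->dev[i].in_sync;       /* out-of-bounds read for i >= rs->raid_disks */

            free(rs);
            return 0;
    }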

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org

---
 drivers/md/dm-raid.c |   12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

Index: linux-2.6/drivers/md/dm-raid.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-raid.c	2022-06-27 14:45:30.000000000 +0200
+++ linux-2.6/drivers/md/dm-raid.c	2022-06-27 14:54:02.000000000 +0200
@@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru
 	unsigned int rebuilds_per_group = 0, copies;
 	unsigned int group_size, last_group_start;
 
-	for (i = 0; i < rs->md.raid_disks; i++)
+	for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++)
 		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
 		    !rs->dev[i].rdev.sb_page)
 			rebuild_cnt++;
@@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru
 		 *	    C	 D    D	   E	E
 		 */
 		if (__is_raid10_near(rs->md.new_layout)) {
-			for (i = 0; i < rs->md.raid_disks; i++) {
+			for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
 				if (!(i % copies))
 					rebuilds_per_group = 0;
 				if ((!rs->dev[i].rdev.sb_page ||
@@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru
 		group_size = (rs->md.raid_disks / copies);
 		last_group_start = (rs->md.raid_disks / group_size) - 1;
 		last_group_start *= group_size;
-		for (i = 0; i < rs->md.raid_disks; i++) {
+		for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
 			if (!(i % copies) && !(i > last_group_start))
 				rebuilds_per_group = 0;
 			if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra
 {
 	int i;
 
-	for (i = 0; i < rs->md.raid_disks; i++) {
+	for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
 		struct md_rdev *rdev = &rs->dev[i].rdev;
 
 		if (!test_bit(Journal, &rdev->flags) &&
@@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d
 	unsigned int i;
 	int r = 0;
 
-	for (i = 0; !r && i < rs->md.raid_disks; i++)
+	for (i = 0; !r && i < rs->md.raid_disks && i < rs->raid_disks; i++)
 		if (rs->dev[i].data_dev)
 			r = fn(ti,
 				 rs->dev[i].data_dev,
@@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de
 
 	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
 
-	for (i = 0; i < mddev->raid_disks; i++) {
+	for (i = 0; i < mddev->raid_disks && i < rs->raid_disks; i++) {
 		r = &rs->dev[i].rdev;
 		/* HM FIXME: enhance journal device recovery processing */
 		if (test_bit(Journal, &r->flags))


* Re: [dm-devel] [PATCH] dm-raid: fix out of memory accesses in dm-raid
  2022-06-27 13:00 ` Mikulas Patocka
@ 2022-06-27 13:51 ` Heinz Mauelshagen
  2022-06-27 13:56     ` [dm-devel] " Mikulas Patocka
  -1 siblings, 1 reply; 6+ messages in thread
From: Heinz Mauelshagen @ 2022-06-27 13:51 UTC (permalink / raw)
  To: Mikulas Patocka
  Cc: Mike Snitzer, Marian Csontos, linux-raid, Song Liu, dm-devel, Ming Lei



NACK: the loop conditionals have to be bounded by rs->raid_disks, because we
add legs to raid1 or stripes to raid4/5/6/10, in which case rs->raid_disks
will be larger than rs->md.raid_disks.  With this patch, those new legs
wouldn't be processed during RAID validation or device iteration.

Will rework the patch and share it here after running it through the lvm
test suite's raid tests...
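
To make the direction of the mismatch concrete, a schematic sketch
(iterate_members() and process_member() are hypothetical placeholders, not
driver functions):

    /* Schematic only: the two counts can differ in either direction.
     *
     *   growing (adding raid1 legs / raid4/5/6/10 stripes): rs->raid_disks > rs->md.raid_disks
     *   reshaping as in the original report:                rs->md.raid_disks > rs->raid_disks
     *
     * A bound of "i < rs->md.raid_disks && i < rs->raid_disks" stops at the
     * smaller count, so in the growing case the newly added legs are skipped.
     * Bounding by rs->raid_disks alone matches the dev[] allocation and still
     * visits every allocated member.
     */
    static void iterate_members(struct raid_set *rs)
    {
            unsigned int i;

            for (i = 0; i < rs->raid_disks; i++)    /* dev[] has rs->raid_disks entries */
                    process_member(&rs->dev[i]);
    }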

On Mon, Jun 27, 2022 at 3:00 PM Mikulas Patocka <mpatocka@redhat.com> wrote:

> dm-raid allocates the array of devices with rs->raid_disks entries and
> then accesses it in a loop for rs->md.raid_disks. During reshaping,
> rs->md.raid_disks may be greater than rs->raid_disks, so it accesses
> entries beyond the end of the array.
>
> We fix this bug by limiting the iteration to rs->raid_disks.
>
> Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
> Cc: stable@vger.kernel.org
>
> ---
>  drivers/md/dm-raid.c |   12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> Index: linux-2.6/drivers/md/dm-raid.c
> ===================================================================
> --- linux-2.6.orig/drivers/md/dm-raid.c 2022-06-27 14:45:30.000000000 +0200
> +++ linux-2.6/drivers/md/dm-raid.c      2022-06-27 14:54:02.000000000 +0200
> @@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru
>         unsigned int rebuilds_per_group = 0, copies;
>         unsigned int group_size, last_group_start;
>
> -       for (i = 0; i < rs->md.raid_disks; i++)
> +       for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++)
>                 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
>                     !rs->dev[i].rdev.sb_page)
>                         rebuild_cnt++;
> @@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru
>                  *          C    D    D    E    E
>                  */
>                 if (__is_raid10_near(rs->md.new_layout)) {
> -                       for (i = 0; i < rs->md.raid_disks; i++) {
> +                       for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
>                                 if (!(i % copies))
>                                         rebuilds_per_group = 0;
>                                 if ((!rs->dev[i].rdev.sb_page ||
> @@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru
>                 group_size = (rs->md.raid_disks / copies);
>                 last_group_start = (rs->md.raid_disks / group_size) - 1;
>                 last_group_start *= group_size;
> -               for (i = 0; i < rs->md.raid_disks; i++) {
> +               for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
>                         if (!(i % copies) && !(i > last_group_start))
>                                 rebuilds_per_group = 0;
>                         if ((!rs->dev[i].rdev.sb_page ||
> @@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra
>  {
>         int i;
>
> -       for (i = 0; i < rs->md.raid_disks; i++) {
> +       for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
>                 struct md_rdev *rdev = &rs->dev[i].rdev;
>
>                 if (!test_bit(Journal, &rdev->flags) &&
> @@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d
>         unsigned int i;
>         int r = 0;
>
> -       for (i = 0; !r && i < rs->md.raid_disks; i++)
> +       for (i = 0; !r && i < rs->md.raid_disks && i < rs->raid_disks; i++)
>                 if (rs->dev[i].data_dev)
>                         r = fn(ti,
>                                  rs->dev[i].data_dev,
> @@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de
>
>         memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
>
> -       for (i = 0; i < mddev->raid_disks; i++) {
> +       for (i = 0; i < mddev->raid_disks && i < rs->raid_disks; i++) {
>                 r = &rs->dev[i].rdev;
>                 /* HM FIXME: enhance journal device recovery processing */
>                 if (test_bit(Journal, &r->flags))
>
>


* [PATCH v2] dm-raid: fix out of memory accesses in dm-raid
  2022-06-27 13:51 ` [dm-devel] " Heinz Mauelshagen
@ 2022-06-27 13:56     ` Mikulas Patocka
  0 siblings, 0 replies; 6+ messages in thread
From: Mikulas Patocka @ 2022-06-27 13:56 UTC (permalink / raw)
  To: Heinz Mauelshagen
  Cc: Mike Snitzer, Song Liu, Benjamin Marzinski, Marian Csontos,
	Ming Lei, dm-devel, linux-raid

dm-raid allocates the array of devices with rs->raid_disks entries and
then accesses it in a loop for rs->md.raid_disks. During reshaping,
rs->md.raid_disks may be greater than rs->raid_disks, so it accesses
entries beyond the end of the array.

We fix this bug by limiting the iteration to rs->raid_disks.

The bug is triggered by running the lvm test shell/lvconvert-raid.sh on a
kernel compiled with KASAN.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org

---
 drivers/md/dm-raid.c |   12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

Index: linux-2.6/drivers/md/dm-raid.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-raid.c	2022-06-27 15:44:12.000000000 +0200
+++ linux-2.6/drivers/md/dm-raid.c	2022-06-27 15:44:12.000000000 +0200
@@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru
 	unsigned int rebuilds_per_group = 0, copies;
 	unsigned int group_size, last_group_start;
 
-	for (i = 0; i < rs->md.raid_disks; i++)
+	for (i = 0; i < rs->raid_disks; i++)
 		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
 		    !rs->dev[i].rdev.sb_page)
 			rebuild_cnt++;
@@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru
 		 *	    C	 D    D	   E	E
 		 */
 		if (__is_raid10_near(rs->md.new_layout)) {
-			for (i = 0; i < rs->md.raid_disks; i++) {
+			for (i = 0; i < rs->raid_disks; i++) {
 				if (!(i % copies))
 					rebuilds_per_group = 0;
 				if ((!rs->dev[i].rdev.sb_page ||
@@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru
 		group_size = (rs->md.raid_disks / copies);
 		last_group_start = (rs->md.raid_disks / group_size) - 1;
 		last_group_start *= group_size;
-		for (i = 0; i < rs->md.raid_disks; i++) {
+		for (i = 0; i < rs->raid_disks; i++) {
 			if (!(i % copies) && !(i > last_group_start))
 				rebuilds_per_group = 0;
 			if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra
 {
 	int i;
 
-	for (i = 0; i < rs->md.raid_disks; i++) {
+	for (i = 0; i < rs->raid_disks; i++) {
 		struct md_rdev *rdev = &rs->dev[i].rdev;
 
 		if (!test_bit(Journal, &rdev->flags) &&
@@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d
 	unsigned int i;
 	int r = 0;
 
-	for (i = 0; !r && i < rs->md.raid_disks; i++)
+	for (i = 0; !r && i < rs->raid_disks; i++)
 		if (rs->dev[i].data_dev)
 			r = fn(ti,
 				 rs->dev[i].data_dev,
@@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de
 
 	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
 
-	for (i = 0; i < mddev->raid_disks; i++) {
+	for (i = 0; i < rs->raid_disks; i++) {
 		r = &rs->dev[i].rdev;
 		/* HM FIXME: enhance journal device recovery processing */
 		if (test_bit(Journal, &r->flags))




* Re: [dm-devel] [PATCH v2] dm-raid: fix out of memory accesses in dm-raid
  2022-06-27 13:56     ` [dm-devel] " Mikulas Patocka
@ 2022-06-27 22:37     ` Heinz Mauelshagen
  -1 siblings, 0 replies; 6+ messages in thread
From: Heinz Mauelshagen @ 2022-06-27 22:37 UTC (permalink / raw)
  To: Mikulas Patocka
  Cc: Mike Snitzer, Marian Csontos, linux-raid, Song Liu, dm-devel, Ming Lei



From 852b91efc183e9b87ac50f15fb4df86f26f73532 Mon Sep 17 00:00:00 2001
From: Heinz Mauelshagen <heinzm@redhat.com>
Date: Mon, 27 Jun 2022 23:54:24 +0200
Subject: [PATCH] dm-raid: fix array out of bounds (OOB)

Supersedes "[PATCH] dm-raid: fix out of memory accesses in dm-raid"


On RAID mapping construction, dm-raid allocates an array
rs->devs[rs->raid_disks] for the raid device members.  rs->raid_disks is
defined by the number of raid metadata and image tuples passed into the
target's constructor.

That number can differ from the current number of members of existing raid
sets, as defined in their superblocks, when layout changes are requested:

- raid1 legs being added/removed

- raid4/5/6/10 number of stripes changed (stripe reshaping)

- takeover to a higher raid level (e.g. raid5 -> raid6)

When accessing array members, rs->raid_disks has to be used as the loop
bound instead of the potentially larger rs->md.raid_disks, which causes
out-of-bounds access on the devices array.

This patch changes the instances prone to OOB access and also ensures that
added devices are validated in validate_raid_redundancy().

Initially discovered by KASAN.

Passes all LVM2 RAID tests (KASAN enabled).
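
The bounds used in the hunks below follow this pattern (a schematic C sketch;
check_dev() is a hypothetical placeholder for the per-device work):

    /* Device iteration and rdev sizing: dev[] holds exactly
     * rs->raid_disks entries, so bound the loop by that.
     */
    for (i = 0; i < rs->raid_disks; i++)
            check_dev(&rs->dev[i]);

    /* raid10 redundancy math: additionally clamp to the current MD view,
     * so the copies/group calculations never exceed either count.
     */
    raid_disks = min(rs->raid_disks, rs->md.raid_disks);
    for (i = 0; i < raid_disks; i++)
            check_dev(&rs->dev[i]);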

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Tested-by: Heinz Mauelshagen <heinzm@redhat.com>
---
 drivers/md/dm-raid.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2b26435a6946..bcec0e1a049d 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned int i, rebuild_cnt = 0;
-       unsigned int rebuilds_per_group = 0, copies;
+       unsigned int rebuilds_per_group = 0, copies, raid_disks;
        unsigned int group_size, last_group_start;

-       for (i = 0; i < rs->md.raid_disks; i++)
-               if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
-                   !rs->dev[i].rdev.sb_page)
+       for (i = 0; i < rs->raid_disks; i++)
+               if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
+                   ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+                     !rs->dev[i].rdev.sb_page)))
                        rebuild_cnt++;

        switch (rs->md.level) {
@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
+               raid_disks = min(rs->raid_disks, rs->md.raid_disks);
                if (__is_raid10_near(rs->md.new_layout)) {
-                       for (i = 0; i < rs->md.raid_disks; i++) {
+                       for (i = 0; i < raid_disks; i++) {
                                if (!(i % copies))
                                        rebuilds_per_group = 0;
                                if ((!rs->dev[i].rdev.sb_page ||
@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 * results in the need to treat the last (potentially larger)
                 * set differently.
                 */
-               group_size = (rs->md.raid_disks / copies);
-               last_group_start = (rs->md.raid_disks / group_size) - 1;
+               group_size = (raid_disks / copies);
+               last_group_start = (raid_disks / group_size) - 1;
                last_group_start *= group_size;
-               for (i = 0; i < rs->md.raid_disks; i++) {
+               for (i = 0; i < raid_disks; i++) {
                        if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
                        if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
 {
        int i;

-       for (i = 0; i < rs->md.raid_disks; i++) {
+       for (i = 0; i < rs->raid_disks; i++) {
                struct md_rdev *rdev = &rs->dev[i].rdev;

                if (!test_bit(Journal, &rdev->flags) &&
@@ -3771,7 +3773,7 @@ static int raid_iterate_devices(struct dm_target *ti,
        unsigned int i;
        int r = 0;

-       for (i = 0; !r && i < rs->md.raid_disks; i++)
+       for (i = 0; !r && i < rs->raid_disks; i++)
                if (rs->dev[i].data_dev)
                        r = fn(ti,
                                 rs->dev[i].data_dev,
-- 
2.36.1


On Mon, Jun 27, 2022 at 3:56 PM Mikulas Patocka <mpatocka@redhat.com> wrote:

> dm-raid allocates the array of devices with rs->raid_disks entries and
> then accesses it in a loop for rs->md.raid_disks. During reshaping,
> rs->md.raid_disks may be greater than rs->raid_disks, so it accesses
> entries beyond the end of the array.
>
> We fix this bug by limiting the iteration to rs->raid_disks.
>
> The bug is triggered when running lvm test shell/lvconvert-raid.sh and the
> kernel is compiled with kasan.
>
> Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
> Cc: stable@vger.kernel.org
>
> ---
>  drivers/md/dm-raid.c |   12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> Index: linux-2.6/drivers/md/dm-raid.c
> ===================================================================
> --- linux-2.6.orig/drivers/md/dm-raid.c 2022-06-27 15:44:12.000000000 +0200
> +++ linux-2.6/drivers/md/dm-raid.c      2022-06-27 15:44:12.000000000 +0200
> @@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru
>         unsigned int rebuilds_per_group = 0, copies;
>         unsigned int group_size, last_group_start;
>
> -       for (i = 0; i < rs->md.raid_disks; i++)
> +       for (i = 0; i < rs->raid_disks; i++)
>                 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
>                     !rs->dev[i].rdev.sb_page)
>                         rebuild_cnt++;
> @@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru
>                  *          C    D    D    E    E
>                  */
>                 if (__is_raid10_near(rs->md.new_layout)) {
> -                       for (i = 0; i < rs->md.raid_disks; i++) {
> +                       for (i = 0; i < rs->raid_disks; i++) {
>                                 if (!(i % copies))
>                                         rebuilds_per_group = 0;
>                                 if ((!rs->dev[i].rdev.sb_page ||
> @@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru
>                 group_size = (rs->md.raid_disks / copies);
>                 last_group_start = (rs->md.raid_disks / group_size) - 1;
>                 last_group_start *= group_size;
> -               for (i = 0; i < rs->md.raid_disks; i++) {
> +               for (i = 0; i < rs->raid_disks; i++) {
>                         if (!(i % copies) && !(i > last_group_start))
>                                 rebuilds_per_group = 0;
>                         if ((!rs->dev[i].rdev.sb_page ||
> @@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra
>  {
>         int i;
>
> -       for (i = 0; i < rs->md.raid_disks; i++) {
> +       for (i = 0; i < rs->raid_disks; i++) {
>                 struct md_rdev *rdev = &rs->dev[i].rdev;
>
>                 if (!test_bit(Journal, &rdev->flags) &&
> @@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d
>         unsigned int i;
>         int r = 0;
>
> -       for (i = 0; !r && i < rs->md.raid_disks; i++)
> +       for (i = 0; !r && i < rs->raid_disks; i++)
>                 if (rs->dev[i].data_dev)
>                         r = fn(ti,
>                                  rs->dev[i].data_dev,
> @@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de
>
>         memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
>
> -       for (i = 0; i < mddev->raid_disks; i++) {
> +       for (i = 0; i < rs->raid_disks; i++) {
>                 r = &rs->dev[i].rdev;
>                 /* HM FIXME: enhance journal device recovery processing */
>                 if (test_bit(Journal, &r->flags))
>
>


