linux-mm.kvack.org archive mirror
* [linux-next:master 6117/8451] drivers/md/raid10.c:1707:39: warning: Uninitialized variable: first_r10bio [uninitvar]
@ 2021-03-31  1:59 kernel test robot
  2021-03-31 19:48 ` Song Liu
  0 siblings, 1 reply; 2+ messages in thread
From: kernel test robot @ 2021-03-31  1:59 UTC (permalink / raw)
  To: Xiao Ni; +Cc: kbuild-all, Linux Memory Management List, Song Liu

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   4143e05b7b171902f4938614c2a68821e1af46bc
commit: 254c271da0712ea8914f187588e0f81f7678ee2f [6117/8451] md/raid10: improve discard request for far layout
compiler: aarch64-linux-gcc (GCC) 9.3.0

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>


cppcheck warnings: (new ones prefixed by >>)
   In file included from drivers/md/raid10.c:
>> drivers/md/raid10.c:1707:39: warning: Uninitialized variable: first_r10bio [uninitvar]
     r10_bio->master_bio = (struct bio *)first_r10bio;
                                         ^
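
The warning comes from a control-flow pattern that flow-insensitive checkers
commonly trip over: a pointer assigned only on the first pass of a retry loop
and read on every later pass. A minimal stand-alone sketch of the same shape
(hypothetical code, not the kernel source) compiles and runs correctly, yet
is exactly the kind of flow that draws this complaint:

    /* pattern.c - minimal stand-in for the flagged control flow */
    #include <stdio.h>

    int main(void)
    {
    	int first_copy = 1;	/* plays the role of bool first_copy = true */
    	int *first_ptr;		/* plays the role of first_r10bio */
    	int copies = 3;		/* plays the role of far_copies */
    	int value = 42;

    retry:
    	if (first_copy) {
    		first_copy = 0;
    		first_ptr = &value;	/* guaranteed to run on the first pass */
    	} else {
    		/* cppcheck reports this read as "uninitialized", but the
    		 * first pass always takes the if-branch above */
    		printf("%d\n", *first_ptr);
    	}
    	if (--copies)
    		goto retry;
    	return 0;
    }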

vim +1707 drivers/md/raid10.c

  1573	
  1574	/*
  1575	 * There are some limitations to handling a discard bio:
  1576	 * 1st, the discard size must be bigger than stripe_size*2.
  1577	 * 2nd, if the discard bio spans reshape progress, we use the old way to
  1578	 * handle the discard bio.
  1579	 */
  1580	static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
  1581	{
  1582		struct r10conf *conf = mddev->private;
  1583		struct geom *geo = &conf->geo;
  1584		int far_copies = geo->far_copies;
  1585		bool first_copy = true;
  1586		struct r10bio *r10_bio, *first_r10bio;
  1587		struct bio *split;
  1588		int disk;
  1589		sector_t chunk;
  1590		unsigned int stripe_size;
  1591		unsigned int stripe_data_disks;
  1592		sector_t split_size;
  1593		sector_t bio_start, bio_end;
  1594		sector_t first_stripe_index, last_stripe_index;
  1595		sector_t start_disk_offset;
  1596		unsigned int start_disk_index;
  1597		sector_t end_disk_offset;
  1598		unsigned int end_disk_index;
  1599		unsigned int remainder;
  1600	
  1601		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  1602			return -EAGAIN;
  1603	
  1604		wait_barrier(conf);
  1605	
  1606		/*
  1607		 * Check reshape again to avoid a reshape that starts after we check
  1608		 * MD_RECOVERY_RESHAPE but before we call wait_barrier
  1609		 */
  1610		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  1611			goto out;
  1612	
  1613		if (geo->near_copies)
  1614			stripe_data_disks = geo->raid_disks / geo->near_copies +
  1615						geo->raid_disks % geo->near_copies;
  1616		else
  1617			stripe_data_disks = geo->raid_disks;
  1618	
  1619		stripe_size = stripe_data_disks << geo->chunk_shift;
  1620	
  1621		bio_start = bio->bi_iter.bi_sector;
  1622		bio_end = bio_end_sector(bio);
  1623	
  1624		/*
  1625		 * A discard bio may be smaller than the stripe size, or it may cross
  1626		 * a stripe boundary while its discard region is larger than one
  1627		 * stripe. For the far offset layout, if the discard region is not
  1628		 * aligned with the stripe size, there are holes when we submit the
  1629		 * discard bio to the member disks. For simplicity, we only handle a
  1630		 * discard bio whose discard region is bigger than stripe_size * 2
  1631		 */
  1632		if (bio_sectors(bio) < stripe_size*2)
  1633			goto out;
  1634	
  1635		/*
  1636		 * Keep the bio aligned with the stripe size.
  1637		 */
  1638		div_u64_rem(bio_start, stripe_size, &remainder);
  1639		if (remainder) {
  1640			split_size = stripe_size - remainder;
  1641			split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
  1642			bio_chain(split, bio);
  1643			allow_barrier(conf);
  1644		/* Resend the first split part */
  1645			submit_bio_noacct(split);
  1646			wait_barrier(conf);
  1647		}
  1648		div_u64_rem(bio_end, stripe_size, &remainder);
  1649		if (remainder) {
  1650			split_size = bio_sectors(bio) - remainder;
  1651			split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
  1652			bio_chain(split, bio);
  1653			allow_barrier(conf);
  1654			/* Resend the second split part */
  1655			submit_bio_noacct(bio);
  1656			bio = split;
  1657			wait_barrier(conf);
  1658		}
  1659	
  1660		bio_start = bio->bi_iter.bi_sector;
  1661		bio_end = bio_end_sector(bio);
  1662	
  1663		/*
  1664		 * Raid10 uses the chunk as the unit to store data, similar to raid0.
  1665		 * One stripe contains the chunks from all member disks (one chunk from
  1666		 * each disk at the same HBA address). For layout details, see 'man 4 md'
  1667		 */
  1668		chunk = bio_start >> geo->chunk_shift;
  1669		chunk *= geo->near_copies;
  1670		first_stripe_index = chunk;
  1671		start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
  1672		if (geo->far_offset)
  1673			first_stripe_index *= geo->far_copies;
  1674		start_disk_offset = (bio_start & geo->chunk_mask) +
  1675					(first_stripe_index << geo->chunk_shift);
  1676	
  1677		chunk = bio_end >> geo->chunk_shift;
  1678		chunk *= geo->near_copies;
  1679		last_stripe_index = chunk;
  1680		end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
  1681		if (geo->far_offset)
  1682			last_stripe_index *= geo->far_copies;
  1683		end_disk_offset = (bio_end & geo->chunk_mask) +
  1684					(last_stripe_index << geo->chunk_shift);
  1685	
  1686	retry_discard:
  1687		r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
  1688		r10_bio->mddev = mddev;
  1689		r10_bio->state = 0;
  1690		r10_bio->sectors = 0;
  1691		memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
  1692		wait_blocked_dev(mddev, r10_bio);
  1693	
  1694		/*
  1695		 * The far layout needs more than one r10bio to cover all the regions.
  1696		 * Inspired by raid10_sync_request, we use the first r10bio->master_bio
  1697		 * to record the discard bio, and every other r10bio->master_bio records
  1698		 * the first r10bio. The first r10bio is released only after all the
  1699		 * other r10bios finish; the discard bio returns only when it does
  1700		 */
  1701		if (first_copy) {
  1702			r10_bio->master_bio = bio;
  1703			set_bit(R10BIO_Discard, &r10_bio->state);
  1704			first_copy = false;
  1705			first_r10bio = r10_bio;
  1706		} else
> 1707			r10_bio->master_bio = (struct bio *)first_r10bio;
  1708	
  1709		rcu_read_lock();
  1710		for (disk = 0; disk < geo->raid_disks; disk++) {
  1711			struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
  1712			struct md_rdev *rrdev = rcu_dereference(
  1713				conf->mirrors[disk].replacement);
  1714	
  1715			r10_bio->devs[disk].bio = NULL;
  1716			r10_bio->devs[disk].repl_bio = NULL;
  1717	
  1718			if (rdev && (test_bit(Faulty, &rdev->flags)))
  1719				rdev = NULL;
  1720			if (rrdev && (test_bit(Faulty, &rrdev->flags)))
  1721				rrdev = NULL;
  1722			if (!rdev && !rrdev)
  1723				continue;
  1724	
  1725			if (rdev) {
  1726				r10_bio->devs[disk].bio = bio;
  1727				atomic_inc(&rdev->nr_pending);
  1728			}
  1729			if (rrdev) {
  1730				r10_bio->devs[disk].repl_bio = bio;
  1731				atomic_inc(&rrdev->nr_pending);
  1732			}
  1733		}
  1734		rcu_read_unlock();
  1735	
  1736		atomic_set(&r10_bio->remaining, 1);
  1737		for (disk = 0; disk < geo->raid_disks; disk++) {
  1738			sector_t dev_start, dev_end;
  1739			struct bio *mbio, *rbio = NULL;
  1740			struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
  1741			struct md_rdev *rrdev = rcu_dereference(
  1742				conf->mirrors[disk].replacement);
  1743	
  1744			/*
  1745		 * Now calculate the start and end address for each disk.
  1746		 * The space between dev_start and dev_end is the discard region.
  1747		 *
  1748		 * For dev_start, three cases need to be considered:
  1749		 * 1st, the disk is before start_disk: think of this disk as being
  1750		 * in the next stripe, so dev_start is the start address of the
  1751		 * next stripe.
  1752		 * 2nd, the disk is after start_disk: the disk is in the same
  1753		 * stripe as the first disk.
  1754		 * 3rd, the first disk itself: we can use start_disk_offset directly
  1755			 */
  1756			if (disk < start_disk_index)
  1757				dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
  1758			else if (disk > start_disk_index)
  1759				dev_start = first_stripe_index * mddev->chunk_sectors;
  1760			else
  1761				dev_start = start_disk_offset;
  1762	
  1763			if (disk < end_disk_index)
  1764				dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
  1765			else if (disk > end_disk_index)
  1766				dev_end = last_stripe_index * mddev->chunk_sectors;
  1767			else
  1768				dev_end = end_disk_offset;
  1769	
  1770			/*
  1771		 * We only handle discard bios whose size is >= the stripe size, so
  1772		 * dev_end > dev_start all the time
  1773			 */
  1774			if (r10_bio->devs[disk].bio) {
  1775				mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
  1776				mbio->bi_end_io = raid10_end_discard_request;
  1777				mbio->bi_private = r10_bio;
  1778				r10_bio->devs[disk].bio = mbio;
  1779				r10_bio->devs[disk].devnum = disk;
  1780				atomic_inc(&r10_bio->remaining);
  1781				md_submit_discard_bio(mddev, rdev, mbio,
  1782						dev_start + choose_data_offset(r10_bio, rdev),
  1783						dev_end - dev_start);
  1784				bio_endio(mbio);
  1785			}
  1786			if (r10_bio->devs[disk].repl_bio) {
  1787				rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
  1788				rbio->bi_end_io = raid10_end_discard_request;
  1789				rbio->bi_private = r10_bio;
  1790				r10_bio->devs[disk].repl_bio = rbio;
  1791				r10_bio->devs[disk].devnum = disk;
  1792				atomic_inc(&r10_bio->remaining);
  1793				md_submit_discard_bio(mddev, rrdev, rbio,
  1794						dev_start + choose_data_offset(r10_bio, rrdev),
  1795						dev_end - dev_start);
  1796				bio_endio(rbio);
  1797			}
  1798		}
  1799	
  1800		if (!geo->far_offset && --far_copies) {
  1801			first_stripe_index += geo->stride >> geo->chunk_shift;
  1802			start_disk_offset += geo->stride;
  1803			last_stripe_index += geo->stride >> geo->chunk_shift;
  1804			end_disk_offset += geo->stride;
  1805			atomic_inc(&first_r10bio->remaining);
  1806			raid_end_discard_bio(r10_bio);
  1807			wait_barrier(conf);
  1808			goto retry_discard;
  1809		}
  1810	
  1811		raid_end_discard_bio(r10_bio);
  1812	
  1813		return 0;
  1814	out:
  1815		allow_barrier(conf);
  1816		return -EAGAIN;
  1817	}
  1818	
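
To make the stripe geometry above concrete, here is the arithmetic traced
for a hypothetical layout (illustrative numbers only, not taken from this
report): raid_disks = 4, near_copies = 2, far_offset = 0, chunk_shift = 7
(128-sector chunks), and a stripe-aligned bio_start of 1024 sectors:

   stripe_data_disks  = 4/2 + 4%2                = 2
   stripe_size        = 2 << 7                   = 256 sectors
   chunk              = (1024 >> 7) * 2          = 16
   first_stripe_index = 16 / 4                   = 4   (sector_div quotient)
   start_disk_index   = 16 % 4                   = 0   (sector_div remainder)
   start_disk_offset  = (1024 & 127) + (4 << 7)  = 0 + 512 = 512

So for this hypothetical geometry the discard region begins on member disk 0
at sector 512 of that device (before choose_data_offset is applied).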

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org



* Re: [linux-next:master 6117/8451] drivers/md/raid10.c:1707:39: warning: Uninitialized variable: first_r10bio [uninitvar]
  2021-03-31  1:59 [linux-next:master 6117/8451] drivers/md/raid10.c:1707:39: warning: Uninitialized variable: first_r10bio [uninitvar] kernel test robot
@ 2021-03-31 19:48 ` Song Liu
  0 siblings, 0 replies; 2+ messages in thread
From: Song Liu @ 2021-03-31 19:48 UTC (permalink / raw)
  To: kernel test robot; +Cc: Xiao Ni, kbuild-all, Linux Memory Management List



> On Mar 30, 2021, at 6:59 PM, kernel test robot <lkp@intel.com> wrote:
> 
> tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
> head:   4143e05b7b171902f4938614c2a68821e1af46bc
> commit: 254c271da0712ea8914f187588e0f81f7678ee2f [6117/8451] md/raid10: improve discard request for far layout
> compiler: aarch64-linux-gcc (GCC) 9.3.0
> 
> If you fix the issue, kindly add the following tag as appropriate:
> Reported-by: kernel test robot <lkp@intel.com>
> 
> 
> cppcheck warnings: (new ones prefixed by >>)
>   In file included from drivers/md/raid10.c:
>>> drivers/md/raid10.c:1707:39: warning: Uninitialized variable: first_r10bio [uninitvar]
>     r10_bio->master_bio = (struct bio *)first_r10bio;
>                                         ^
> 
> vim +1707 drivers/md/raid10.c
> 
> [...]
>  1701		if (first_copy) {
>  1702			r10_bio->master_bio = bio;
>  1703			set_bit(R10BIO_Discard, &r10_bio->state);
>  1704			first_copy = false;
>  1705			first_r10bio = r10_bio;
>  1706		} else
>> 1707			r10_bio->master_bio = (struct bio *)first_r10bio;

This is a false alert. The function starts with first_copy = true, so by
the time we hit the else clause, first_r10bio has already been initialized.
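
If we wanted to quiet the checker anyway, a redundant initializer at the
declaration would do it (just an option, not something this report requires):

	struct r10bio *r10_bio, *first_r10bio = NULL;

The trade-off is that it also hides any future real misuse of first_r10bio
from the compiler's own uninitialized-variable warnings.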

Thanks,
Song

[...]


