From: Jane Chu <jane.chu@oracle.com>
To: Shiyang Ruan <ruansy.fnst@fujitsu.com>,
linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org,
nvdimm@lists.linux.dev, linux-mm@kvack.org,
linux-fsdevel@vger.kernel.org, dm-devel@redhat.com
Cc: djwong@kernel.org, dan.j.williams@intel.com, david@fromorbit.com,
hch@lst.de, agk@redhat.com, snitzer@redhat.com
Subject: Re: [PATCH RESEND v6 8/9] md: Implement dax_holder_operations
Date: Thu, 5 Aug 2021 17:48:14 -0700 [thread overview]
Message-ID: <4573e358-ff39-3627-6844-8a301d154d3e@oracle.com> (raw)
In-Reply-To: <20210730100158.3117319-9-ruansy.fnst@fujitsu.com>
On 7/30/2021 3:01 AM, Shiyang Ruan wrote:
> This is the case where the holder represents a mapped device, or a list
> of mapped devices more exactly(because it is possible to create more
> than one mapped device on one pmem device).
Could you share how you test this scenario?
thanks,
-jane
>
> Find out which mapped device the offset belongs to, and translate the
> offset from target device to mapped device. When it is done, call
> dax_corrupted_range() for the holder of this mapped device.
>
> Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
> ---
> drivers/md/dm.c | 126 +++++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 125 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/md/dm.c b/drivers/md/dm.c
> index 2c5f9e585211..a35b9a97a73f 100644
> --- a/drivers/md/dm.c
> +++ b/drivers/md/dm.c
> @@ -626,7 +626,11 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
> }
>
> static char *_dm_claim_ptr = "I belong to device-mapper";
> -
> +static const struct dax_holder_operations dm_dax_holder_ops;
> +struct dm_holder {
> + struct list_head list;
> + struct mapped_device *md;
> +};
> /*
> * Open a table device so we can use it as a map destination.
> */
> @@ -634,6 +638,8 @@ static int open_table_device(struct table_device *td, dev_t dev,
> struct mapped_device *md)
> {
> struct block_device *bdev;
> + struct list_head *holders;
> + struct dm_holder *holder;
>
> int r;
>
> @@ -651,6 +657,19 @@ static int open_table_device(struct table_device *td, dev_t dev,
>
> td->dm_dev.bdev = bdev;
> td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
> + if (!td->dm_dev.dax_dev)
> + return 0;
> +
> + holders = dax_get_holder(td->dm_dev.dax_dev);
> + if (!holders) {
> + holders = kmalloc(sizeof(*holders), GFP_KERNEL);
> + INIT_LIST_HEAD(holders);
> + dax_set_holder(td->dm_dev.dax_dev, holders, &dm_dax_holder_ops);
> + }
> + holder = kmalloc(sizeof(*holder), GFP_KERNEL);
> + holder->md = md;
> + list_add_tail(&holder->list, holders);
> +
> return 0;
> }
>
> @@ -659,9 +678,27 @@ static int open_table_device(struct table_device *td, dev_t dev,
> */
> static void close_table_device(struct table_device *td, struct mapped_device *md)
> {
> + struct list_head *holders;
> + struct dm_holder *holder, *n;
> +
> if (!td->dm_dev.bdev)
> return;
>
> + holders = dax_get_holder(td->dm_dev.dax_dev);
> + if (holders) {
> + list_for_each_entry_safe(holder, n, holders, list) {
> + if (holder->md == md) {
> + list_del(&holder->list);
> + kfree(holder);
> + }
> + }
> + if (list_empty(holders)) {
> + kfree(holders);
> + /* unset dax_device's holder_data */
> + dax_set_holder(td->dm_dev.dax_dev, NULL, NULL);
> + }
> + }
> +
> bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
> blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
> put_dax(td->dm_dev.dax_dev);
> @@ -1115,6 +1152,89 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
> return ret;
> }
>
> +#if IS_ENABLED(CONFIG_DAX_DRIVER)
> +struct corrupted_hit_info {
> + struct dax_device *dax_dev;
> + sector_t offset;
> +};
> +
> +static int dm_blk_corrupted_hit(struct dm_target *ti, struct dm_dev *dev,
> + sector_t start, sector_t count, void *data)
> +{
> + struct corrupted_hit_info *bc = data;
> +
> + return bc->dax_dev == (void *)dev->dax_dev &&
> + (start <= bc->offset && bc->offset < start + count);
> +}
> +
> +struct corrupted_do_info {
> + size_t length;
> + void *data;
> +};
> +
> +static int dm_blk_corrupted_do(struct dm_target *ti, struct block_device *bdev,
> + sector_t sector, void *data)
> +{
> + struct mapped_device *md = ti->table->md;
> + struct corrupted_do_info *bc = data;
> +
> + return dax_holder_notify_failure(md->dax_dev, to_bytes(sector),
> + bc->length, bc->data);
> +}
> +
> +static int dm_dax_notify_failure_one(struct mapped_device *md,
> + struct dax_device *dax_dev,
> + loff_t offset, size_t length, void *data)
> +{
> + struct dm_table *map;
> + struct dm_target *ti;
> + sector_t sect = to_sector(offset);
> + struct corrupted_hit_info hi = {dax_dev, sect};
> + struct corrupted_do_info di = {length, data};
> + int srcu_idx, i, rc = -ENODEV;
> +
> + map = dm_get_live_table(md, &srcu_idx);
> + if (!map)
> + return rc;
> +
> + /*
> + * find the target device, and then translate the offset of this target
> + * to the whole mapped device.
> + */
> + for (i = 0; i < dm_table_get_num_targets(map); i++) {
> + ti = dm_table_get_target(map, i);
> + if (!(ti->type->iterate_devices && ti->type->rmap))
> + continue;
> + if (!ti->type->iterate_devices(ti, dm_blk_corrupted_hit, &hi))
> + continue;
> +
> + rc = ti->type->rmap(ti, sect, dm_blk_corrupted_do, &di);
> + break;
> + }
> +
> + dm_put_live_table(md, srcu_idx);
> + return rc;
> +}
> +
> +static int dm_dax_notify_failure(struct dax_device *dax_dev,
> + loff_t offset, size_t length, void *data)
> +{
> + struct dm_holder *holder;
> + struct list_head *holders = dax_get_holder(dax_dev);
> + int rc = -ENODEV;
> +
> + list_for_each_entry(holder, holders, list) {
> + rc = dm_dax_notify_failure_one(holder->md, dax_dev, offset,
> + length, data);
> + if (rc != -ENODEV)
> + break;
> + }
> + return rc;
> +}
> +#else
> +#define dm_dax_notify_failure NULL
> +#endif
> +
> /*
> * A target may call dm_accept_partial_bio only from the map routine. It is
> * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
> @@ -3057,6 +3177,10 @@ static const struct dax_operations dm_dax_ops = {
> .zero_page_range = dm_dax_zero_page_range,
> };
>
> +static const struct dax_holder_operations dm_dax_holder_ops = {
> + .notify_failure = dm_dax_notify_failure,
> +};
> +
> /*
> * module hooks
> */
>
next prev parent reply other threads:[~2021-08-06 0:48 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-30 10:01 [PATCH RESEND v6 0/9] fsdax: introduce fs query to support reflink Shiyang Ruan
2021-07-30 10:01 ` [PATCH RESEND v6 1/9] pagemap: Introduce ->memory_failure() Shiyang Ruan
2021-08-06 1:17 ` Jane Chu
2021-08-16 17:20 ` Jane Chu
2021-08-17 1:44 ` ruansy.fnst
2021-08-18 5:43 ` Jane Chu
2021-08-18 6:08 ` Jane Chu
2021-08-18 7:52 ` ruansy.fnst
2021-08-18 17:10 ` Dan Williams
2021-08-23 13:21 ` hch
2021-08-18 15:52 ` Darrick J. Wong
2021-08-19 7:18 ` Jane Chu
2021-08-19 8:11 ` Jane Chu
2021-08-19 9:10 ` ruansy.fnst
2021-08-19 20:50 ` Jane Chu
2021-08-20 16:07 ` Dan Williams
2021-07-30 10:01 ` [PATCH RESEND v6 2/9] dax: Introduce holder for dax_device Shiyang Ruan
2021-08-06 1:02 ` Jane Chu
2021-08-17 1:45 ` ruansy.fnst
2021-08-20 16:06 ` Dan Williams
2021-08-20 20:19 ` Dan Williams
2021-07-30 10:01 ` [PATCH RESEND v6 3/9] mm: factor helpers for memory_failure_dev_pagemap Shiyang Ruan
2021-08-06 1:00 ` Jane Chu
2021-08-20 16:54 ` Dan Williams
2021-07-30 10:01 ` [PATCH RESEND v6 4/9] pmem,mm: Implement ->memory_failure in pmem driver Shiyang Ruan
2021-08-20 20:51 ` Dan Williams
2021-07-30 10:01 ` [PATCH RESEND v6 5/9] mm: Introduce mf_dax_kill_procs() for fsdax case Shiyang Ruan
2021-08-06 0:59 ` Jane Chu
2021-08-20 22:40 ` Dan Williams
2021-07-30 10:01 ` [PATCH RESEND v6 6/9] xfs: Implement ->notify_failure() for XFS Shiyang Ruan
2021-08-06 0:50 ` Jane Chu
2021-08-20 22:56 ` Dan Williams
2021-08-20 22:59 ` Dan Williams
2021-07-30 10:01 ` [PATCH RESEND v6 7/9] dm: Introduce ->rmap() to find bdev offset Shiyang Ruan
2021-08-20 23:46 ` Dan Williams
2021-07-30 10:01 ` [PATCH RESEND v6 8/9] md: Implement dax_holder_operations Shiyang Ruan
2021-08-06 0:48 ` Jane Chu [this message]
2021-08-17 1:59 ` ruansy.fnst
2021-07-30 10:01 ` [PATCH RESEND v6 9/9] fsdax: add exception for reflinked files Shiyang Ruan
2021-08-06 0:46 ` Jane Chu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4573e358-ff39-3627-6844-8a301d154d3e@oracle.com \
--to=jane.chu@oracle.com \
--cc=agk@redhat.com \
--cc=dan.j.williams@intel.com \
--cc=david@fromorbit.com \
--cc=djwong@kernel.org \
--cc=dm-devel@redhat.com \
--cc=hch@lst.de \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-xfs@vger.kernel.org \
--cc=nvdimm@lists.linux.dev \
--cc=ruansy.fnst@fujitsu.com \
--cc=snitzer@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).