From: Nitesh Shetty <nj.shetty@samsung.com>
To: Jens Axboe <axboe@kernel.dk>, Jonathan Corbet <corbet@lwn.net>,
Alasdair Kergon <agk@redhat.com>,
Mike Snitzer <snitzer@kernel.org>,
dm-devel@redhat.com, Keith Busch <kbusch@kernel.org>,
Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
Chaitanya Kulkarni <kch@nvidia.com>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>
Cc: Anuj Gupta <anuj20.g@samsung.com>,
Vincent Fu <vincent.fu@samsung.com>,
martin.petersen@oracle.com, linux-doc@vger.kernel.org,
gost.dev@samsung.com,
Damien Le Moal <damien.lemoal@opensource.wdc.com>,
linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org,
linux-block@vger.kernel.org, mcgrof@kernel.org,
dlemoal@kernel.org, linux-fsdevel@vger.kernel.org,
Nitesh Shetty <nj.shetty@samsung.com>
Subject: [dm-devel] [PATCH v14 11/11] null_blk: add support for copy offload
Date: Fri, 11 Aug 2023 16:22:54 +0530
Message-ID: <20230811105300.15889-12-nj.shetty@samsung.com>
In-Reply-To: <20230811105300.15889-1-nj.shetty@samsung.com>

Implementation is based on the existing read and write infrastructure.
copy_max_bytes: a new configfs and module parameter is introduced, which
can be used to set the maximum copy limit supported by the hardware/driver.
Only the request-based queue mode supports copy offload.
Added tracefs support for copy IO tracing.

Suggested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
---
Documentation/block/null_blk.rst | 5 ++
drivers/block/null_blk/main.c | 99 ++++++++++++++++++++++++++++++-
drivers/block/null_blk/null_blk.h | 1 +
drivers/block/null_blk/trace.h | 23 +++++++
4 files changed, 125 insertions(+), 3 deletions(-)
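
A quick usage sketch for reviewers, not part of the patch itself. The paths
below follow the existing null_blk configfs conventions plus the attribute
and tracepoint added here; treat the exact values as illustrative:

  # set the limit at load time via the new module parameter (0444)
  modprobe null_blk nr_devices=0 copy_max_bytes=$((1 << 20))

  # or per device via the new configfs attribute, before powering it on
  mkdir /sys/kernel/config/nullb/nullb0
  echo 1048576 > /sys/kernel/config/nullb/nullb0/copy_max_bytes
  echo 1 > /sys/kernel/config/nullb/nullb0/power

  # enable the new copy tracepoint
  echo 1 > /sys/kernel/debug/tracing/events/nullb/nullb_copy_op/enable
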
diff --git a/Documentation/block/null_blk.rst b/Documentation/block/null_blk.rst
index 4dd78f24d10a..6153e02fcf13 100644
--- a/Documentation/block/null_blk.rst
+++ b/Documentation/block/null_blk.rst
@@ -149,3 +149,8 @@ zone_size=[MB]: Default: 256
zone_nr_conv=[nr_conv]: Default: 0
The number of conventional zones to create when block device is zoned. If
zone_nr_conv >= nr_zones, it will be reduced to nr_zones - 1.
+
+copy_max_bytes=[size in bytes]: Default: COPY_MAX_BYTES
+  A module and configfs parameter which can be used to set the maximum
+  copy offload limit supported by the hardware/driver.
+  COPY_MAX_BYTES (=128 MB at present) is defined in fs.h.
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 864013019d6b..afc14aa20305 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -11,6 +11,8 @@
#include <linux/init.h>
#include "null_blk.h"
+#include "trace.h"
+
#undef pr_fmt
#define pr_fmt(fmt) "null_blk: " fmt
@@ -157,6 +159,10 @@ static int g_max_sectors;
module_param_named(max_sectors, g_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
+static unsigned long g_copy_max_bytes = COPY_MAX_BYTES;
+module_param_named(copy_max_bytes, g_copy_max_bytes, ulong, 0444);
+MODULE_PARM_DESC(copy_max_bytes, "Maximum size of a copy command (in bytes)");
+
static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
@@ -409,6 +415,7 @@ NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
+NULLB_DEVICE_ATTR(copy_max_bytes, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
@@ -550,6 +557,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
&nullb_device_attr_max_sectors,
+ &nullb_device_attr_copy_max_bytes,
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
@@ -656,7 +664,8 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
"zone_capacity,zone_max_active,zone_max_open,"
- "zone_nr_conv,zone_offline,zone_readonly,zone_size\n");
+ "zone_nr_conv,zone_offline,zone_readonly,zone_size,"
+ "copy_max_bytes\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -722,6 +731,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->queue_mode = g_queue_mode;
dev->blocksize = g_bs;
dev->max_sectors = g_max_sectors;
+ dev->copy_max_bytes = g_copy_max_bytes;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
@@ -1271,6 +1281,81 @@ static int null_transfer(struct nullb *nullb, struct page *page,
return err;
}
+static inline int nullb_setup_copy(struct nullb *nullb, struct request *req,
+ bool is_fua)
+{
+ sector_t sector_in, sector_out;
+ loff_t offset_in, offset_out;
+ void *in, *out;
+ ssize_t chunk, rem = 0;
+ struct bio *bio;
+ struct nullb_page *t_page_in, *t_page_out;
+ u16 seg = 1;
+ int status = -EIO;
+
+ if (blk_rq_nr_phys_segments(req) != COPY_MAX_SEGMENTS)
+ return status;
+
+ /*
+ * First bio contains information about source and last bio contains
+ * information about destination.
+ */
+ __rq_for_each_bio(bio, req) {
+ if (seg == blk_rq_nr_phys_segments(req)) {
+ sector_out = bio->bi_iter.bi_sector;
+ if (rem != bio->bi_iter.bi_size)
+ return status;
+ } else {
+ sector_in = bio->bi_iter.bi_sector;
+ rem = bio->bi_iter.bi_size;
+ }
+ seg++;
+ }
+
+ trace_nullb_copy_op(req, sector_out << SECTOR_SHIFT,
+ sector_in << SECTOR_SHIFT, rem);
+
+ spin_lock_irq(&nullb->lock);
+ while (rem > 0) {
+ chunk = min_t(size_t, nullb->dev->blocksize, rem);
+ offset_in = (sector_in & SECTOR_MASK) << SECTOR_SHIFT;
+ offset_out = (sector_out & SECTOR_MASK) << SECTOR_SHIFT;
+
+ if (null_cache_active(nullb) && !is_fua)
+ null_make_cache_space(nullb, PAGE_SIZE);
+
+ t_page_in = null_lookup_page(nullb, sector_in, false,
+ !null_cache_active(nullb));
+ if (!t_page_in)
+ goto err;
+ t_page_out = null_insert_page(nullb, sector_out,
+ !null_cache_active(nullb) ||
+ is_fua);
+ if (!t_page_out)
+ goto err;
+
+ in = kmap_local_page(t_page_in->page);
+ out = kmap_local_page(t_page_out->page);
+
+ memcpy(out + offset_out, in + offset_in, chunk);
+ kunmap_local(out);
+ kunmap_local(in);
+ __set_bit(sector_out & SECTOR_MASK, t_page_out->bitmap);
+
+ if (is_fua)
+ null_free_sector(nullb, sector_out, true);
+
+ rem -= chunk;
+ sector_in += chunk >> SECTOR_SHIFT;
+ sector_out += chunk >> SECTOR_SHIFT;
+ }
+
+ status = 0;
+err:
+ spin_unlock_irq(&nullb->lock);
+ return status;
+}
+
static int null_handle_rq(struct nullb_cmd *cmd)
{
struct request *rq = cmd->rq;
@@ -1280,13 +1365,16 @@ static int null_handle_rq(struct nullb_cmd *cmd)
sector_t sector = blk_rq_pos(rq);
struct req_iterator iter;
struct bio_vec bvec;
+ bool fua = rq->cmd_flags & REQ_FUA;
+
+ if (op_is_copy(req_op(rq)))
+ return nullb_setup_copy(nullb, rq, fua);
spin_lock_irq(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(req_op(rq)), sector,
- rq->cmd_flags & REQ_FUA);
+ op_is_write(req_op(rq)), sector, fua);
if (err) {
spin_unlock_irq(&nullb->lock);
return err;
@@ -2042,6 +2130,9 @@ static int null_validate_conf(struct nullb_device *dev)
return -EINVAL;
}
+ if (dev->queue_mode == NULL_Q_BIO)
+ dev->copy_max_bytes = 0;
+
return 0;
}
@@ -2161,6 +2252,8 @@ static int null_add_dev(struct nullb_device *dev)
dev->max_sectors = queue_max_hw_sectors(nullb->q);
dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
+ blk_queue_max_copy_hw_sectors(nullb->q,
+ dev->copy_max_bytes >> SECTOR_SHIFT);
if (dev->virt_boundary)
blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 929f659dd255..e82e53a2e2df 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -107,6 +107,7 @@ struct nullb_device {
unsigned int queue_mode; /* block interface */
unsigned int blocksize; /* block size */
unsigned int max_sectors; /* Max sectors per command */
+ unsigned long copy_max_bytes; /* Max copy offload length in bytes */
unsigned int irqmode; /* IRQ completion handler */
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index 6b2b370e786f..431c33e11a49 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -68,6 +68,29 @@ TRACE_EVENT(nullb_report_zones,
__print_disk_name(__entry->disk), __entry->nr_zones)
);
+TRACE_EVENT(nullb_copy_op,
+ TP_PROTO(struct request *req,
+ sector_t dst, sector_t src, size_t len),
+ TP_ARGS(req, dst, src, len),
+ TP_STRUCT__entry(
+ __array(char, disk, DISK_NAME_LEN)
+ __field(enum req_op, op)
+ __field(sector_t, dst)
+ __field(sector_t, src)
+ __field(size_t, len)
+ ),
+ TP_fast_assign(
+ __entry->op = req_op(req);
+ __assign_disk_name(__entry->disk, req->q->disk);
+ __entry->dst = dst;
+ __entry->src = src;
+ __entry->len = len;
+ ),
+ TP_printk("%s req=%-15s: dst=%llu, src=%llu, len=%lu",
+ __print_disk_name(__entry->disk),
+ blk_op_str(__entry->op),
+ __entry->dst, __entry->src, __entry->len)
+);
#endif /* _TRACE_NULLB_H */
#undef TRACE_INCLUDE_PATH
--
2.35.1.500.gb896f729e2
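
As an aside for testers: patches 05-06/11 of this series wire up
copy_file_range() for block devices, so with the full series applied a copy
on a null_blk device can in principle be driven from userspace. A minimal
sketch, assuming the /dev/nullb0 device configured above and treating the
offsets and length as illustrative:

  /* Sketch only: needs patches 05-06/11 of this series applied.
   * O_DIRECT is used since the series enables the direct-IO path.
   */
  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/types.h>
  #include <unistd.h>

  int main(void)
  {
          int fd = open("/dev/nullb0", O_RDWR | O_DIRECT);
          loff_t src_off = 0;             /* source offset, in bytes */
          loff_t dst_off = 1 << 20;       /* destination offset: 1 MiB */
          ssize_t ret;

          if (fd < 0) {
                  perror("open /dev/nullb0");
                  return 1;
          }

          /* copy 64 KiB within the same device; the block layer decides
           * whether to offload or fall back to emulation */
          ret = copy_file_range(fd, &src_off, fd, &dst_off, 64 * 1024, 0);
          if (ret < 0)
                  perror("copy_file_range");
          else
                  printf("copied %zd bytes\n", ret);

          close(fd);
          return 0;
  }

Whether a given copy is offloaded or emulated is a block-layer decision; the
nullb_copy_op tracepoint enabled above fires for the offloaded case.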