From: Nitesh Shetty <nj.shetty@samsung.com>
To: Jens Axboe <axboe@kernel.dk>, Alasdair Kergon <agk@redhat.com>,
Mike Snitzer <snitzer@kernel.org>,
dm-devel@redhat.com, Keith Busch <kbusch@kernel.org>,
Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
James Smart <james.smart@broadcom.com>,
Chaitanya Kulkarni <kch@nvidia.com>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>
Cc: bvanassche@acm.org, hare@suse.de, ming.lei@redhat.com,
dlemoal@kernel.org, anuj20.g@samsung.com, joshi.k@samsung.com,
nitheshshetty@gmail.com, gost.dev@samsung.com,
Nitesh Shetty <nj.shetty@samsung.com>,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-nvme@lists.infradead.org, linux-fsdevel@vger.kernel.org
Subject: [PATCH v10 6/9] nvmet: add copy command support for bdev and file ns
Date: Wed, 19 Apr 2023 17:13:11 +0530
Message-ID: <20230419114320.13674-7-nj.shetty@samsung.com>
In-Reply-To: <20230419114320.13674-1-nj.shetty@samsung.com>
Add support for handling the NVMe Copy command on the target.
For a bdev-backed namespace we call into blkdev_issue_copy(), which the
block layer completes either as an offloaded copy request to the backing
bdev or by emulating the copy.
For a file-backed namespace we call vfs_copy_file_range() to service the
request.
Currently the target always advertises copy capability by setting
NVME_CTRL_ONCS_COPY in the controller ONCS field.
Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
---
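As a reading aid (not part of the patch): both the bdev and file handlers
below convert the command's 0's based LBA fields into byte offsets and
lengths the same way. A minimal userspace sketch of that arithmetic,
assuming a 4K-LBA namespace (blksize_shift = 12); all values are
illustrative:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                unsigned int blksize_shift = 12; /* log2(LBA size); 4K here */
                uint64_t sdlba = 0x1000;         /* destination LBA (cmnd->copy.sdlba) */
                uint64_t slba  = 0x2000;         /* source LBA (range.slba) */
                uint16_t nlb   = 7;              /* 0's based length: 8 blocks */

                uint64_t dst_pos = sdlba << blksize_shift;
                uint64_t src_pos = slba << blksize_shift;
                uint64_t len = (uint64_t)(nlb + 1) << blksize_shift;

                printf("copy %llu bytes: %#llx -> %#llx\n",
                       (unsigned long long)len,
                       (unsigned long long)src_pos,
                       (unsigned long long)dst_pos);
                return 0;
        }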
drivers/nvme/target/admin-cmd.c | 9 +++--
drivers/nvme/target/io-cmd-bdev.c | 58 +++++++++++++++++++++++++++++++
drivers/nvme/target/io-cmd-file.c | 52 +++++++++++++++++++++++++++
drivers/nvme/target/loop.c | 6 ++++
drivers/nvme/target/nvmet.h | 1 +
5 files changed, 124 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 39cb570f833d..8a09f99a2185 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -433,8 +433,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
- NVME_CTRL_ONCS_WRITE_ZEROES);
-
+ NVME_CTRL_ONCS_WRITE_ZEROES | NVME_CTRL_ONCS_COPY);
/* XXX: don't report vwc if the underlying device is write through */
id->vwc = NVME_CTRL_VWC_PRESENT;
@@ -536,6 +535,12 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
if (req->ns->bdev)
nvmet_bdev_set_limits(req->ns->bdev, id);
+ else {
+ id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
+ id->mssrl = cpu_to_le16(BIO_MAX_VECS <<
+ (PAGE_SHIFT - SECTOR_SHIFT));
+ id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
+ }
/*
* We just provide a single LBA format that matches what the
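For scale, the file-ns copy limits added above work out as follows: with
the usual BIO_MAX_VECS of 256 and 4K pages, MSSRL = 256 << (12 - 9) =
2048 logical blocks, i.e. 1 MiB per source range on a 512-byte-LBA
namespace, and MCL is set equal to MSSRL. A quick sketch of the same
arithmetic (the constants are assumptions matching a common x86-64
config, not taken from this patch):

        #include <stdio.h>

        int main(void)
        {
                unsigned int bio_max_vecs = 256;   /* BIO_MAX_VECS, assumed */
                unsigned int page_shift = 12, sector_shift = 9;
                unsigned int mssrl = bio_max_vecs << (page_shift - sector_shift);

                /* msrc is 0's based: to0based(BIO_MAX_VECS - 1) == 254 */
                printf("msrc  = %u\n", bio_max_vecs - 2);
                printf("mssrl = %u blocks (%u KiB per range)\n", mssrl, mssrl / 2);
                printf("mcl   = %u blocks\n", mssrl);
                return 0;
        }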
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c2d6cea0236b..0af273097aa4 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -46,6 +46,19 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+ /* Copy limits */
+ if (bdev_max_copy_sectors(bdev)) {
+ id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
+ id->mssrl = cpu_to_le16((bdev_max_copy_sectors(bdev) <<
+ SECTOR_SHIFT) / bdev_logical_block_size(bdev));
+ id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
+ } else {
+ id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
+ id->mssrl = cpu_to_le16((BIO_MAX_VECS << PAGE_SHIFT) /
+ bdev_logical_block_size(bdev));
+ id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
+ }
}
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -184,6 +197,19 @@ static void nvmet_bio_done(struct bio *bio)
nvmet_req_bio_put(req, bio);
}
+static void nvmet_bdev_copy_end_io(void *private, int comp_len)
+{
+ struct nvmet_req *req = (struct nvmet_req *)private;
+
+ if (comp_len == req->copy_len) {
+ req->cqe->result.u32 = cpu_to_le32(1);
+ nvmet_req_complete(req, errno_to_nvme_status(req, 0));
+ } else {
+ req->cqe->result.u32 = cpu_to_le32(0);
+ nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
+ }
+}
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
struct sg_mapping_iter *miter)
@@ -450,6 +476,34 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
}
}
+/* At present we handle only one range entry */
+static void nvmet_bdev_execute_copy(struct nvmet_req *req)
+{
+ struct nvme_copy_range range;
+ struct nvme_command *cmnd = req->cmd;
+ int ret;
+
+ ret = nvmet_copy_from_sgl(req, 0, &range, sizeof(range));
+ if (ret)
+ goto out;
+
+ req->copy_len = (le16_to_cpu(range.nlb) + 1) << req->ns->blksize_shift;
+ ret = blkdev_issue_copy(req->ns->bdev,
+ le64_to_cpu(cmnd->copy.sdlba) << req->ns->blksize_shift,
+ req->ns->bdev,
+ le64_to_cpu(range.slba) << req->ns->blksize_shift,
+ (le16_to_cpu(range.nlb) + 1) << req->ns->blksize_shift,
+ nvmet_bdev_copy_end_io, (void *)req, GFP_KERNEL);
+ if (ret) {
+ req->cqe->result.u32 = cpu_to_le32(0);
+ nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
+ }
+
+ return;
+out:
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
+}
+
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
switch (req->cmd->common.opcode) {
@@ -468,6 +522,10 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
+ case nvme_cmd_copy:
+ req->execute = nvmet_bdev_execute_copy;
+ return 0;
+
default:
return nvmet_report_invalid_opcode(req);
}
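For context: nvmet_bdev_execute_copy() above is a thin wrapper around
blkdev_issue_copy() as introduced earlier in this series, which takes the
destination bdev/byte-offset pair first, then the source pair, the length
in bytes, a completion callback, private data and a gfp mask. A minimal
kernel-side caller might look like the sketch below; my_copy_endio(),
my_copy_first_mib() and the fixed offsets are made up for illustration:

        #include <linux/blkdev.h>
        #include <linux/sizes.h>

        /* Completion callback: comp_len is the number of bytes actually copied. */
        static void my_copy_endio(void *private, int comp_len)
        {
                pr_info("copy finished: %d bytes (ctx %p)\n", comp_len, private);
        }

        static int my_copy_first_mib(struct block_device *bdev)
        {
                /* Copy 1 MiB from byte offset SZ_1M down to offset 0, same bdev. */
                return blkdev_issue_copy(bdev, 0,     /* dst bdev, dst offset */
                                         bdev, SZ_1M, /* src bdev, src offset */
                                         SZ_1M, my_copy_endio, NULL, GFP_KERNEL);
        }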
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 2d068439b129..69f198ecec77 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -322,6 +322,49 @@ static void nvmet_file_dsm_work(struct work_struct *w)
}
}
+static void nvmet_file_copy_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+ int nr_range;
+ loff_t pos, src;
+ struct nvme_command *cmnd = req->cmd;
+ int ret = 0, len = 0, id;
+
+ nr_range = cmnd->copy.nr_range + 1;
+ pos = le64_to_cpu(req->cmd->copy.sdlba) << req->ns->blksize_shift;
+ if (unlikely(pos + req->transfer_len > req->ns->size)) {
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+ return;
+ }
+
+ for (id = 0 ; id < nr_range; id++) {
+ struct nvme_copy_range range;
+
+ ret = nvmet_copy_from_sgl(req, id * sizeof(range), &range,
+ sizeof(range));
+ if (ret)
+ goto out;
+
+ len = (le16_to_cpu(range.nlb) + 1) << (req->ns->blksize_shift);
+ src = (le64_to_cpu(range.slba) << (req->ns->blksize_shift));
+ ret = vfs_copy_file_range(req->ns->file, src, req->ns->file,
+ pos, len, 0);
+out:
+ if (ret != len) {
+ pos += ret;
+ req->cqe->result.u32 = cpu_to_le32(id);
+ nvmet_req_complete(req, ret < 0 ?
+ errno_to_nvme_status(req, ret) :
+ errno_to_nvme_status(req, -EIO));
+ return;
+
+ } else
+ pos += len;
+ }
+
+ nvmet_req_complete(req, 0);
+
+}
static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
@@ -330,6 +373,12 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
queue_work(nvmet_wq, &req->f.work);
}
+static void nvmet_file_execute_copy(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_copy_work);
+ queue_work(nvmet_wq, &req->f.work);
+}
+
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
@@ -376,6 +425,9 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_write_zeroes:
req->execute = nvmet_file_execute_write_zeroes;
return 0;
+ case nvme_cmd_copy:
+ req->execute = nvmet_file_execute_copy;
+ return 0;
default:
return nvmet_report_invalid_opcode(req);
}
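Each loop iteration in nvmet_file_copy_work() above is the in-kernel
equivalent of one copy_file_range(2) call, and a short return is treated
as a failure with the index of the offending range returned in the CQE.
A userspace sketch of a single iteration (the path and offsets are
placeholders):

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/tmp/backing-file", O_RDWR); /* placeholder */
                off_t src = (off_t)0x2000 << 12;  /* range.slba << blksize_shift */
                off_t dst = (off_t)0x1000 << 12;  /* sdlba << blksize_shift */
                size_t len = (7 + 1) << 12;       /* (range.nlb + 1) << blksize_shift */
                ssize_t ret;

                if (fd < 0)
                        return 1;
                ret = copy_file_range(fd, &src, fd, &dst, len, 0);
                printf("copied %zd of %zu bytes\n", ret, len);
                close(fd);
                return ret == (ssize_t)len ? 0 : 1;
        }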
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f2d24b2d992f..d18ed8067a15 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -146,6 +146,12 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
return ret;
nvme_start_request(req);
+ if (unlikely((req->cmd_flags & REQ_COPY) &&
+ (req_op(req) == REQ_OP_READ))) {
+ blk_mq_set_request_complete(req);
+ blk_mq_end_request(req, BLK_STS_OK);
+ return BLK_STS_OK;
+ }
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
iod->req.port = queue->ctrl->port;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
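For context on the hunk above (a sketch of the surrounding design, as set
up by the earlier block-layer and host-driver patches in this series): a
copy travels down as a pair of requests flagged REQ_COPY, a READ leg for
the source and a WRITE leg that carries the actual copy. Only the WRITE
leg becomes an nvmet request here; the READ leg is acked inline, per the
check restated below outside the driver (the helper name is made up):

        #include <linux/blk-mq.h>

        /* True for the source-side placeholder request of a REQ_COPY pair. */
        static bool is_copy_read_leg(struct request *req)
        {
                return (req->cmd_flags & REQ_COPY) && req_op(req) == REQ_OP_READ;
        }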
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index dc60a22646f7..1615dc9194ba 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -393,6 +393,7 @@ struct nvmet_req {
struct device *p2p_client;
u16 error_loc;
u64 error_slba;
+ size_t copy_len;
};
#define NVMET_MAX_MPOOL_BVEC 16
--
2.35.1.500.gb896f729e2
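To exercise the target end to end, the host can submit a Copy command
(opcode 0x19) through the NVMe passthrough ioctl; recent nvme-cli also
grew a "nvme copy" subcommand. Below is a hedged sketch of the raw-ioctl
route for a single source range; the format-0 descriptor struct is
abbreviated from the NVMe spec, and the device path, NSID and LBAs are
placeholders:

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>
        #include <endian.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/nvme_ioctl.h>

        struct copy_desc_fmt0 {         /* 32-byte source range descriptor */
                uint8_t  rsvd0[8];
                uint64_t slba;          /* little-endian on the wire */
                uint16_t nlb;           /* 0's based */
                uint8_t  rsvd18[14];
        } __attribute__((packed));

        int main(void)
        {
                struct copy_desc_fmt0 desc;
                struct nvme_passthru_cmd cmd;
                uint64_t sdlba = 0x1000;
                int fd, ret;

                memset(&desc, 0, sizeof(desc));
                desc.slba = htole64(0x2000);
                desc.nlb = htole16(7);          /* copy 8 LBAs */

                memset(&cmd, 0, sizeof(cmd));
                cmd.opcode = 0x19;              /* nvme_cmd_copy */
                cmd.nsid = 1;
                cmd.addr = (uint64_t)(uintptr_t)&desc;
                cmd.data_len = sizeof(desc);
                cmd.cdw10 = sdlba & 0xffffffff;
                cmd.cdw11 = sdlba >> 32;
                cmd.cdw12 = 0;                  /* NR = 0: one range, format 0 */

                fd = open("/dev/nvme0n1", O_RDWR); /* placeholder device */
                if (fd < 0)
                        return 1;
                ret = ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
                printf("copy returned %d\n", ret);
                close(fd);
                return ret ? 1 : 0;
        }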