* [PATCH 1/2] block: move dif_prepare/dif_complete functions to block layer
@ 2018-07-22  9:49 ` Max Gurtovoy
  0 siblings, 0 replies; 16+ messages in thread
From: Max Gurtovoy @ 2018-07-22  9:49 UTC (permalink / raw)
  To: martin.petersen, linux-block, axboe, keith.busch, linux-nvme, sagi, hch
  Cc: Max Gurtovoy

Currently these functions are implemented in the SCSI layer, but their
proper place is the block layer, since T10-PI is a general data
integrity feature that is also used by the NVMe protocol.

Suggested-by: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
---
 block/blk-integrity.c  | 111 ++++++++++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/sd.c      |  12 ++++--
 drivers/scsi/sd.h      |   9 ----
 drivers/scsi/sd_dif.c  | 113 -------------------------------------------------
 include/linux/blkdev.h |  14 ++++++
 5 files changed, 134 insertions(+), 125 deletions(-)

diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 6121611e1316..66b095a866d3 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/blkdev.h>
+#include <linux/t10-pi.h>
 #include <linux/backing-dev.h>
 #include <linux/mempool.h>
 #include <linux/bio.h>
@@ -451,3 +452,113 @@ void blk_integrity_del(struct gendisk *disk)
 	kobject_del(&disk->integrity_kobj);
 	kobject_put(&disk->integrity_kobj);
 }
+
+/*
+ * The virtual start sector is the one that was originally submitted
+ * by the block layer.	Due to partitioning, MD/DM cloning, etc. the
+ * actual physical start sector is likely to be different.  Remap
+ * protection information to match the physical LBA.
+ *
+ * From a protocol perspective there's a slight difference between
+ * Type 1 and 2.  The latter uses 32-byte commands exclusively, and the
+ * reference tag is seeded in the command.  This gives us the potential to
+ * avoid virt->phys remapping during write.  However, at read time we
+ * don't know whether the virt sector is the same as when we wrote it
+ * (we could be reading from a real disk as opposed to an MD/DM device).
+ * So we always remap Type 2, making it identical to Type 1.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
+			       u32 ref_tag)
+{
+	const int tuple_sz = sizeof(struct t10_pi_tuple);
+	struct bio *bio;
+	struct t10_pi_tuple *pi;
+	u32 phys, virt;
+
+	if (protection_type == T10_PI_TYPE3_PROTECTION)
+		return;
+
+	phys = ref_tag;
+
+	__rq_for_each_bio(bio, rq) {
+		struct bio_integrity_payload *bip = bio_integrity(bio);
+		struct bio_vec iv;
+		struct bvec_iter iter;
+		unsigned int j;
+
+		/* Already remapped? */
+		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
+			break;
+
+		virt = bip_get_seed(bip) & 0xffffffff;
+
+		bip_for_each_vec(iv, bip, iter) {
+			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
+
+			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
+
+				if (be32_to_cpu(pi->ref_tag) == virt)
+					pi->ref_tag = cpu_to_be32(phys);
+
+				virt++;
+				phys++;
+			}
+
+			kunmap_atomic(pi);
+		}
+
+		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
+	}
+}
+EXPORT_SYMBOL(blk_integrity_dif_prepare);
+
+/*
+ * Remap physical sector values in the reference tag to the virtual
+ * values expected by the block layer.
+ */
+void blk_integrity_dif_complete(struct request *rq, u8 protection_type,
+				u32 ref_tag, unsigned int intervals)
+{
+	const int tuple_sz = sizeof(struct t10_pi_tuple);
+	struct bio *bio;
+	struct t10_pi_tuple *pi;
+	unsigned int j;
+	u32 phys, virt;
+
+	if (protection_type == T10_PI_TYPE3_PROTECTION)
+		return;
+
+	phys = ref_tag;
+
+	__rq_for_each_bio(bio, rq) {
+		struct bio_integrity_payload *bip = bio_integrity(bio);
+		struct bio_vec iv;
+		struct bvec_iter iter;
+
+		virt = bip_get_seed(bip) & 0xffffffff;
+
+		bip_for_each_vec(iv, bip, iter) {
+			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
+
+			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
+
+				if (intervals == 0) {
+					kunmap_atomic(pi);
+					return;
+				}
+
+				if (be32_to_cpu(pi->ref_tag) == phys)
+					pi->ref_tag = cpu_to_be32(virt);
+
+				virt++;
+				phys++;
+				intervals--;
+			}
+
+			kunmap_atomic(pi);
+		}
+	}
+}
+EXPORT_SYMBOL(blk_integrity_dif_complete);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9421d9877730..4186bf027c59 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1119,7 +1119,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 		SCpnt->cmnd[0] = WRITE_6;
 
 		if (blk_integrity_rq(rq))
-			sd_dif_prepare(SCpnt);
+			blk_integrity_dif_prepare(SCpnt->request,
+						  sdkp->protection_type,
+						  scsi_prot_ref_tag(SCpnt));
 
 	} else if (rq_data_dir(rq) == READ) {
 		SCpnt->cmnd[0] = READ_6;
@@ -2047,8 +2049,12 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 					   "sd_done: completed %d of %d bytes\n",
 					   good_bytes, scsi_bufflen(SCpnt)));
 
-	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
-		sd_dif_complete(SCpnt, good_bytes);
+	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
+	    good_bytes)
+		blk_integrity_dif_complete(SCpnt->request,
+				sdkp->protection_type,
+				scsi_prot_ref_tag(SCpnt),
+				good_bytes / scsi_prot_interval(SCpnt));
 
 	return good_bytes;
 }
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 392c7d078ae3..a7d4f50b67d4 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -254,21 +254,12 @@ static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 
 extern void sd_dif_config_host(struct scsi_disk *);
-extern void sd_dif_prepare(struct scsi_cmnd *scmd);
-extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 static inline void sd_dif_config_host(struct scsi_disk *disk)
 {
 }
-static inline int sd_dif_prepare(struct scsi_cmnd *scmd)
-{
-	return 0;
-}
-static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
-{
-}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 9035380c0dda..db72c82486e3 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -95,116 +95,3 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
 	blk_integrity_register(disk, &bi);
 }
 
-/*
- * The virtual start sector is the one that was originally submitted
- * by the block layer.	Due to partitioning, MD/DM cloning, etc. the
- * actual physical start sector is likely to be different.  Remap
- * protection information to match the physical LBA.
- *
- * From a protocol perspective there's a slight difference between
- * Type 1 and 2.  The latter uses 32-byte CDBs exclusively, and the
- * reference tag is seeded in the CDB.  This gives us the potential to
- * avoid virt->phys remapping during write.  However, at read time we
- * don't know whether the virt sector is the same as when we wrote it
- * (we could be reading from real disk as opposed to MD/DM device.  So
- * we always remap Type 2 making it identical to Type 1.
- *
- * Type 3 does not have a reference tag so no remapping is required.
- */
-void sd_dif_prepare(struct scsi_cmnd *scmd)
-{
-	const int tuple_sz = sizeof(struct t10_pi_tuple);
-	struct bio *bio;
-	struct scsi_disk *sdkp;
-	struct t10_pi_tuple *pi;
-	u32 phys, virt;
-
-	sdkp = scsi_disk(scmd->request->rq_disk);
-
-	if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION)
-		return;
-
-	phys = scsi_prot_ref_tag(scmd);
-
-	__rq_for_each_bio(bio, scmd->request) {
-		struct bio_integrity_payload *bip = bio_integrity(bio);
-		struct bio_vec iv;
-		struct bvec_iter iter;
-		unsigned int j;
-
-		/* Already remapped? */
-		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
-			break;
-
-		virt = bip_get_seed(bip) & 0xffffffff;
-
-		bip_for_each_vec(iv, bip, iter) {
-			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
-
-			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
-
-				if (be32_to_cpu(pi->ref_tag) == virt)
-					pi->ref_tag = cpu_to_be32(phys);
-
-				virt++;
-				phys++;
-			}
-
-			kunmap_atomic(pi);
-		}
-
-		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
-	}
-}
-
-/*
- * Remap physical sector values in the reference tag to the virtual
- * values expected by the block layer.
- */
-void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
-{
-	const int tuple_sz = sizeof(struct t10_pi_tuple);
-	struct scsi_disk *sdkp;
-	struct bio *bio;
-	struct t10_pi_tuple *pi;
-	unsigned int j, intervals;
-	u32 phys, virt;
-
-	sdkp = scsi_disk(scmd->request->rq_disk);
-
-	if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0)
-		return;
-
-	intervals = good_bytes / scsi_prot_interval(scmd);
-	phys = scsi_prot_ref_tag(scmd);
-
-	__rq_for_each_bio(bio, scmd->request) {
-		struct bio_integrity_payload *bip = bio_integrity(bio);
-		struct bio_vec iv;
-		struct bvec_iter iter;
-
-		virt = bip_get_seed(bip) & 0xffffffff;
-
-		bip_for_each_vec(iv, bip, iter) {
-			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
-
-			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
-
-				if (intervals == 0) {
-					kunmap_atomic(pi);
-					return;
-				}
-
-				if (be32_to_cpu(pi->ref_tag) == phys)
-					pi->ref_tag = cpu_to_be32(virt);
-
-				virt++;
-				phys++;
-				intervals--;
-			}
-
-			kunmap_atomic(pi);
-		}
-	}
-}
-
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 79226ca8f80f..18f3ca17d4f4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1823,6 +1823,10 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
 				   struct request *);
 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
 				    struct bio *);
+extern void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
+			       u32 ref_tag);
+extern void blk_integrity_dif_complete(struct request *rq, u8 protection_type,
+				u32 ref_tag, unsigned int intervals);
 
 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
@@ -1950,6 +1954,16 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 	return false;
 }
 
+void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
+			       u32 ref_tag)
+{
+}
+
+void blk_integrity_dif_complete(struct request *rq, u8 protection_type,
+				u32 ref_tag, unsigned int intervals)
+{
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct block_device_operations {
-- 
2.16.3

* [PATCH 2/2] nvme: use blk API to remap ref tags for IOs with metadata
  2018-07-22  9:49 ` Max Gurtovoy
@ 2018-07-22  9:49   ` Max Gurtovoy
  -1 siblings, 0 replies; 16+ messages in thread
From: Max Gurtovoy @ 2018-07-22  9:49 UTC (permalink / raw)
  To: martin.petersen, linux-block, axboe, keith.busch, linux-nvme, sagi, hch
  Cc: Max Gurtovoy

Also move the remapping logic into the NVMe core driver instead of
implementing it in the NVMe PCI driver. This way all the other NVMe
transport drivers will benefit from it (should they implement metadata
support).

Suggested-by: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
---
 drivers/nvme/host/core.c | 23 +++++++++++++--
 drivers/nvme/host/nvme.h |  9 +-----
 drivers/nvme/host/pci.c  | 75 +-----------------------------------------------
 3 files changed, 23 insertions(+), 84 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46df030b2c3f..0d94d3eb641c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -591,6 +591,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
 
 	if (ns->ms) {
+		u32 ref_tag = nvme_block_nr(ns, blk_rq_pos(req));
 		/*
 		 * If formated with metadata, the block layer always provides a
 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
@@ -601,6 +602,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
 				return BLK_STS_NOTSUPP;
 			control |= NVME_RW_PRINFO_PRACT;
+		} else if (req_op(req) == REQ_OP_WRITE) {
+			blk_integrity_dif_prepare(req, ns->pi_type, ref_tag);
 		}
 
 		switch (ns->pi_type) {
@@ -611,8 +614,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 		case NVME_NS_DPS_PI_TYPE2:
 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
 					NVME_RW_PRINFO_PRCHK_REF;
-			cmnd->rw.reftag = cpu_to_le32(
-					nvme_block_nr(ns, blk_rq_pos(req)));
+			cmnd->rw.reftag = cpu_to_le32(ref_tag);
 			break;
 		}
 	}
@@ -622,6 +624,23 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 	return 0;
 }
 
+void nvme_cleanup_cmd(struct request *req)
+{
+	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+	    nvme_error_status(req) == BLK_STS_OK) {
+		struct nvme_ns *ns = req->rq_disk->private_data;
+
+		blk_integrity_dif_complete(req, ns->pi_type,
+					   nvme_block_nr(ns, blk_rq_pos(req)),
+					   blk_rq_bytes(req) >> ns->lba_shift);
+	}
+	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		kfree(page_address(req->special_vec.bv_page) +
+		      req->special_vec.bv_offset);
+	}
+}
+EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd)
 {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0c4a33df3b2f..dfc01ffb30df 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -356,14 +356,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 	return (sector >> (ns->lba_shift - 9));
 }
 
-static inline void nvme_cleanup_cmd(struct request *req)
-{
-	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		kfree(page_address(req->special_vec.bv_page) +
-		      req->special_vec.bv_offset);
-	}
-}
-
 static inline void nvme_end_request(struct request *req, __le16 status,
 		union nvme_result result)
 {
@@ -420,6 +412,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ba943f211687..20851d17acf3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -535,73 +535,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
 		mempool_free(iod->sg, dev->iod_mempool);
 }
 
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-	if (be32_to_cpu(pi->ref_tag) == v)
-		pi->ref_tag = cpu_to_be32(p);
-}
-
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-	if (be32_to_cpu(pi->ref_tag) == p)
-		pi->ref_tag = cpu_to_be32(v);
-}
-
-/**
- * nvme_dif_remap - remaps ref tags to bip seed and physical lba
- *
- * The virtual start sector is the one that was originally submitted by the
- * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
- * start sector may be different. Remap protection information to match the
- * physical LBA on writes, and back to the original seed on reads.
- *
- * Type 0 and 3 do not have a ref tag, so no remapping required.
- */
-static void nvme_dif_remap(struct request *req,
-			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-	struct nvme_ns *ns = req->rq_disk->private_data;
-	struct bio_integrity_payload *bip;
-	struct t10_pi_tuple *pi;
-	void *p, *pmap;
-	u32 i, nlb, ts, phys, virt;
-
-	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
-		return;
-
-	bip = bio_integrity(req->bio);
-	if (!bip)
-		return;
-
-	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
-
-	p = pmap;
-	virt = bip_get_seed(bip);
-	phys = nvme_block_nr(ns, blk_rq_pos(req));
-	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-	ts = ns->disk->queue->integrity.tuple_size;
-
-	for (i = 0; i < nlb; i++, virt++, phys++) {
-		pi = (struct t10_pi_tuple *)p;
-		dif_swap(phys, virt, pi);
-		p += ts;
-	}
-	kunmap_atomic(pmap);
-}
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_dif_remap(struct request *req,
-			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-}
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-#endif
-
 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
 {
 	int i;
@@ -827,9 +760,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
 			goto out_unmap;
 
-		if (req_op(req) == REQ_OP_WRITE)
-			nvme_dif_remap(req, nvme_dif_prep);
-
 		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
 			goto out_unmap;
 	}
@@ -852,11 +782,8 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 
 	if (iod->nents) {
 		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-		if (blk_integrity_rq(req)) {
-			if (req_op(req) == REQ_OP_READ)
-				nvme_dif_remap(req, nvme_dif_complete);
+		if (blk_integrity_rq(req))
 			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
-		}
 	}
 
 	nvme_cleanup_cmd(req);
-- 
2.16.3

* Re: [PATCH 1/2] block: move dif_prepare/dif_complete functions to block layer
  2018-07-22  9:49 ` Max Gurtovoy
@ 2018-07-23  7:28   ` Christoph Hellwig
  -1 siblings, 0 replies; 16+ messages in thread
From: Christoph Hellwig @ 2018-07-23  7:28 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: martin.petersen, linux-block, axboe, keith.busch, linux-nvme, sagi, hch

>  #include <linux/blkdev.h>
> +#include <linux/t10-pi.h>

Sounds like the new functions should move to block/t10-pi.c given
that they are specific to the T10-defined formats.

> +/*
> + * The virtual start sector is the one that was originally submitted
> + * by the block layer.	Due to partitioning, MD/DM cloning, etc. the
> + * actual physical start sector is likely to be different.  Remap
> + * protection information to match the physical LBA.
> + *
> + * From a protocol perspective there's a slight difference between
> + * Type 1 and 2.  The latter uses 32-byte commands exclusively, and the
> + * reference tag is seeded in the command.  This gives us the potential to
> + * avoid virt->phys remapping during write.  However, at read time we
> + * don't know whether the virt sector is the same as when we wrote it
> + * (we could be reading from a real disk as opposed to an MD/DM device).
> + * So we always remap Type 2, making it identical to Type 1.
> + *
> + * Type 3 does not have a reference tag so no remapping is required.
> + */

Maybe add proper kerneldoc comments given that these are exported
functions?
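
For instance, something along these lines (purely a sketch of the style,
not wording taken from the patch):

	/**
	 * blk_integrity_dif_prepare - remap PI reference tags to physical LBAs
	 * @rq:			request carrying an integrity payload
	 * @protection_type:	T10 PI type the device is formatted with
	 * @ref_tag:		reference tag expected for the first interval
	 *
	 * Walk the request's integrity payload and rewrite reference tags
	 * that match the virtual seed so that they match the physical start
	 * LBA instead.
	 */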

The 32-byte CDB comment doesn't really make sense here as it is SCSI
specific, so we'll need to drop it or find a good place for it in the
SCSI code.

> +void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
> +			       u32 ref_tag)
> +{

Maybe call this blk_t10_pi_prepare?

> +	const int tuple_sz = sizeof(struct t10_pi_tuple);
> +	struct bio *bio;
> +	struct t10_pi_tuple *pi;
> +	u32 phys, virt;
> +
> +	if (protection_type == T10_PI_TYPE3_PROTECTION)
> +		return;
> +
> +	phys = ref_tag;

Seems like we could just use the ref_tag variable later instead of
duplicating it.

> +
> +	__rq_for_each_bio(bio, rq) {
> +		struct bio_integrity_payload *bip = bio_integrity(bio);
> +		struct bio_vec iv;
> +		struct bvec_iter iter;
> +		unsigned int j;
> +
> +		/* Already remapped? */
> +		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
> +			break;
> +
> +		virt = bip_get_seed(bip) & 0xffffffff;

Looks like we could keep the virt variable inside the loop and assign
it where declared, e.g.:

		u32 virt = bip_get_seed(bip) & 0xffffffff;

at the beginning of this block.

> +		bip_for_each_vec(iv, bip, iter) {
> +			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;

Pi can have local scope here, too.

> +			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
> +
> +				if (be32_to_cpu(pi->ref_tag) == virt)
> +					pi->ref_tag = cpu_to_be32(phys);
> +
> +				virt++;
> +				phys++;
> +			}

No need for the empty lines inside this loop.

> +/*
> + * Remap physical sector values in the reference tag to the virtual
> + * values expected by the block layer.
> + */
> +void blk_integrity_dif_complete(struct request *rq, u8 protection_type,
> +				u32 ref_tag, unsigned int intervals)

And pretty much all the comments above apply to this function as well.

> diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
> index 9421d9877730..4186bf027c59 100644
> --- a/drivers/scsi/sd.c
> +++ b/drivers/scsi/sd.c
> @@ -1119,7 +1119,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
>  		SCpnt->cmnd[0] = WRITE_6;
>  
>  		if (blk_integrity_rq(rq))
> -			sd_dif_prepare(SCpnt);
> +			blk_integrity_dif_prepare(SCpnt->request,
> +						  sdkp->protection_type,
> +						  scsi_prot_ref_tag(SCpnt));

scsi_prot_ref_tag could be moved to the block layer as it only uses
the sector in the request and the sector size, which we can get
from the gendisk as well.  We then don't need to pass it to the function.

We could also move the protection type to the gendisk, although I'm not
sure it's going to be worth it.
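
A rough sketch of what such a block layer helper could look like, using
only the request and its queue (the blk_rq_ref_tag name is made up here
for illustration, it is not an existing helper):

	static inline u32 blk_rq_ref_tag(struct request *rq)
	{
		/* same value scsi_prot_ref_tag() derives from the scsi_cmnd */
		return blk_rq_pos(rq) >>
			(ilog2(queue_logical_block_size(rq->q)) - 9);
	}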

* Re: [PATCH 2/2] nvme: use blk API to remap ref tags for IOs with metadata
  2018-07-22  9:49   ` Max Gurtovoy
@ 2018-07-23  7:33     ` Christoph Hellwig
  -1 siblings, 0 replies; 16+ messages in thread
From: Christoph Hellwig @ 2018-07-23  7:33 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: martin.petersen, linux-block, axboe, keith.busch, linux-nvme, sagi, hch

On Sun, Jul 22, 2018 at 12:49:58PM +0300, Max Gurtovoy wrote:
> Also move the remapping logic into the NVMe core driver instead of
> implementing it in the NVMe PCI driver. This way all the other NVMe
> transport drivers will benefit from it (should they implement metadata
> support).
> 
> Suggested-by: Christoph Hellwig <hch@lst.de>
> Cc: Jens Axboe <axboe@kernel.dk>
> Cc: Martin K. Petersen <martin.petersen@oracle.com>
> Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
> ---
>  drivers/nvme/host/core.c | 23 +++++++++++++--
>  drivers/nvme/host/nvme.h |  9 +-----
>  drivers/nvme/host/pci.c  | 75 +-----------------------------------------------
>  3 files changed, 23 insertions(+), 84 deletions(-)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 46df030b2c3f..0d94d3eb641c 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -591,6 +591,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
>  		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
>  
>  	if (ns->ms) {
> +		u32 ref_tag = nvme_block_nr(ns, blk_rq_pos(req));
>  		/*

Please add an empty line here.

> +void nvme_cleanup_cmd(struct request *req)
> +{
> +	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
> +	    nvme_error_status(req) == BLK_STS_OK) {

This line can simply be nvme_req(req)->status == 0

* Re: [PATCH 1/2] block: move dif_prepare/dif_complete functions to block layer
  2018-07-22  9:49 ` Max Gurtovoy
@ 2018-07-23 14:02   ` Keith Busch
  -1 siblings, 0 replies; 16+ messages in thread
From: Keith Busch @ 2018-07-23 14:02 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: martin.petersen, linux-block, axboe, keith.busch, linux-nvme, sagi, hch

On Sun, Jul 22, 2018 at 12:49:57PM +0300, Max Gurtovoy wrote:
> +void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
> +			       u32 ref_tag)
> +{
> +	const int tuple_sz = sizeof(struct t10_pi_tuple);
> +	struct bio *bio;
> +	struct t10_pi_tuple *pi;
> +	u32 phys, virt;
> +
> +	if (protection_type == T10_PI_TYPE3_PROTECTION)
> +		return;
> +
> +	phys = ref_tag;
> +
> +	__rq_for_each_bio(bio, rq) {
> +		struct bio_integrity_payload *bip = bio_integrity(bio);
> +		struct bio_vec iv;
> +		struct bvec_iter iter;
> +		unsigned int j;
> +
> +		/* Already remapped? */
> +		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
> +			break;
> +
> +		virt = bip_get_seed(bip) & 0xffffffff;
> +
> +		bip_for_each_vec(iv, bip, iter) {
> +			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
> +
> +			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {

nvme's data integrity buffer can actually have more space between each
PI field, so we just need to account for that when iterating instead of
assuming each element is the size of a T10 PI tuple.

Otherwise, great idea.
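
Something like the following, just as a sketch of the idea (stride taken
from the tuple_size registered in the queue's integrity profile, untested):

		void *p = kmap_atomic(iv.bv_page) + iv.bv_offset;
		struct blk_integrity *bi = &rq->q->integrity;

		/* step by the registered element size, not sizeof(*pi) */
		for (j = 0; j + bi->tuple_size <= iv.bv_len; j += bi->tuple_size) {
			struct t10_pi_tuple *pi = p + j;

			if (be32_to_cpu(pi->ref_tag) == virt)
				pi->ref_tag = cpu_to_be32(phys);
			virt++;
			phys++;
		}
		kunmap_atomic(p);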

* Re: [PATCH 1/2] block: move dif_prepare/dif_complete functions to block layer
  2018-07-23  7:28   ` Christoph Hellwig
@ 2018-07-24  1:54     ` Martin K. Petersen
  -1 siblings, 0 replies; 16+ messages in thread
From: Martin K. Petersen @ 2018-07-24  1:54 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Max Gurtovoy, martin.petersen, linux-block, axboe, keith.busch,
	linux-nvme, sagi


Christoph,

>> +void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
>> +			       u32 ref_tag)
>> +{
>
> Maybe call this blk_t10_pi_prepare?

The rest of these functions have a blk_integrity_ prefix. So either
stick with that or put the functions in t10-pi.c and use a t10_pi_
prefix.

I'm a bit torn on placement since the integrity metadata could contain
stuff other than T10 PI. But the remapping is very specific to T10 PI.

>> diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
>> index 9421d9877730..4186bf027c59 100644
>> --- a/drivers/scsi/sd.c
>> +++ b/drivers/scsi/sd.c
>> @@ -1119,7 +1119,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
>>  		SCpnt->cmnd[0] = WRITE_6;
>>  
>>  		if (blk_integrity_rq(rq))
>> -			sd_dif_prepare(SCpnt);
>> +			blk_integrity_dif_prepare(SCpnt->request,
>> +						  sdkp->protection_type,
>> +						  scsi_prot_ref_tag(SCpnt));
>
> scsi_prot_ref_tag could be moved to the block layer as it only uses
> the sector in the request and the sector size, which we can get
> from the gendisk as well.  We then don't need to pass it to the function.

For Type 2, the PI can be at intervals different from the logical block
size (although we don't support that yet). We should use the
blk_integrity profile interval instead of assuming sector size.

And wrt. Keith's comment: The tuple_size should be the one from the
integrity profile as well, not sizeof(struct t10_pi_tuple).
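
So, roughly, all of the remapping parameters could come straight from the
queue's integrity profile, e.g. (sketch only, the helper name is made up):

	static void t10_pi_remap_params(struct request *rq, unsigned int good_bytes,
					u32 *ref_tag, unsigned int *intervals,
					unsigned int *tuple_sz)
	{
		struct blk_integrity *bi = &rq->q->integrity;

		*ref_tag   = blk_rq_pos(rq) >> (bi->interval_exp - 9);
		*intervals = good_bytes >> bi->interval_exp;
		*tuple_sz  = bi->tuple_size;
	}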

-- 
Martin K. Petersen	Oracle Linux Engineering

* Re: [PATCH 1/2] block: move dif_prepare/dif_complete functions to block layer
  2018-07-24  1:54     ` Martin K. Petersen
@ 2018-07-24  7:57       ` Christoph Hellwig
  -1 siblings, 0 replies; 16+ messages in thread
From: Christoph Hellwig @ 2018-07-24  7:57 UTC (permalink / raw)
  To: Martin K. Petersen
  Cc: Christoph Hellwig, Max Gurtovoy, linux-block, axboe, keith.busch,
	linux-nvme, sagi

On Mon, Jul 23, 2018 at 09:54:38PM -0400, Martin K. Petersen wrote:
> 
> Christoph,
> 
> >> +void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
> >> +			       u32 ref_tag)
> >> +{
> >
> > Maybe call this blk_t10_pi_prepare?
> 
> The rest of these functions have a blk_integrity_ prefix. So either
> stick with that or put the functions in t10-pi.c and use a t10_pi_
> prefix.

Yes, I suggested moving it somewhere in my reply.

* Re: [PATCH 1/2] block: move dif_prepare/dif_complete functions to block layer
  2018-07-24  1:54     ` Martin K. Petersen
@ 2018-07-24 12:01       ` Max Gurtovoy
  -1 siblings, 0 replies; 16+ messages in thread
From: Max Gurtovoy @ 2018-07-24 12:01 UTC (permalink / raw)
  To: Martin K. Petersen, Christoph Hellwig
  Cc: linux-block, axboe, keith.busch, linux-nvme, sagi



On 7/24/2018 4:54 AM, Martin K. Petersen wrote:
> 
> Christoph,
> 
>>> +void blk_integrity_dif_prepare(struct request *rq, u8 protection_type,
>>> +			       u32 ref_tag)
>>> +{
>>
>> Maybe call this blk_t10_pi_prepare?
> 
> The rest of these functions have a blk_integrity_ prefix. So either
> stick with that or put the functions in t10-pi.c and use a t10_pi_
> prefix.
> 
> I'm a bit torn on placement since the integrity metadata could contain
> other stuff than T10 PI. But the remapping is very specific to T10 PI.
> 
>>> diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
>>> index 9421d9877730..4186bf027c59 100644
>>> --- a/drivers/scsi/sd.c
>>> +++ b/drivers/scsi/sd.c
>>> @@ -1119,7 +1119,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
>>>   		SCpnt->cmnd[0] = WRITE_6;
>>>   
>>>   		if (blk_integrity_rq(rq))
>>> -			sd_dif_prepare(SCpnt);
>>> +			blk_integrity_dif_prepare(SCpnt->request,
>>> +						  sdkp->protection_type,
>>> +						  scsi_prot_ref_tag(SCpnt));
>>
>> scsi_prot_ref_tag could be moved to the block layer as it only uses
>> the sector in the request and the sector size, which we can get
>> from the gendisk as well.  We then don't need to pass it to the function.
> 
> For Type 2, the PI can be at intervals different from the logical block
> size (although we don't support that yet). We should use the
> blk_integrity profile interval instead of assuming sector size.
> 
> And wrt. Keith's comment: The tuple_size should be the one from the
> integrity profile as well, not sizeof(struct t10_pi_tuple).
> 

OK, so I'll use rq->q->integrity.tuple_size as the tuple_sz and increment
j and pi according to it, right?
