* bsg cleanup, part 2
@ 2021-07-29  6:48 Christoph Hellwig
  2021-07-29  6:48 ` [PATCH 1/4] bsg: simplify device registration Christoph Hellwig
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Christoph Hellwig @ 2021-07-29  6:48 UTC (permalink / raw)
  To: Martin K. Petersen; +Cc: Jens Axboe, FUJITA Tomonori, linux-block, linux-scsi

Hi Martin,

this is the next round of bsg cleanups based on the previous scsi ioctl
changes.  The biggest changes are a major simplification of how the bsg
nodes are created and found, and a simplification of the interface
between the frontend in bsg.c and the two backends.
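
For reference, here is roughly what the end result looks like for a
transport driver once the whole series is applied.  This is only a
sketch with a made-up foo_sg_io_fn name and without error handling; the
real prototypes are in the patches below:

  /* one callback now owns the whole SG_IO execution (see patch 4) */
  static int foo_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
          fmode_t mode, unsigned int timeout);

  /* registration returns a handle instead of hooking into the queue */
  bd = bsg_register_queue(q, dev, dev_name(dev), foo_sg_io_fn);
  if (IS_ERR(bd))
          return PTR_ERR(bd);
  ...
  bsg_unregister_queue(bd);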

Diffstat:
 block/blk-mq.c             |    2 
 block/bsg-lib.c            |   89 +++++------
 block/bsg.c                |  353 ++++++++++-----------------------------------
 drivers/scsi/scsi_bsg.c    |   72 +++++----
 drivers/scsi/scsi_ioctl.c  |   63 +++-----
 drivers/scsi/scsi_priv.h   |   11 -
 drivers/scsi/scsi_scan.c   |    2 
 drivers/scsi/scsi_sysfs.c  |   24 ++-
 include/linux/blkdev.h     |   14 -
 include/linux/bsg-lib.h    |    1 
 include/linux/bsg.h        |   31 +--
 include/scsi/scsi_device.h |    5 
 12 files changed, 229 insertions(+), 438 deletions(-)

* [PATCH 1/4] bsg: simplify device registration
  2021-07-29  6:48 bsg cleanup, part 2 Christoph Hellwig
@ 2021-07-29  6:48 ` Christoph Hellwig
  2021-07-29  6:48 ` [PATCH 2/4] block: remove BLK_SCSI_MAX_CMDS Christoph Hellwig
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2021-07-29  6:48 UTC (permalink / raw)
  To: Martin K. Petersen; +Cc: Jens Axboe, FUJITA Tomonori, linux-block, linux-scsi

Use the per-device cdev_device_add() interface to store the bsg data in
the char device inode, and thus remove the need to embed the
bsg_class_device structure in the request_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
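A condensed view of the new layout, pulled together from the hunks below
(not complete code): the bsg data now hangs off an embedded device/cdev
pair, and the file operations recover it straight from the inode.

  struct bsg_device {
          struct request_queue *queue;
          const struct bsg_ops *ops;
          struct device device;
          struct cdev cdev;
          int max_queue;
  };

  static inline struct bsg_device *to_bsg_device(struct inode *inode)
  {
          return container_of(inode->i_cdev, struct bsg_device, cdev);
  }

  /* registration boils down to the usual cdev_device_add() pattern */
  device_initialize(&bd->device);
  cdev_init(&bd->cdev, &bsg_fops);
  bd->cdev.owner = THIS_MODULE;
  ret = cdev_device_add(&bd->cdev, &bd->device);
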
 block/bsg-lib.c            |  11 +-
 block/bsg.c                | 304 +++++++++----------------------------
 drivers/scsi/scsi_bsg.c    |   5 +-
 drivers/scsi/scsi_priv.h   |  11 +-
 drivers/scsi/scsi_sysfs.c  |  24 ++-
 include/linux/blkdev.h     |   6 -
 include/linux/bsg-lib.h    |   1 +
 include/linux/bsg.h        |  21 +--
 include/scsi/scsi_device.h |   2 +
 9 files changed, 108 insertions(+), 277 deletions(-)

diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index a89d80102304..fe43f5fda6e5 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -6,6 +6,7 @@
  *  Copyright (C) 2011   Red Hat, Inc.  All rights reserved.
  *  Copyright (C) 2011   Mike Christie
  */
+#include <linux/bsg.h>
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/delay.h>
@@ -19,6 +20,7 @@
 
 struct bsg_set {
 	struct blk_mq_tag_set	tag_set;
+	struct bsg_device	*bd;
 	bsg_job_fn		*job_fn;
 	bsg_timeout_fn		*timeout_fn;
 };
@@ -327,7 +329,7 @@ void bsg_remove_queue(struct request_queue *q)
 		struct bsg_set *bset =
 			container_of(q->tag_set, struct bsg_set, tag_set);
 
-		bsg_unregister_queue(q);
+		bsg_unregister_queue(bset->bd);
 		blk_cleanup_queue(q);
 		blk_mq_free_tag_set(&bset->tag_set);
 		kfree(bset);
@@ -396,10 +398,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
-	if (ret) {
-		printk(KERN_ERR "%s: bsg interface failed to "
-		       "initialize - register queue\n", dev->kobj.name);
+	bset->bd = bsg_register_queue(q, dev, name, &bsg_transport_ops);
+	if (IS_ERR(bset->bd)) {
+		ret = PTR_ERR(bset->bd);
 		goto out_cleanup_queue;
 	}
 
diff --git a/block/bsg.c b/block/bsg.c
index 3dbfd2c6aef3..83a095185d33 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -20,38 +20,29 @@
 #define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
 #define BSG_VERSION	"0.4"
 
-#define bsg_dbg(bd, fmt, ...) \
-	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
-
 struct bsg_device {
 	struct request_queue *queue;
-	spinlock_t lock;
-	struct hlist_node dev_list;
-	refcount_t ref_count;
-	char name[20];
+	const struct bsg_ops *ops;
+	struct device device;
+	struct cdev cdev;
 	int max_queue;
 };
 
+static inline struct bsg_device *to_bsg_device(struct inode *inode)
+{
+	return container_of(inode->i_cdev, struct bsg_device, cdev);
+}
+
 #define BSG_DEFAULT_CMDS	64
 #define BSG_MAX_DEVS		32768
 
-static DEFINE_MUTEX(bsg_mutex);
-static DEFINE_IDR(bsg_minor_idr);
-
-#define BSG_LIST_ARRAY_SIZE	8
-static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
-
+static DEFINE_IDA(bsg_minor_ida);
 static struct class *bsg_class;
 static int bsg_major;
 
-static inline struct hlist_head *bsg_dev_idx_hash(int index)
-{
-	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
-}
-
 #define uptr64(val) ((void __user *)(uintptr_t)(val))
 
-static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
+static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 {
 	struct request *rq;
 	struct bio *bio;
@@ -61,21 +52,18 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
 	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 		return -EFAULT;
 
-	if (!q->bsg_dev.class_dev)
-		return -ENXIO;
-
 	if (hdr.guard != 'Q')
 		return -EINVAL;
-	ret = q->bsg_dev.ops->check_proto(&hdr);
+	ret = bd->ops->check_proto(&hdr);
 	if (ret)
 		return ret;
 
-	rq = blk_get_request(q, hdr.dout_xfer_len ?
+	rq = blk_get_request(bd->queue, hdr.dout_xfer_len ?
 			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
+	ret = bd->ops->fill_hdr(rq, &hdr, mode);
 	if (ret) {
 		blk_put_request(rq);
 		return ret;
@@ -83,17 +71,17 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
 
 	rq->timeout = msecs_to_jiffies(hdr.timeout);
 	if (!rq->timeout)
-		rq->timeout = q->sg_timeout;
+		rq->timeout = rq->q->sg_timeout;
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
 		rq->timeout = BLK_MIN_SG_TIMEOUT;
 
 	if (hdr.dout_xfer_len) {
-		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.dout_xferp),
 				hdr.dout_xfer_len, GFP_KERNEL);
 	} else if (hdr.din_xfer_len) {
-		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.din_xferp),
 				hdr.din_xfer_len, GFP_KERNEL);
 	}
 
@@ -103,171 +91,50 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
 	bio = rq->bio;
 
 	blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
-	ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
+	ret = bd->ops->complete_rq(rq, &hdr);
 	blk_rq_unmap_user(bio);
 
 out_free_rq:
-	rq->q->bsg_dev.ops->free_rq(rq);
+	bd->ops->free_rq(rq);
 	blk_put_request(rq);
 	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
 		return -EFAULT;
 	return ret;
 }
 
-static struct bsg_device *bsg_alloc_device(void)
-{
-	struct bsg_device *bd;
-
-	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
-	if (unlikely(!bd))
-		return NULL;
-
-	spin_lock_init(&bd->lock);
-	bd->max_queue = BSG_DEFAULT_CMDS;
-	INIT_HLIST_NODE(&bd->dev_list);
-	return bd;
-}
-
-static int bsg_put_device(struct bsg_device *bd)
-{
-	struct request_queue *q = bd->queue;
-
-	mutex_lock(&bsg_mutex);
-
-	if (!refcount_dec_and_test(&bd->ref_count)) {
-		mutex_unlock(&bsg_mutex);
-		return 0;
-	}
-
-	hlist_del(&bd->dev_list);
-	mutex_unlock(&bsg_mutex);
-
-	bsg_dbg(bd, "tearing down\n");
-
-	/*
-	 * close can always block
-	 */
-	kfree(bd);
-	blk_put_queue(q);
-	return 0;
-}
-
-static struct bsg_device *bsg_add_device(struct inode *inode,
-					 struct request_queue *rq,
-					 struct file *file)
-{
-	struct bsg_device *bd;
-	unsigned char buf[32];
-
-	lockdep_assert_held(&bsg_mutex);
-
-	if (!blk_get_queue(rq))
-		return ERR_PTR(-ENXIO);
-
-	bd = bsg_alloc_device();
-	if (!bd) {
-		blk_put_queue(rq);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	bd->queue = rq;
-
-	refcount_set(&bd->ref_count, 1);
-	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
-
-	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
-	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
-		format_dev_t(buf, inode->i_rdev), bd->max_queue);
-
-	return bd;
-}
-
-static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
-{
-	struct bsg_device *bd;
-
-	lockdep_assert_held(&bsg_mutex);
-
-	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
-		if (bd->queue == q) {
-			refcount_inc(&bd->ref_count);
-			goto found;
-		}
-	}
-	bd = NULL;
-found:
-	return bd;
-}
-
-static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
-{
-	struct bsg_device *bd;
-	struct bsg_class_device *bcd;
-
-	/*
-	 * find the class device
-	 */
-	mutex_lock(&bsg_mutex);
-	bcd = idr_find(&bsg_minor_idr, iminor(inode));
-
-	if (!bcd) {
-		bd = ERR_PTR(-ENODEV);
-		goto out_unlock;
-	}
-
-	bd = __bsg_get_device(iminor(inode), bcd->queue);
-	if (!bd)
-		bd = bsg_add_device(inode, bcd->queue, file);
-
-out_unlock:
-	mutex_unlock(&bsg_mutex);
-	return bd;
-}
-
 static int bsg_open(struct inode *inode, struct file *file)
 {
-	struct bsg_device *bd;
-
-	bd = bsg_get_device(inode, file);
-
-	if (IS_ERR(bd))
-		return PTR_ERR(bd);
-
-	file->private_data = bd;
+	if (!blk_get_queue(to_bsg_device(inode)->queue))
+		return -ENXIO;
 	return 0;
 }
 
 static int bsg_release(struct inode *inode, struct file *file)
 {
-	struct bsg_device *bd = file->private_data;
-
-	file->private_data = NULL;
-	return bsg_put_device(bd);
+	blk_put_queue(to_bsg_device(inode)->queue);
+	return 0;
 }
 
 static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
 {
-	return put_user(bd->max_queue, uarg);
+	return put_user(READ_ONCE(bd->max_queue), uarg);
 }
 
 static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
 {
-	int queue;
+	int max_queue;
 
-	if (get_user(queue, uarg))
+	if (get_user(max_queue, uarg))
 		return -EFAULT;
-	if (queue < 1)
+	if (max_queue < 1)
 		return -EINVAL;
-
-	spin_lock_irq(&bd->lock);
-	bd->max_queue = queue;
-	spin_unlock_irq(&bd->lock);
+	WRITE_ONCE(bd->max_queue, max_queue);
 	return 0;
 }
 
 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct bsg_device *bd = file->private_data;
+	struct bsg_device *bd = to_bsg_device(file_inode(file));
 	struct request_queue *q = bd->queue;
 	void __user *uarg = (void __user *) arg;
 	int __user *intp = uarg;
@@ -312,7 +179,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case SG_EMULATED_HOST:
 		return put_user(1, intp);
 	case SG_IO:
-		return bsg_sg_io(q, file->f_mode, uarg);
+		return bsg_sg_io(bd, file->f_mode, uarg);
 	case SCSI_IOCTL_SEND_COMMAND:
 		pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
 				current->comm);
@@ -331,83 +198,66 @@ static const struct file_operations bsg_fops = {
 	.llseek		=	default_llseek,
 };
 
-void bsg_unregister_queue(struct request_queue *q)
+void bsg_unregister_queue(struct bsg_device *bd)
 {
-	struct bsg_class_device *bcd = &q->bsg_dev;
-
-	if (!bcd->class_dev)
-		return;
-
-	mutex_lock(&bsg_mutex);
-	idr_remove(&bsg_minor_idr, bcd->minor);
-	if (q->kobj.sd)
-		sysfs_remove_link(&q->kobj, "bsg");
-	device_unregister(bcd->class_dev);
-	bcd->class_dev = NULL;
-	mutex_unlock(&bsg_mutex);
+	if (bd->queue->kobj.sd)
+		sysfs_remove_link(&bd->queue->kobj, "bsg");
+	cdev_device_del(&bd->cdev, &bd->device);
+	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+	kfree(bd);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
-int bsg_register_queue(struct request_queue *q, struct device *parent,
-		const char *name, const struct bsg_ops *ops)
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+		struct device *parent, const char *name,
+		const struct bsg_ops *ops)
 {
-	struct bsg_class_device *bcd;
-	dev_t dev;
+	struct bsg_device *bd;
 	int ret;
-	struct device *class_dev = NULL;
-
-	/*
-	 * we need a proper transport to send commands, not a stacked device
-	 */
-	if (!queue_is_mq(q))
-		return 0;
 
-	bcd = &q->bsg_dev;
-	memset(bcd, 0, sizeof(*bcd));
-
-	mutex_lock(&bsg_mutex);
+	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+	if (!bd)
+		return ERR_PTR(-ENOMEM);
+	bd->max_queue = BSG_DEFAULT_CMDS;
+	bd->queue = q;
+	bd->ops = ops;
 
-	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+	ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
 	if (ret < 0) {
-		if (ret == -ENOSPC) {
-			printk(KERN_ERR "bsg: too many bsg devices\n");
-			ret = -EINVAL;
-		}
-		goto unlock;
-	}
-
-	bcd->minor = ret;
-	bcd->queue = q;
-	bcd->ops = ops;
-	dev = MKDEV(bsg_major, bcd->minor);
-	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
-	if (IS_ERR(class_dev)) {
-		ret = PTR_ERR(class_dev);
-		goto idr_remove;
+		if (ret == -ENOSPC)
+			dev_err(parent, "bsg: too many bsg devices\n");
+		goto out_kfree;
 	}
-	bcd->class_dev = class_dev;
+	bd->device.devt = MKDEV(bsg_major, ret);
+	bd->device.class = bsg_class;
+	bd->device.parent = parent;
+	dev_set_name(&bd->device, "%s", name);
+	device_initialize(&bd->device);
+
+	cdev_init(&bd->cdev, &bsg_fops);
+	bd->cdev.owner = THIS_MODULE;
+	ret = cdev_device_add(&bd->cdev, &bd->device);
+	if (ret)
+		goto out_ida_remove;
 
 	if (q->kobj.sd) {
-		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+		ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
 		if (ret)
-			goto unregister_class_dev;
+			goto out_device_del;
 	}
 
-	mutex_unlock(&bsg_mutex);
-	return 0;
+	return bd;
 
-unregister_class_dev:
-	device_unregister(class_dev);
-idr_remove:
-	idr_remove(&bsg_minor_idr, bcd->minor);
-unlock:
-	mutex_unlock(&bsg_mutex);
-	return ret;
+out_device_del:
+	cdev_device_del(&bd->cdev, &bd->device);
+out_ida_remove:
+	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+out_kfree:
+	kfree(bd);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_register_queue);
 
-static struct cdev bsg_cdev;
-
 static char *bsg_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
@@ -415,11 +265,8 @@ static char *bsg_devnode(struct device *dev, umode_t *mode)
 
 static int __init bsg_init(void)
 {
-	int ret, i;
 	dev_t devid;
-
-	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
-		INIT_HLIST_HEAD(&bsg_device_list[i]);
+	int ret;
 
 	bsg_class = class_create(THIS_MODULE, "bsg");
 	if (IS_ERR(bsg_class))
@@ -429,19 +276,12 @@ static int __init bsg_init(void)
 	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
 	if (ret)
 		goto destroy_bsg_class;
-
 	bsg_major = MAJOR(devid);
 
-	cdev_init(&bsg_cdev, &bsg_fops);
-	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
-	if (ret)
-		goto unregister_chrdev;
-
 	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
 	       " loaded (major %d)\n", bsg_major);
 	return 0;
-unregister_chrdev:
-	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+
 destroy_bsg_class:
 	class_destroy(bsg_class);
 	return ret;
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 68f60316adf1..c0d41c45c2be 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -89,7 +89,8 @@ static const struct bsg_ops scsi_bsg_ops = {
 	.free_rq		= scsi_bsg_free_rq,
 };
 
-int scsi_bsg_register_queue(struct request_queue *q, struct device *parent)
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
 {
-	return bsg_register_queue(q, parent, dev_name(parent), &scsi_bsg_ops);
+	return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
+				  dev_name(&sdev->sdev_gendev), &scsi_bsg_ops);
 }
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 0a0db35bab04..6d9152031a40 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -7,6 +7,7 @@
 #include <scsi/scsi_device.h>
 #include <linux/sbitmap.h>
 
+struct bsg_device;
 struct request_queue;
 struct request;
 struct scsi_cmnd;
@@ -180,15 +181,7 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
 static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
 #endif
 
-#ifdef CONFIG_BLK_DEV_BSG
-int scsi_bsg_register_queue(struct request_queue *q, struct device *parent);
-#else
-static inline int scsi_bsg_register_queue(struct request_queue *q,
-		struct device *parent)
-{
-	return 0;
-}
-#endif
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev);
 
 extern int scsi_device_max_queue_depth(struct scsi_device *sdev);
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4ff9ac3296d8..07cee8dc4100 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
+#include <linux/bsg.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -1327,7 +1328,6 @@ static int scsi_target_add(struct scsi_target *starget)
 int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 {
 	int error, i;
-	struct request_queue *rq = sdev->request_queue;
 	struct scsi_target *starget = sdev->sdev_target;
 
 	error = scsi_target_add(starget);
@@ -1366,12 +1366,19 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 	transport_add_device(&sdev->sdev_gendev);
 	sdev->is_visible = 1;
 
-	error = scsi_bsg_register_queue(rq, &sdev->sdev_gendev);
-	if (error)
-		/* we're treating error on bsg register as non-fatal,
-		 * so pretend nothing went wrong */
-		sdev_printk(KERN_INFO, sdev,
-			    "Failed to register bsg queue, errno=%d\n", error);
+	if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
+		sdev->bsg_dev = scsi_bsg_register_queue(sdev);
+		if (IS_ERR(sdev->bsg_dev)) {
+			/*
+			 * We're treating error on bsg register as non-fatal, so
+			 * pretend nothing went wrong.
+			 */
+			sdev_printk(KERN_INFO, sdev,
+				    "Failed to register bsg queue, errno=%d\n",
+				    error);
+			sdev->bsg_dev = NULL;
+		}
+	}
 
 	/* add additional host specific attributes */
 	if (sdev->host->hostt->sdev_attrs) {
@@ -1433,7 +1440,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
 			sysfs_remove_groups(&sdev->sdev_gendev.kobj,
 					sdev->host->hostt->sdev_groups);
 
-		bsg_unregister_queue(sdev->request_queue);
+		if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev)
+			bsg_unregister_queue(sdev->bsg_dev);
 		device_unregister(&sdev->sdev_dev);
 		transport_remove_device(dev);
 		device_del(dev);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8c617a5a5d61..28957ccdd9c2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -18,7 +18,6 @@
 #include <linux/bio.h>
 #include <linux/stringify.h>
 #include <linux/gfp.h>
-#include <linux/bsg.h>
 #include <linux/smp.h>
 #include <linux/rcupdate.h>
 #include <linux/percpu-refcount.h>
@@ -33,7 +32,6 @@ struct elevator_queue;
 struct blk_trace;
 struct request;
 struct sg_io_hdr;
-struct bsg_job;
 struct blkcg_gq;
 struct blk_flush_queue;
 struct pr_ops;
@@ -535,10 +533,6 @@ struct request_queue {
 
 	int			mq_freeze_depth;
 
-#if IS_ENABLED(CONFIG_BLK_DEV_BSG_COMMON)
-	struct bsg_class_device bsg_dev;
-#endif
-
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	/* Throttle data */
 	struct throtl_data *td;
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 960988d42f77..6b211323a489 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -12,6 +12,7 @@
 #include <linux/blkdev.h>
 #include <scsi/scsi_request.h>
 
+struct bsg_job;
 struct request;
 struct device;
 struct scatterlist;
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index b887da20bd41..fa21f79beda2 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -4,10 +4,11 @@
 
 #include <uapi/linux/bsg.h>
 
+struct bsg_device;
+struct device;
 struct request;
 struct request_queue;
 
-#ifdef CONFIG_BLK_DEV_BSG_COMMON
 struct bsg_ops {
 	int	(*check_proto)(struct sg_io_v4 *hdr);
 	int	(*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
@@ -16,19 +17,9 @@ struct bsg_ops {
 	void	(*free_rq)(struct request *rq);
 };
 
-struct bsg_class_device {
-	struct device *class_dev;
-	int minor;
-	struct request_queue *queue;
-	const struct bsg_ops *ops;
-};
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+		struct device *parent, const char *name,
+		const struct bsg_ops *ops);
+void bsg_unregister_queue(struct bsg_device *bcd);
 
-int bsg_register_queue(struct request_queue *q, struct device *parent,
-		const char *name, const struct bsg_ops *ops);
-void bsg_unregister_queue(struct request_queue *q);
-#else
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif /* CONFIG_BLK_DEV_BSG_COMMON */
 #endif /* _LINUX_BSG_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d1de21f799f4..99082da1b951 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -10,6 +10,7 @@
 #include <linux/atomic.h>
 #include <linux/sbitmap.h>
 
+struct bsg_device;
 struct device;
 struct request_queue;
 struct scsi_cmnd;
@@ -235,6 +236,7 @@ struct scsi_device {
 	size_t			dma_drain_len;
 	void			*dma_drain_buf;
 
+	struct bsg_device	*bsg_dev;
 	unsigned char		access_state;
 	struct mutex		state_mutex;
 	enum scsi_device_state sdev_state;
-- 
2.30.2


* [PATCH 2/4] block: remove BLK_SCSI_MAX_CMDS
  2021-07-29  6:48 bsg cleanup, part 2 Christoph Hellwig
  2021-07-29  6:48 ` [PATCH 1/4] bsg: simplify device registration Christoph Hellwig
@ 2021-07-29  6:48 ` Christoph Hellwig
  2021-07-29  6:48 ` [PATCH 3/4] block: remove the remaining SG_IO-related fields from struct request_queue Christoph Hellwig
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2021-07-29  6:48 UTC (permalink / raw)
  To: Martin K. Petersen; +Cc: Jens Axboe, FUJITA Tomonori, linux-block, linux-scsi

These constants were used by the table-based scsi passthrough permission
checking, which is gone now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/blkdev.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 28957ccdd9c2..e0bb14acb708 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -271,9 +271,6 @@ enum blk_queue_state {
 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
 
-#define BLK_SCSI_MAX_CMDS	(256)
-#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
 /*
  * Zoned block device models (zoned limit).
  *
-- 
2.30.2


* [PATCH 3/4] block: remove the remaining SG_IO-related fields from struct request_queue
  2021-07-29  6:48 bsg cleanup, part 2 Christoph Hellwig
  2021-07-29  6:48 ` [PATCH 1/4] bsg: simplify device registration Christoph Hellwig
  2021-07-29  6:48 ` [PATCH 2/4] block: remove BLK_SCSI_MAX_CMDS Christoph Hellwig
@ 2021-07-29  6:48 ` Christoph Hellwig
  2021-07-29  6:48 ` [PATCH 4/4] bsg: move the whole request execution into the scsi/transport handlers Christoph Hellwig
  2021-07-31  2:29 ` bsg cleanup, part 2 Martin K. Petersen
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2021-07-29  6:48 UTC (permalink / raw)
  To: Martin K. Petersen; +Cc: Jens Axboe, FUJITA Tomonori, linux-block, linux-scsi

Move the sg_timeout and sg_reserved_size fields into the bsg_device and
scsi_device structures as they have nothing to do with generic block I/O.
Note that these values are now separate for bsg vs scsi device node
access, but that just matches how /dev/sg has always behaved relative to
the other device nodes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
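From userspace the observable effect is that the SG_* timeout/reserved
size ioctls on a bsg node now only affect that node.  A quick
illustration (hypothetical device path, untested sketch):

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <scsi/sg.h>

  int main(void)
  {
          int fd = open("/dev/bsg/0:0:0:0", O_RDWR);  /* hypothetical */
          int timeout = 60 * sysconf(_SC_CLK_TCK);    /* ~60s in clock ticks */

          /*
           * After this patch the value set here applies to SG_IO on this
           * bsg node only; /dev/sgN and ioctls on the block device node
           * keep their own, separate values.
           */
          ioctl(fd, SG_SET_TIMEOUT, &timeout);
          return 0;
  }
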
 block/blk-mq.c             |  2 --
 block/bsg.c                | 13 +++++---
 drivers/scsi/scsi_ioctl.c  | 63 ++++++++++++++++++--------------------
 drivers/scsi/scsi_scan.c   |  2 ++
 include/linux/blkdev.h     |  5 ---
 include/scsi/scsi_device.h |  3 ++
 6 files changed, 43 insertions(+), 45 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2c4ac51e54eb..495f508c6300 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3298,8 +3298,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	    set->map[HCTX_TYPE_POLL].nr_queues)
 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 
-	q->sg_reserved_size = INT_MAX;
-
 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
diff --git a/block/bsg.c b/block/bsg.c
index 83a095185d33..3ba74eec4ba2 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -26,6 +26,8 @@ struct bsg_device {
 	struct device device;
 	struct cdev cdev;
 	int max_queue;
+	unsigned int timeout;
+	unsigned int reserved_size;
 };
 
 static inline struct bsg_device *to_bsg_device(struct inode *inode)
@@ -71,7 +73,7 @@ static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 
 	rq->timeout = msecs_to_jiffies(hdr.timeout);
 	if (!rq->timeout)
-		rq->timeout = rq->q->sg_timeout;
+		rq->timeout = bd->timeout;
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
@@ -161,19 +163,19 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case SG_SET_TIMEOUT:
 		if (get_user(val, intp))
 			return -EFAULT;
-		q->sg_timeout = clock_t_to_jiffies(val);
+		bd->timeout = clock_t_to_jiffies(val);
 		return 0;
 	case SG_GET_TIMEOUT:
-		return jiffies_to_clock_t(q->sg_timeout);
+		return jiffies_to_clock_t(bd->timeout);
 	case SG_GET_RESERVED_SIZE:
-		return put_user(min(q->sg_reserved_size, queue_max_bytes(q)),
+		return put_user(min(bd->reserved_size, queue_max_bytes(q)),
 				intp);
 	case SG_SET_RESERVED_SIZE:
 		if (get_user(val, intp))
 			return -EFAULT;
 		if (val < 0)
 			return -EINVAL;
-		q->sg_reserved_size =
+		bd->reserved_size =
 			min_t(unsigned int, val, queue_max_bytes(q));
 		return 0;
 	case SG_EMULATED_HOST:
@@ -219,6 +221,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 	if (!bd)
 		return ERR_PTR(-ENOMEM);
 	bd->max_queue = BSG_DEFAULT_CMDS;
+	bd->reserved_size = INT_MAX;
 	bd->queue = q;
 	bd->ops = ops;
 
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 633f016c2bfe..384238a1458f 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -173,29 +173,25 @@ static int sg_get_version(int __user *p)
 	return put_user(sg_version_num, p);
 }
 
-static int sg_get_timeout(struct request_queue *q)
-{
-	return jiffies_to_clock_t(q->sg_timeout);
-}
-
-static int sg_set_timeout(struct request_queue *q, int __user *p)
+static int sg_set_timeout(struct scsi_device *sdev, int __user *p)
 {
 	int timeout, err = get_user(timeout, p);
 
 	if (!err)
-		q->sg_timeout = clock_t_to_jiffies(timeout);
+		sdev->sg_timeout = clock_t_to_jiffies(timeout);
 
 	return err;
 }
 
-static int sg_get_reserved_size(struct request_queue *q, int __user *p)
+static int sg_get_reserved_size(struct scsi_device *sdev, int __user *p)
 {
-	int val = min(q->sg_reserved_size, queue_max_bytes(q));
+	int val = min(sdev->sg_reserved_size,
+		      queue_max_bytes(sdev->request_queue));
 
 	return put_user(val, p);
 }
 
-static int sg_set_reserved_size(struct request_queue *q, int __user *p)
+static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p)
 {
 	int size, err = get_user(size, p);
 
@@ -205,7 +201,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 	if (size < 0)
 		return -EINVAL;
 
-	q->sg_reserved_size = min_t(unsigned int, size, queue_max_bytes(q));
+	sdev->sg_reserved_size = min_t(unsigned int, size,
+				       queue_max_bytes(sdev->request_queue));
 	return 0;
 }
 
@@ -345,7 +342,7 @@ bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
 }
 EXPORT_SYMBOL(scsi_cmd_allowed);
 
-static int scsi_fill_sghdr_rq(struct request_queue *q, struct request *rq,
+static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
 		struct sg_io_hdr *hdr, fmode_t mode)
 {
 	struct scsi_request *req = scsi_req(rq);
@@ -362,7 +359,7 @@ static int scsi_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 
 	rq->timeout = msecs_to_jiffies(hdr->timeout);
 	if (!rq->timeout)
-		rq->timeout = q->sg_timeout;
+		rq->timeout = sdev->sg_timeout;
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
@@ -409,7 +406,7 @@ static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	return ret;
 }
 
-static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
+static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
 		struct sg_io_hdr *hdr, fmode_t mode)
 {
 	unsigned long start_time;
@@ -423,7 +420,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->interface_id != 'S')
 		return -EINVAL;
 
-	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
+	if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
@@ -441,7 +438,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 		at_head = 1;
 
 	ret = -ENOMEM;
-	rq = blk_get_request(q, writing ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+	rq = blk_get_request(sdev->request_queue, writing ?
+			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	req = scsi_req(rq);
@@ -452,7 +450,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 			goto out_put_request;
 	}
 
-	ret = scsi_fill_sghdr_rq(q, rq, hdr, mode);
+	ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
 	if (ret < 0)
 		goto out_free_cdb;
 
@@ -469,11 +467,11 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 		/* SG_IO howto says that the shorter of the two wins */
 		iov_iter_truncate(&i, hdr->dxfer_len);
 
-		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
+		ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
 		kfree(iov);
 	} else if (hdr->dxfer_len)
-		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
-				      GFP_KERNEL);
+		ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
+				      hdr->dxfer_len, GFP_KERNEL);
 
 	if (ret)
 		goto out_free_cdb;
@@ -483,7 +481,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	start_time = jiffies;
 
-	blk_execute_rq(bd_disk, rq, at_head);
+	blk_execute_rq(disk, rq, at_head);
 
 	hdr->duration = jiffies_to_msecs(jiffies - start_time);
 
@@ -806,9 +804,8 @@ static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
 	return 0;
 }
 
-static int scsi_cdrom_send_packet(struct request_queue *q,
-				  struct gendisk *bd_disk,
-				  fmode_t mode, void __user *arg)
+static int scsi_cdrom_send_packet(struct scsi_device *sdev,struct gendisk *disk,
+		fmode_t mode, void __user *arg)
 {
 	struct cdrom_generic_command cgc;
 	struct sg_io_hdr hdr;
@@ -848,7 +845,7 @@ static int scsi_cdrom_send_packet(struct request_queue *q,
 	hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
 	hdr.cmd_len = sizeof(cgc.cmd);
 
-	err = sg_io(q, bd_disk, &hdr, mode);
+	err = sg_io(sdev, disk, &hdr, mode);
 	if (err == -EFAULT)
 		return -EFAULT;
 
@@ -863,7 +860,7 @@ static int scsi_cdrom_send_packet(struct request_queue *q,
 	return err;
 }
 
-static int scsi_ioctl_sg_io(struct request_queue *q, struct gendisk *disk,
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
 		fmode_t mode, void __user *argp)
 {
 	struct sg_io_hdr hdr;
@@ -872,7 +869,7 @@ static int scsi_ioctl_sg_io(struct request_queue *q, struct gendisk *disk,
 	error = get_sg_io_hdr(&hdr, argp);
 	if (error)
 		return error;
-	error = sg_io(q, disk, &hdr, mode);
+	error = sg_io(sdev, disk, &hdr, mode);
 	if (error == -EFAULT)
 		return error;
 	if (put_sg_io_hdr(&hdr, argp))
@@ -918,21 +915,21 @@ int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
 	case SG_GET_VERSION_NUM:
 		return sg_get_version(arg);
 	case SG_SET_TIMEOUT:
-		return sg_set_timeout(q, arg);
+		return sg_set_timeout(sdev, arg);
 	case SG_GET_TIMEOUT:
-		return sg_get_timeout(q);
+		return jiffies_to_clock_t(sdev->sg_timeout);
 	case SG_GET_RESERVED_SIZE:
-		return sg_get_reserved_size(q, arg);
+		return sg_get_reserved_size(sdev, arg);
 	case SG_SET_RESERVED_SIZE:
-		return sg_set_reserved_size(q, arg);
+		return sg_set_reserved_size(sdev, arg);
 	case SG_EMULATED_HOST:
 		return sg_emulated_host(q, arg);
 	case SG_IO:
-		return scsi_ioctl_sg_io(q, disk, mode, arg);
+		return scsi_ioctl_sg_io(sdev, disk, mode, arg);
 	case SCSI_IOCTL_SEND_COMMAND:
 		return sg_scsi_ioctl(q, disk, mode, arg);
 	case CDROM_SEND_PACKET:
-		return scsi_cdrom_send_packet(q, disk, mode, arg);
+		return scsi_cdrom_send_packet(sdev, disk, mode, arg);
 	case CDROMCLOSETRAY:
 		return scsi_send_start_stop(sdev, 3);
 	case CDROMEJECT:
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3faedf4970ec..e06a2602fca4 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -267,6 +267,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	 */
 	sdev->borken = 1;
 
+	sdev->sg_reserved_size = INT_MAX;
+
 	q = blk_mq_init_queue(&sdev->host->tag_set);
 	if (IS_ERR(q)) {
 		/* release fn is set up in scsi_sysfs_device_initialise, so
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e0bb14acb708..987f15089eeb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -499,11 +499,6 @@ struct request_queue {
 	unsigned int		max_active_zones;
 #endif /* CONFIG_BLK_DEV_ZONED */
 
-	/*
-	 * sg stuff
-	 */
-	unsigned int		sg_timeout;
-	unsigned int		sg_reserved_size;
 	int			node;
 	struct mutex		debugfs_mutex;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 99082da1b951..7137e7924913 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -236,6 +236,9 @@ struct scsi_device {
 	size_t			dma_drain_len;
 	void			*dma_drain_buf;
 
+	unsigned int		sg_timeout;
+	unsigned int		sg_reserved_size;
+
 	struct bsg_device	*bsg_dev;
 	unsigned char		access_state;
 	struct mutex		state_mutex;
-- 
2.30.2


* [PATCH 4/4] bsg: move the whole request execution into the scsi/transport handlers
  2021-07-29  6:48 bsg cleanup, part 2 Christoph Hellwig
                   ` (2 preceding siblings ...)
  2021-07-29  6:48 ` [PATCH 3/4] block: remove the remaining SG_IO-related fields from struct request_queue Christoph Hellwig
@ 2021-07-29  6:48 ` Christoph Hellwig
  2021-07-31  2:29 ` bsg cleanup, part 2 Martin K. Petersen
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2021-07-29  6:48 UTC (permalink / raw)
  To: Martin K. Petersen; +Cc: Jens Axboe, FUJITA Tomonori, linux-block, linux-scsi

Reduce the number of indirect calls by making each handler responsible
for the entire execution of the request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
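The shape every backend now implements, condensed from scsi_bsg_sg_io_fn
below (sketch only, the per-protocol details are elided):

  static int foo_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
          fmode_t mode, unsigned int timeout)
  {
          struct request *rq;
          int ret = 0;

          /* 1) validate hdr->protocol / permissions for this transport */

          rq = blk_get_request(q, hdr->dout_xfer_len ?
                               REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
          if (IS_ERR(rq))
                  return PTR_ERR(rq);
          rq->timeout = timeout;

          /* 2) copy in the command and blk_rq_map_user() the payload */

          blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

          /* 3) fill in the sg_io_v4 output members and unmap the bio */

          blk_put_request(rq);
          return ret;
  }
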
 block/bsg-lib.c         | 80 ++++++++++++++++++++---------------------
 block/bsg.c             | 66 ++++++++--------------------------
 drivers/scsi/scsi_bsg.c | 69 +++++++++++++++++++----------------
 include/linux/bsg.h     | 12 ++-----
 4 files changed, 96 insertions(+), 131 deletions(-)

diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index fe43f5fda6e5..239ebf747141 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -25,32 +25,39 @@ struct bsg_set {
 	bsg_timeout_fn		*timeout_fn;
 };
 
-static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+		fmode_t mode, unsigned int timeout)
 {
+	struct bsg_job *job;
+	struct request *rq;
+	struct bio *bio;
+	int ret;
+
 	if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
 	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
 		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	return 0;
-}
 
-static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-		fmode_t mode)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-	int ret;
+	rq = blk_get_request(q, hdr->dout_xfer_len ?
+			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+	rq->timeout = timeout;
 
+	job = blk_mq_rq_to_pdu(rq);
 	job->request_len = hdr->request_len;
 	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
-	if (IS_ERR(job->request))
-		return PTR_ERR(job->request);
+	if (IS_ERR(job->request)) {
+		ret = PTR_ERR(job->request);
+		goto out_put_request;
+	}
 
 	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
 		job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
 		if (IS_ERR(job->bidi_rq)) {
 			ret = PTR_ERR(job->bidi_rq);
-			goto out;
+			goto out_free_job_request;
 		}
 
 		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
@@ -65,20 +72,19 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
 		job->bidi_bio = NULL;
 	}
 
-	return 0;
+	if (hdr->dout_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+				hdr->dout_xfer_len, GFP_KERNEL);
+	} else if (hdr->din_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+				hdr->din_xfer_len, GFP_KERNEL);
+	}
 
-out_free_bidi_rq:
-	if (job->bidi_rq)
-		blk_put_request(job->bidi_rq);
-out:
-	kfree(job->request);
-	return ret;
-}
+	if (ret)
+		goto out_unmap_bidi_rq;
 
-static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-	int ret = 0;
+	bio = rq->bio;
+	blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
 
 	/*
 	 * The assignments below don't make much sense, but are kept for
@@ -121,28 +127,20 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 		hdr->din_resid = 0;
 	}
 
-	return ret;
-}
-
-static void bsg_transport_free_rq(struct request *rq)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-
-	if (job->bidi_rq) {
+	blk_rq_unmap_user(bio);
+out_unmap_bidi_rq:
+	if (job->bidi_rq)
 		blk_rq_unmap_user(job->bidi_bio);
+out_free_bidi_rq:
+	if (job->bidi_rq)
 		blk_put_request(job->bidi_rq);
-	}
-
+out_free_job_request:
 	kfree(job->request);
+out_put_request:
+	blk_put_request(rq);
+	return ret;
 }
 
-static const struct bsg_ops bsg_transport_ops = {
-	.check_proto		= bsg_transport_check_proto,
-	.fill_hdr		= bsg_transport_fill_hdr,
-	.complete_rq		= bsg_transport_complete_rq,
-	.free_rq		= bsg_transport_free_rq,
-};
-
 /**
  * bsg_teardown_job - routine to teardown a bsg job
  * @kref: kref inside bsg_job that is to be torn down
@@ -398,7 +396,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-	bset->bd = bsg_register_queue(q, dev, name, &bsg_transport_ops);
+	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
 	if (IS_ERR(bset->bd)) {
 		ret = PTR_ERR(bset->bd);
 		goto out_cleanup_queue;
diff --git a/block/bsg.c b/block/bsg.c
index 3ba74eec4ba2..351095193788 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -22,12 +22,12 @@
 
 struct bsg_device {
 	struct request_queue *queue;
-	const struct bsg_ops *ops;
 	struct device device;
 	struct cdev cdev;
 	int max_queue;
 	unsigned int timeout;
 	unsigned int reserved_size;
+	bsg_sg_io_fn *sg_io_fn;
 };
 
 static inline struct bsg_device *to_bsg_device(struct inode *inode)
@@ -42,63 +42,28 @@ static DEFINE_IDA(bsg_minor_ida);
 static struct class *bsg_class;
 static int bsg_major;
 
-#define uptr64(val) ((void __user *)(uintptr_t)(val))
+static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
+{
+	unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+	if (hdr->timeout)
+		timeout = msecs_to_jiffies(hdr->timeout);
+	else if (bd->timeout)
+		timeout = bd->timeout;
+
+	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
+}
 
 static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 {
-	struct request *rq;
-	struct bio *bio;
 	struct sg_io_v4 hdr;
 	int ret;
 
 	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 		return -EFAULT;
-
 	if (hdr.guard != 'Q')
 		return -EINVAL;
-	ret = bd->ops->check_proto(&hdr);
-	if (ret)
-		return ret;
-
-	rq = blk_get_request(bd->queue, hdr.dout_xfer_len ?
-			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	ret = bd->ops->fill_hdr(rq, &hdr, mode);
-	if (ret) {
-		blk_put_request(rq);
-		return ret;
-	}
-
-	rq->timeout = msecs_to_jiffies(hdr.timeout);
-	if (!rq->timeout)
-		rq->timeout = bd->timeout;
-	if (!rq->timeout)
-		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
-		rq->timeout = BLK_MIN_SG_TIMEOUT;
-
-	if (hdr.dout_xfer_len) {
-		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.dout_xferp),
-				hdr.dout_xfer_len, GFP_KERNEL);
-	} else if (hdr.din_xfer_len) {
-		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.din_xferp),
-				hdr.din_xfer_len, GFP_KERNEL);
-	}
-
-	if (ret)
-		goto out_free_rq;
-
-	bio = rq->bio;
-
-	blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
-	ret = bd->ops->complete_rq(rq, &hdr);
-	blk_rq_unmap_user(bio);
-
-out_free_rq:
-	bd->ops->free_rq(rq);
-	blk_put_request(rq);
+	ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr));
 	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
 		return -EFAULT;
 	return ret;
@@ -211,8 +176,7 @@ void bsg_unregister_queue(struct bsg_device *bd)
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
 struct bsg_device *bsg_register_queue(struct request_queue *q,
-		struct device *parent, const char *name,
-		const struct bsg_ops *ops)
+		struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
 {
 	struct bsg_device *bd;
 	int ret;
@@ -223,7 +187,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 	bd->max_queue = BSG_DEFAULT_CMDS;
 	bd->reserved_size = INT_MAX;
 	bd->queue = q;
-	bd->ops = ops;
+	bd->sg_io_fn = sg_io_fn;
 
 	ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
 	if (ret < 0) {
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index c0d41c45c2be..d13a67b82429 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -9,42 +9,57 @@
 
 #define uptr64(val) ((void __user *)(uintptr_t)(val))
 
-static int scsi_bsg_check_proto(struct sg_io_v4 *hdr)
+static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+		fmode_t mode, unsigned int timeout)
 {
+	struct scsi_request *sreq;
+	struct request *rq;
+	struct bio *bio;
+	int ret;
+
 	if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
 	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
 		return -EINVAL;
-	return 0;
-}
-
-static int scsi_bsg_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-		fmode_t mode)
-{
-	struct scsi_request *sreq = scsi_req(rq);
-
 	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
 		pr_warn_once("BIDI support in bsg has been removed.\n");
 		return -EOPNOTSUPP;
 	}
 
+	rq = blk_get_request(q, hdr->dout_xfer_len ?
+			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+	rq->timeout = timeout;
+
+	ret = -ENOMEM;
+	sreq = scsi_req(rq);
 	sreq->cmd_len = hdr->request_len;
 	if (sreq->cmd_len > BLK_MAX_CDB) {
 		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
 		if (!sreq->cmd)
-			return -ENOMEM;
+			goto out_put_request;
 	}
 
+	ret = -EFAULT;
 	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
-		return -EFAULT;
+		goto out_free_cmd;
+	ret = -EPERM;
 	if (!scsi_cmd_allowed(sreq->cmd, mode))
-		return -EPERM;
-	return 0;
-}
+		goto out_free_cmd;
 
-static int scsi_bsg_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
-	struct scsi_request *sreq = scsi_req(rq);
-	int ret = 0;
+	if (hdr->dout_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+				hdr->dout_xfer_len, GFP_KERNEL);
+	} else if (hdr->din_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+				hdr->din_xfer_len, GFP_KERNEL);
+	}
+
+	if (ret)
+		goto out_free_cmd;
+
+	bio = rq->bio;
+	blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
 
 	/*
 	 * fill in all the output members
@@ -74,23 +89,17 @@ static int scsi_bsg_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 	else
 		hdr->dout_resid = sreq->resid_len;
 
-	return ret;
-}
+	blk_rq_unmap_user(bio);
 
-static void scsi_bsg_free_rq(struct request *rq)
-{
+out_free_cmd:
 	scsi_req_free_cmd(scsi_req(rq));
+out_put_request:
+	blk_put_request(rq);
+	return ret;
 }
 
-static const struct bsg_ops scsi_bsg_ops = {
-	.check_proto		= scsi_bsg_check_proto,
-	.fill_hdr		= scsi_bsg_fill_hdr,
-	.complete_rq		= scsi_bsg_complete_rq,
-	.free_rq		= scsi_bsg_free_rq,
-};
-
 struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
 {
 	return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
-				  dev_name(&sdev->sdev_gendev), &scsi_bsg_ops);
+			dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn);
 }
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index fa21f79beda2..1ac81c809da9 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -6,20 +6,14 @@
 
 struct bsg_device;
 struct device;
-struct request;
 struct request_queue;
 
-struct bsg_ops {
-	int	(*check_proto)(struct sg_io_v4 *hdr);
-	int	(*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
-				fmode_t mode);
-	int	(*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
-	void	(*free_rq)(struct request *rq);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+		fmode_t mode, unsigned int timeout);
 
 struct bsg_device *bsg_register_queue(struct request_queue *q,
 		struct device *parent, const char *name,
-		const struct bsg_ops *ops);
+		bsg_sg_io_fn *sg_io_fn);
 void bsg_unregister_queue(struct bsg_device *bcd);
 
 #endif /* _LINUX_BSG_H */
-- 
2.30.2


* Re: bsg cleanup, part 2
  2021-07-29  6:48 bsg cleanup, part 2 Christoph Hellwig
                   ` (3 preceding siblings ...)
  2021-07-29  6:48 ` [PATCH 4/4] bsg: move the whole request execution into the scsi/transport handlers Christoph Hellwig
@ 2021-07-31  2:29 ` Martin K. Petersen
  4 siblings, 0 replies; 6+ messages in thread
From: Martin K. Petersen @ 2021-07-31  2:29 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Martin K. Petersen, Jens Axboe, FUJITA Tomonori, linux-block, linux-scsi


Christoph,

> this is the next round of bsg cleanups based on the previous scsi
> ioctl changes.  The biggest changes are a major simplification of how
> the bsg nodes are created and found, and a simplification of the
> interface between the frontend in bsg.c and the two backends.

Applied to 5.15/scsi-staging, thanks!

-- 
Martin K. Petersen	Oracle Linux Engineering
