linux-block.vger.kernel.org archive mirror
* start removing block bounce buffering support v2
@ 2021-03-26  5:58 Christoph Hellwig
  2021-03-26  5:58 ` [PATCH 1/8] aha1542: use a local bounce buffer Christoph Hellwig
                   ` (8 more replies)
  0 siblings, 9 replies; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

Hi all,

this series starts to clean up and reduce the impact of the legacy block
layer bounce buffering code.

First it removes support for ISA bouncing, which was used by three SCSI
drivers.  One of them actually had an active user and developer five years
ago, so I've converted it to use a local bounce buffer - Ondrej, can you
test the conversion?  The next one has been known to be broken for years,
and the third one looks like it has no users of its ISA support, so the
ISA code is simply dropped from both.
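
For reference, the conversion boils down to the pattern sketched below:
allocate a small DMA-coherent buffer per command and copy the request data
through it.  This is only a simplified sketch (the helper names here are
made up for illustration); the real code is in the aha1542 patch in this
series:

	/* simplified sketch of the per-command bounce buffer pattern */
	static int init_cmd_bounce(struct Scsi_Host *shost,
				   struct aha1542_cmd *acmd, size_t len)
	{
		acmd->data_buffer = dma_alloc_coherent(shost->dma_dev, len,
				&acmd->data_buffer_handle, GFP_KERNEL);
		return acmd->data_buffer ? 0 : -ENOMEM;
	}

	/* for a write, copy the bio segments into the bounce buffer first */
	static void bounce_copy_out(struct scsi_cmnd *cmd, void *buf)
	{
		struct req_iterator iter;
		struct bio_vec bv;

		rq_for_each_segment(bv, cmd->request, iter) {
			memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
					 bv.bv_len);
			buf += bv.bv_len;
		}
	}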

It then removes support for bounce buffering highmem pages for passthrough
requests, as we can just use the copy path instead of the map path for them.
This will reduce efficiency for such setups on highmem systems (e.g.
usb-storage attached DVD drives), but then again that is what you get for
using a driver that does not use modern interfaces on a 32-bit highmem
system.  It does allow streamlining the common path pretty nicely.
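
To illustrate where the copy path kicks in for passthrough requests, here
is a schematic of the decision made when mapping user data into a request.
This is not the actual blk-map.c code, just a sketch; in particular the
blk_queue_may_bounce() helper name and the function name below are
assumptions:

	/*
	 * Illustrative sketch only: if the queue may still need bounce
	 * buffering (e.g. a device that cannot address highmem pages),
	 * take the copy path (bio_copy_user_iov) rather than mapping the
	 * user pages directly (bio_map_user_iov).
	 */
	static bool passthrough_needs_copy(struct request_queue *q,
					   struct rq_map_data *map_data,
					   struct iov_iter *iter)
	{
		unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

		if (map_data)		/* caller supplied pages to copy into */
			return true;
		if (blk_queue_may_bounce(q))	/* device cannot reach all memory */
			return true;
		return iov_iter_alignment(iter) & align;
	}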


Changes since v1:
 - remove more dead code in advansys.c
 - fix the bounce limit stacking in blk_stack_limits


* [PATCH 1/8] aha1542: use a local bounce buffer
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:22   ` Hannes Reinecke
  2021-03-26  5:58 ` [PATCH 2/8] Buslogic: remove ISA support Christoph Hellwig
                   ` (7 subsequent siblings)
  8 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

To remove the last user of the unchecked_isa_dma flag and thus the block
layer ISA bounce buffering, switch this driver to use its own local bounce
buffer.  This has the effect of not needing the chain indirection and of
supporting an unlimited number of segments.  It does, however, limit the
transfer size for each command to something that can be reasonably
allocated by dma_alloc_coherent, such as 8K.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/scsi/aha1542.c | 105 ++++++++++++++++++++++-------------------
 1 file changed, 57 insertions(+), 48 deletions(-)

diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 21aab9f5b1172a..1210e61afb1838 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -65,9 +65,12 @@ struct aha1542_hostdata {
 	dma_addr_t ccb_handle;
 };
 
+#define AHA1542_MAX_SECTORS       16
+
 struct aha1542_cmd {
-	struct chain *chain;
-	dma_addr_t chain_handle;
+	/* bounce buffer */
+	void *data_buffer;
+	dma_addr_t data_buffer_handle;
 };
 
 static inline void aha1542_intr_reset(u16 base)
@@ -257,15 +260,19 @@ static int aha1542_test_port(struct Scsi_Host *sh)
 static void aha1542_free_cmd(struct scsi_cmnd *cmd)
 {
 	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
-	struct device *dev = cmd->device->host->dma_dev;
-	size_t len = scsi_sg_count(cmd) * sizeof(struct chain);
 
-	if (acmd->chain) {
-		dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
-		kfree(acmd->chain);
+	if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		void *buf = acmd->data_buffer;
+		struct req_iterator iter;
+		struct bio_vec bv;
+
+		rq_for_each_segment(bv, cmd->request, iter) {
+			memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
+				       bv.bv_len);
+			buf += bv.bv_len;
+		}
 	}
 
-	acmd->chain = NULL;
 	scsi_dma_unmap(cmd);
 }
 
@@ -416,7 +423,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	u8 lun = cmd->device->lun;
 	unsigned long flags;
 	int bufflen = scsi_bufflen(cmd);
-	int mbo, sg_count;
+	int mbo;
 	struct mailbox *mb = aha1542->mb;
 	struct ccb *ccb = aha1542->ccb;
 
@@ -438,17 +445,17 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 		print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
 	}
 #endif
-	sg_count = scsi_dma_map(cmd);
-	if (sg_count) {
-		size_t len = sg_count * sizeof(struct chain);
-
-		acmd->chain = kmalloc(len, GFP_DMA);
-		if (!acmd->chain)
-			goto out_unmap;
-		acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
-				len, DMA_TO_DEVICE);
-		if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
-			goto out_free_chain;
+
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		void *buf = acmd->data_buffer;
+		struct req_iterator iter;
+		struct bio_vec bv;
+
+		rq_for_each_segment(bv, cmd->request, iter) {
+			memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
+					 bv.bv_len);
+			buf += bv.bv_len;
+		}
 	}
 
 	/*
@@ -496,27 +503,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 		direction = 16;
 
 	memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);
-
-	if (bufflen) {
-		struct scatterlist *sg;
-		int i;
-
-		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
-		scsi_for_each_sg(cmd, sg, sg_count, i) {
-			any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
-			any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
-		};
-		any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
-		any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
-#ifdef DEBUG
-		shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
-		print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
-#endif
-	} else {
-		ccb[mbo].op = 0;	/* SCSI Initiator Command */
-		any2scsi(ccb[mbo].datalen, 0);
+	ccb[mbo].op = 0;	/* SCSI Initiator Command */
+	any2scsi(ccb[mbo].datalen, bufflen);
+	if (bufflen)
+		any2scsi(ccb[mbo].dataptr, acmd->data_buffer_handle);
+	else
 		any2scsi(ccb[mbo].dataptr, 0);
-	};
 	ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7);	/*SCSI Target Id */
 	ccb[mbo].rsalen = 16;
 	ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
@@ -531,12 +523,6 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	spin_unlock_irqrestore(sh->host_lock, flags);
 
 	return 0;
-out_free_chain:
-	kfree(acmd->chain);
-	acmd->chain = NULL;
-out_unmap:
-	scsi_dma_unmap(cmd);
-	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
 /* Initialize mailboxes */
@@ -1027,6 +1013,27 @@ static int aha1542_biosparam(struct scsi_device *sdev,
 }
 MODULE_LICENSE("GPL");
 
+static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
+
+	acmd->data_buffer = dma_alloc_coherent(shost->dma_dev,
+			SECTOR_SIZE * AHA1542_MAX_SECTORS,
+			&acmd->data_buffer_handle, GFP_KERNEL);
+	if (!acmd->data_buffer)
+		return -ENOMEM;
+	return 0;
+}
+
+static int aha1542_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
+
+	dma_free_coherent(shost->dma_dev, SECTOR_SIZE * AHA1542_MAX_SECTORS,
+			acmd->data_buffer, acmd->data_buffer_handle);
+	return 0;
+}
+
 static struct scsi_host_template driver_template = {
 	.module			= THIS_MODULE,
 	.proc_name		= "aha1542",
@@ -1037,10 +1044,12 @@ static struct scsi_host_template driver_template = {
 	.eh_bus_reset_handler	= aha1542_bus_reset,
 	.eh_host_reset_handler	= aha1542_host_reset,
 	.bios_param		= aha1542_biosparam,
+	.init_cmd_priv		= aha1542_init_cmd_priv,
+	.exit_cmd_priv		= aha1542_exit_cmd_priv,
 	.can_queue		= AHA1542_MAILBOXES,
 	.this_id		= 7,
-	.sg_tablesize		= 16,
-	.unchecked_isa_dma	= 1,
+	.max_sectors		= AHA1542_MAX_SECTORS,
+	.sg_tablesize		= SG_ALL,
 };
 
 static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
-- 
2.30.1



* [PATCH 2/8] Buslogic: remove ISA support
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
  2021-03-26  5:58 ` [PATCH 1/8] aha1542: use a local bounce buffer Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:22   ` Hannes Reinecke
  2021-03-29 20:29   ` Khalid Aziz
  2021-03-26  5:58 ` [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering Christoph Hellwig
                   ` (6 subsequent siblings)
  8 siblings, 2 replies; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

The ISA support in BusLogic has been broken for a long time, as the entire
I/O path expects a struct device for DMA mapping that is derived from the
PCI device, which would simply crash for ISA adapters.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/scsi/BusLogic.c | 156 ++--------------------------------------
 drivers/scsi/BusLogic.h |   3 -
 drivers/scsi/Kconfig    |   2 +-
 3 files changed, 6 insertions(+), 155 deletions(-)

diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index ccb061ab0a0ad2..c3ed03c4b3f5cb 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -561,60 +561,6 @@ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
 }
 
 
-/*
-  blogic_add_probeaddr_isa appends a single ISA I/O Address to the list
-  of I/O Address and Bus Probe Information to be checked for potential BusLogic
-  Host Adapters.
-*/
-
-static void __init blogic_add_probeaddr_isa(unsigned long io_addr)
-{
-	struct blogic_probeinfo *probeinfo;
-	if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
-		return;
-	probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++];
-	probeinfo->adapter_type = BLOGIC_MULTIMASTER;
-	probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
-	probeinfo->io_addr = io_addr;
-	probeinfo->pci_device = NULL;
-}
-
-
-/*
-  blogic_init_probeinfo_isa initializes the list of I/O Address and
-  Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
-  only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
-*/
-
-static void __init blogic_init_probeinfo_isa(struct blogic_adapter *adapter)
-{
-	/*
-	   If BusLogic Driver Options specifications requested that ISA
-	   Bus Probes be inhibited, do not proceed further.
-	 */
-	if (blogic_probe_options.noprobe_isa)
-		return;
-	/*
-	   Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
-	 */
-	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe330)
-		blogic_add_probeaddr_isa(0x330);
-	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe334)
-		blogic_add_probeaddr_isa(0x334);
-	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe230)
-		blogic_add_probeaddr_isa(0x230);
-	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe234)
-		blogic_add_probeaddr_isa(0x234);
-	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe130)
-		blogic_add_probeaddr_isa(0x130);
-	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe134)
-		blogic_add_probeaddr_isa(0x134);
-}
-
-
-#ifdef CONFIG_PCI
-
-
 /*
   blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order
   of increasing PCI Bus and Device Number.
@@ -667,14 +613,11 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 	int nonpr_mmcount = 0, mmcount = 0;
 	bool force_scan_order = false;
 	bool force_scan_order_checked = false;
-	bool addr_seen[6];
 	struct pci_dev *pci_device = NULL;
 	int i;
 	if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
 		return 0;
 	blogic_probeinfo_count++;
-	for (i = 0; i < 6; i++)
-		addr_seen[i] = false;
 	/*
 	   Iterate over the MultiMaster PCI Host Adapters.  For each
 	   enumerated host adapter, determine whether its ISA Compatible
@@ -744,11 +687,8 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		host_adapter->io_addr = io_addr;
 		blogic_intreset(host_adapter);
 		if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
-				&adapter_info, sizeof(adapter_info)) ==
-				sizeof(adapter_info)) {
-			if (adapter_info.isa_port < 6)
-				addr_seen[adapter_info.isa_port] = true;
-		} else
+				&adapter_info, sizeof(adapter_info)) !=
+				sizeof(adapter_info))
 			adapter_info.isa_port = BLOGIC_IO_DISABLE;
 		/*
 		   Issue the Modify I/O Address command to disable the
@@ -835,45 +775,6 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 	if (force_scan_order)
 		blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex],
 					nonpr_mmcount);
-	/*
-	   If no PCI MultiMaster Host Adapter is assigned the Primary
-	   I/O Address, then the Primary I/O Address must be probed
-	   explicitly before any PCI host adapters are probed.
-	 */
-	if (!blogic_probe_options.noprobe_isa)
-		if (pr_probeinfo->io_addr == 0 &&
-				(!blogic_probe_options.limited_isa ||
-				 blogic_probe_options.probe330)) {
-			pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
-			pr_probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
-			pr_probeinfo->io_addr = 0x330;
-		}
-	/*
-	   Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
-	   omitting the Primary I/O Address which has already been handled.
-	 */
-	if (!blogic_probe_options.noprobe_isa) {
-		if (!addr_seen[1] &&
-				(!blogic_probe_options.limited_isa ||
-				 blogic_probe_options.probe334))
-			blogic_add_probeaddr_isa(0x334);
-		if (!addr_seen[2] &&
-				(!blogic_probe_options.limited_isa ||
-				 blogic_probe_options.probe230))
-			blogic_add_probeaddr_isa(0x230);
-		if (!addr_seen[3] &&
-				(!blogic_probe_options.limited_isa ||
-				 blogic_probe_options.probe234))
-			blogic_add_probeaddr_isa(0x234);
-		if (!addr_seen[4] &&
-				(!blogic_probe_options.limited_isa ||
-				 blogic_probe_options.probe130))
-			blogic_add_probeaddr_isa(0x130);
-		if (!addr_seen[5] &&
-				(!blogic_probe_options.limited_isa ||
-				 blogic_probe_options.probe134))
-			blogic_add_probeaddr_isa(0x134);
-	}
 	/*
 	   Iterate over the older non-compliant MultiMaster PCI Host Adapters,
 	   noting the PCI bus location and assigned IRQ Channel.
@@ -1078,18 +979,10 @@ static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter)
 				}
 			}
 		}
-	} else {
-		blogic_init_probeinfo_isa(adapter);
 	}
 }
 
 
-#else
-#define blogic_init_probeinfo_list(adapter) \
-		blogic_init_probeinfo_isa(adapter)
-#endif				/* CONFIG_PCI */
-
-
 /*
   blogic_failure prints a standardized error message, and then returns false.
 */
@@ -1539,14 +1432,6 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
 		else if (config.irq_ch15)
 			adapter->irq_ch = 15;
 	}
-	if (adapter->adapter_bus_type == BLOGIC_ISA_BUS) {
-		if (config.dma_ch5)
-			adapter->dma_ch = 5;
-		else if (config.dma_ch6)
-			adapter->dma_ch = 6;
-		else if (config.dma_ch7)
-			adapter->dma_ch = 7;
-	}
 	/*
 	   Determine whether Extended Translation is enabled and save it in
 	   the Host Adapter structure.
@@ -1686,8 +1571,7 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
 	if (adapter->fw_ver[0] == '5')
 		adapter->adapter_qdepth = 192;
 	else if (adapter->fw_ver[0] == '4')
-		adapter->adapter_qdepth = (adapter->adapter_bus_type !=
-						BLOGIC_ISA_BUS ? 100 : 50);
+		adapter->adapter_qdepth = 100;
 	else
 		adapter->adapter_qdepth = 30;
 	if (strcmp(adapter->fw_ver, "3.31") >= 0) {
@@ -1727,13 +1611,6 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
 	   bios_addr is 0.
 	 */
 	adapter->bios_addr = ext_setupinfo.bios_addr << 12;
-	/*
-	   ISA Host Adapters require Bounce Buffers if there is more than
-	   16MB memory.
-	 */
-	if (adapter->adapter_bus_type == BLOGIC_ISA_BUS &&
-			(void *) high_memory > (void *) MAX_DMA_ADDRESS)
-		adapter->need_bouncebuf = true;
 	/*
 	   BusLogic BT-445S Host Adapters prior to board revision E have a
 	   hardware bug whereby when the BIOS is enabled, transfers to/from
@@ -1839,11 +1716,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 	blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
 	blogic_info("  Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
 	if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
-		blogic_info("  DMA Channel: ", adapter);
-		if (adapter->dma_ch > 0)
-			blogic_info("%d, ", adapter, adapter->dma_ch);
-		else
-			blogic_info("None, ", adapter);
+		blogic_info("  DMA Channel: None, ", adapter);
 		if (adapter->bios_addr > 0)
 			blogic_info("BIOS Address: 0x%lX, ", adapter,
 					adapter->bios_addr);
@@ -1995,18 +1868,6 @@ static bool __init blogic_getres(struct blogic_adapter *adapter)
 		return false;
 	}
 	adapter->irq_acquired = true;
-	/*
-	   Acquire exclusive access to the DMA Channel.
-	 */
-	if (adapter->dma_ch > 0) {
-		if (request_dma(adapter->dma_ch, adapter->full_model) < 0) {
-			blogic_err("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", adapter, adapter->dma_ch);
-			return false;
-		}
-		set_dma_mode(adapter->dma_ch, DMA_MODE_CASCADE);
-		enable_dma(adapter->dma_ch);
-		adapter->dma_chan_acquired = true;
-	}
 	/*
 	   Indicate the System Resource Acquisition completed successfully,
 	 */
@@ -2026,11 +1887,6 @@ static void blogic_relres(struct blogic_adapter *adapter)
 	 */
 	if (adapter->irq_acquired)
 		free_irq(adapter->irq_ch, adapter);
-	/*
-	   Release exclusive access to the DMA Channel.
-	 */
-	if (adapter->dma_chan_acquired)
-		free_dma(adapter->dma_ch);
 	/*
 	   Release any allocated memory structs not released elsewhere
 	 */
@@ -3694,9 +3550,7 @@ static int __init blogic_parseopts(char *options)
 					blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
 					return 0;
 				}
-			} else if (blogic_parse(&options, "NoProbeISA"))
-				blogic_probe_options.noprobe_isa = true;
-			else if (blogic_parse(&options, "NoProbePCI"))
+			} else if (blogic_parse(&options, "NoProbePCI"))
 				blogic_probe_options.noprobe_pci = true;
 			else if (blogic_parse(&options, "NoProbe"))
 				blogic_probe_options.noprobe = true;
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 6182cc8a0344a8..6eaddc009b5c55 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -237,7 +237,6 @@ struct blogic_probeinfo {
 
 struct blogic_probe_options {
 	bool noprobe:1;			/* Bit 0 */
-	bool noprobe_isa:1;		/* Bit 1 */
 	bool noprobe_pci:1;		/* Bit 2 */
 	bool nosort_pci:1;		/* Bit 3 */
 	bool multimaster_first:1;	/* Bit 4 */
@@ -997,10 +996,8 @@ struct blogic_adapter {
 	unsigned char bus;
 	unsigned char dev;
 	unsigned char irq_ch;
-	unsigned char dma_ch;
 	unsigned char scsi_id;
 	bool irq_acquired:1;
-	bool dma_chan_acquired:1;
 	bool ext_trans_enable:1;
 	bool parity:1;
 	bool reset_enabled:1;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06b87c7f6babd3..3d114be5b662df 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -497,7 +497,7 @@ config SCSI_HPTIOP
 
 config SCSI_BUSLOGIC
 	tristate "BusLogic SCSI support"
-	depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
+	depends on PCI && SCSI && VIRT_TO_BUS
 	help
 	  This is support for BusLogic MultiMaster and FlashPoint SCSI Host
 	  Adapters. Consult the SCSI-HOWTO, available from
-- 
2.30.1



* [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
  2021-03-26  5:58 ` [PATCH 1/8] aha1542: use a local bounce buffer Christoph Hellwig
  2021-03-26  5:58 ` [PATCH 2/8] Buslogic: remove ISA support Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:23   ` Hannes Reinecke
  2021-03-29 20:33   ` Khalid Aziz
  2021-03-26  5:58 ` [PATCH 4/8] advansys: remove ISA support Christoph Hellwig
                   ` (5 subsequent siblings)
  8 siblings, 2 replies; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

Warn about and refuse to support adapters with a DMA bug that forces
ISA-style bounce buffering.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/scsi/BusLogic.c | 21 ++++++---------------
 drivers/scsi/BusLogic.h |  1 -
 2 files changed, 6 insertions(+), 16 deletions(-)

diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index c3ed03c4b3f5cb..c8977e4bdba8c2 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -1616,14 +1616,12 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
 	   hardware bug whereby when the BIOS is enabled, transfers to/from
 	   the same address range the BIOS occupies modulo 16MB are handled
 	   incorrectly.  Only properly functioning BT-445S Host Adapters
-	   have firmware version 3.37, so require that ISA Bounce Buffers
-	   be used for the buggy BT-445S models if there is more than 16MB
-	   memory.
+	   have firmware version 3.37.
 	 */
-	if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 &&
-			strcmp(adapter->fw_ver, "3.37") < 0 &&
-			(void *) high_memory > (void *) MAX_DMA_ADDRESS)
-		adapter->need_bouncebuf = true;
+	if (adapter->bios_addr > 0 &&
+	    strcmp(adapter->model, "BT-445S") == 0 &&
+	    strcmp(adapter->fw_ver, "3.37") < 0)
+		return blogic_failure(adapter, "Too old firmware");
 	/*
 	   Initialize parameters common to MultiMaster and FlashPoint
 	   Host Adapters.
@@ -1646,14 +1644,9 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
 		if (adapter->drvr_opts != NULL &&
 				adapter->drvr_opts->qdepth[tgt_id] > 0)
 			qdepth = adapter->drvr_opts->qdepth[tgt_id];
-		else if (adapter->need_bouncebuf)
-			qdepth = BLOGIC_TAG_DEPTH_BB;
 		adapter->qdepth[tgt_id] = qdepth;
 	}
-	if (adapter->need_bouncebuf)
-		adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH_BB;
-	else
-		adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
+	adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
 	if (adapter->drvr_opts != NULL)
 		adapter->common_qdepth = adapter->drvr_opts->common_qdepth;
 	if (adapter->common_qdepth > 0 &&
@@ -2155,7 +2148,6 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
 	host->this_id = adapter->scsi_id;
 	host->can_queue = adapter->drvr_qdepth;
 	host->sg_tablesize = adapter->drvr_sglimit;
-	host->unchecked_isa_dma = adapter->need_bouncebuf;
 	host->cmd_per_lun = adapter->untag_qdepth;
 }
 
@@ -3705,7 +3697,6 @@ static struct scsi_host_template blogic_template = {
 #if 0
 	.eh_abort_handler = blogic_abort,
 #endif
-	.unchecked_isa_dma = 1,
 	.max_sectors = 128,
 };
 
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 6eaddc009b5c55..858187af8fd1e8 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1010,7 +1010,6 @@ struct blogic_adapter {
 	bool terminfo_valid:1;
 	bool low_term:1;
 	bool high_term:1;
-	bool need_bouncebuf:1;
 	bool strict_rr:1;
 	bool scam_enabled:1;
 	bool scam_lev2:1;
-- 
2.30.1



* [PATCH 4/8] advansys: remove ISA support
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
                   ` (2 preceding siblings ...)
  2021-03-26  5:58 ` [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:31   ` Hannes Reinecke
  2021-03-26  5:58 ` [PATCH 5/8] scsi: remove the unchecked_isa_dma flag Christoph Hellwig
                   ` (4 subsequent siblings)
  8 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

This is the last piece in the kernel requiring the block layer ISA bounce
buffering, and it does not actually appear to be used.  So remove it and
see if anyone screams, in which case we'll need to find a solution to fix
it back up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 drivers/scsi/advansys.c | 283 ++++------------------------------------
 1 file changed, 25 insertions(+), 258 deletions(-)

diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ec5627890809e6..ccdd78ac7abd95 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -84,8 +84,6 @@ typedef unsigned char uchar;
 
 #define ASC_CS_TYPE  unsigned short
 
-#define ASC_IS_ISA          (0x0001)
-#define ASC_IS_ISAPNP       (0x0081)
 #define ASC_IS_EISA         (0x0002)
 #define ASC_IS_PCI          (0x0004)
 #define ASC_IS_PCI_ULTRA    (0x0104)
@@ -2415,8 +2413,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
 	printk(" dma_channel %d, this_id %d, can_queue %d,\n",
 	       s->dma_channel, s->this_id, s->can_queue);
 
-	printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n",
-	       s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma);
+	printk(" cmd_per_lun %d, sg_tablesize %d\n",
+	       s->cmd_per_lun, s->sg_tablesize);
 
 	if (ASC_NARROW_BOARD(boardp)) {
 		asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var);
@@ -2632,42 +2630,28 @@ static const char *advansys_info(struct Scsi_Host *shost)
 	if (ASC_NARROW_BOARD(boardp)) {
 		asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
 		ASC_DBG(1, "begin\n");
-		if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
-			if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) ==
-			    ASC_IS_ISAPNP) {
-				busname = "ISA PnP";
+
+		if (asc_dvc_varp->bus_type & ASC_IS_VL) {
+			busname = "VL";
+		} else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
+			busname = "EISA";
+		} else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
+			if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
+			    == ASC_IS_PCI_ULTRA) {
+				busname = "PCI Ultra";
 			} else {
-				busname = "ISA";
+				busname = "PCI";
 			}
-			sprintf(info,
-				"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X",
-				ASC_VERSION, busname,
-				(ulong)shost->io_port,
-				(ulong)shost->io_port + ASC_IOADR_GAP - 1,
-				boardp->irq, shost->dma_channel);
 		} else {
-			if (asc_dvc_varp->bus_type & ASC_IS_VL) {
-				busname = "VL";
-			} else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
-				busname = "EISA";
-			} else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
-				if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
-				    == ASC_IS_PCI_ULTRA) {
-					busname = "PCI Ultra";
-				} else {
-					busname = "PCI";
-				}
-			} else {
-				busname = "?";
-				shost_printk(KERN_ERR, shost, "unknown bus "
-					"type %d\n", asc_dvc_varp->bus_type);
-			}
-			sprintf(info,
-				"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
-				ASC_VERSION, busname, (ulong)shost->io_port,
-				(ulong)shost->io_port + ASC_IOADR_GAP - 1,
-				boardp->irq);
+			busname = "?";
+			shost_printk(KERN_ERR, shost, "unknown bus "
+				"type %d\n", asc_dvc_varp->bus_type);
 		}
+		sprintf(info,
+			"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
+			ASC_VERSION, busname, (ulong)shost->io_port,
+			(ulong)shost->io_port + ASC_IOADR_GAP - 1,
+			boardp->irq);
 	} else {
 		/*
 		 * Wide Adapter Information
@@ -2873,12 +2857,7 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	ASCEEP_CONFIG *ep;
 	int i;
 	uchar serialstr[13];
-#ifdef CONFIG_ISA
-	ASC_DVC_VAR *asc_dvc_varp;
-	int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
 
-	asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
-#endif /* CONFIG_ISA */
 	ep = &boardp->eep_config.asc_eep;
 
 	seq_printf(m,
@@ -2926,14 +2905,6 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 		seq_printf(m, " %c",
 			   (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
 	seq_putc(m, '\n');
-
-#ifdef CONFIG_ISA
-	if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
-		seq_printf(m,
-			   " Host ISA DMA speed:   %d MB/S\n",
-			   isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
-	}
-#endif /* CONFIG_ISA */
 }
 
 /*
@@ -3180,10 +3151,6 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
 		   shost->unique_id, shost->can_queue, shost->this_id,
 		   shost->sg_tablesize, shost->cmd_per_lun);
 
-	seq_printf(m,
-		   " unchecked_isa_dma %d\n",
-		   shost->unchecked_isa_dma);
-
 	seq_printf(m,
 		   " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
 		   boardp->flags, shost->last_reset, jiffies,
@@ -8563,12 +8530,6 @@ static unsigned short AscGetChipBiosAddress(PortAddr iop_base,
 	}
 
 	cfg_lsw = AscGetChipCfgLsw(iop_base);
-
-	/*
-	 *  ISA PnP uses the top bit as the 32K BIOS flag
-	 */
-	if (bus_type == ASC_IS_ISAPNP)
-		cfg_lsw &= 0x7FFF;
 	bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE;
 	return bios_addr;
 }
@@ -8611,19 +8572,6 @@ static unsigned char AscGetChipVersion(PortAddr iop_base,
 	return AscGetChipVerNo(iop_base);
 }
 
-#ifdef CONFIG_ISA
-static void AscEnableIsaDma(uchar dma_channel)
-{
-	if (dma_channel < 4) {
-		outp(0x000B, (ushort)(0xC0 | dma_channel));
-		outp(0x000A, dma_channel);
-	} else if (dma_channel < 8) {
-		outp(0x00D6, (ushort)(0xC0 | (dma_channel - 4)));
-		outp(0x00D4, (ushort)(dma_channel - 4));
-	}
-}
-#endif /* CONFIG_ISA */
-
 static int AscStopQueueExe(PortAddr iop_base)
 {
 	int count = 0;
@@ -8644,65 +8592,11 @@ static int AscStopQueueExe(PortAddr iop_base)
 
 static unsigned int AscGetMaxDmaCount(ushort bus_type)
 {
-	if (bus_type & ASC_IS_ISA)
-		return ASC_MAX_ISA_DMA_COUNT;
-	else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
+	if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
 		return ASC_MAX_VL_DMA_COUNT;
 	return ASC_MAX_PCI_DMA_COUNT;
 }
 
-#ifdef CONFIG_ISA
-static ushort AscGetIsaDmaChannel(PortAddr iop_base)
-{
-	ushort channel;
-
-	channel = AscGetChipCfgLsw(iop_base) & 0x0003;
-	if (channel == 0x03)
-		return (0);
-	else if (channel == 0x00)
-		return (7);
-	return (channel + 4);
-}
-
-static ushort AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
-{
-	ushort cfg_lsw;
-	uchar value;
-
-	if ((dma_channel >= 5) && (dma_channel <= 7)) {
-		if (dma_channel == 7)
-			value = 0x00;
-		else
-			value = dma_channel - 4;
-		cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
-		cfg_lsw |= value;
-		AscSetChipCfgLsw(iop_base, cfg_lsw);
-		return (AscGetIsaDmaChannel(iop_base));
-	}
-	return 0;
-}
-
-static uchar AscGetIsaDmaSpeed(PortAddr iop_base)
-{
-	uchar speed_value;
-
-	AscSetBank(iop_base, 1);
-	speed_value = AscReadChipDmaSpeed(iop_base);
-	speed_value &= 0x07;
-	AscSetBank(iop_base, 0);
-	return speed_value;
-}
-
-static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
-{
-	speed_value &= 0x07;
-	AscSetBank(iop_base, 1);
-	AscWriteChipDmaSpeed(iop_base, speed_value);
-	AscSetBank(iop_base, 0);
-	return AscGetIsaDmaSpeed(iop_base);
-}
-#endif /* CONFIG_ISA */
-
 static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
 {
 	int i;
@@ -8712,7 +8606,7 @@ static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
 	iop_base = asc_dvc->iop_base;
 	asc_dvc->err_code = 0;
 	if ((asc_dvc->bus_type &
-	     (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
+	     (ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
 		asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
 	}
 	AscSetChipControl(iop_base, CC_HALT);
@@ -8768,16 +8662,6 @@ static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
 	}
 
 	asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
-#ifdef CONFIG_ISA
-	if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
-		if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
-			AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
-			asc_dvc->bus_type = ASC_IS_ISAPNP;
-		}
-		asc_dvc->cfg->isa_dma_channel =
-		    (uchar)AscGetIsaDmaChannel(iop_base);
-	}
-#endif /* CONFIG_ISA */
 	for (i = 0; i <= ASC_MAX_TID; i++) {
 		asc_dvc->cur_dvc_qng[i] = 0;
 		asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
@@ -9314,22 +9198,10 @@ static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
 		}
 	} else
 #endif /* CONFIG_PCI */
-	if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
-		if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
-		    == ASC_CHIP_VER_ASYN_BUG) {
-			asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
-		}
-	}
 	if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
 	    asc_dvc->cfg->chip_scsi_id) {
 		asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
 	}
-#ifdef CONFIG_ISA
-	if (asc_dvc->bus_type & ASC_IS_ISA) {
-		AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
-		AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
-	}
-#endif /* CONFIG_ISA */
 
 	asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
 
@@ -10752,12 +10624,6 @@ static struct scsi_host_template advansys_template = {
 	.eh_host_reset_handler = advansys_reset,
 	.bios_param = advansys_biosparam,
 	.slave_configure = advansys_slave_configure,
-	/*
-	 * Because the driver may control an ISA adapter 'unchecked_isa_dma'
-	 * must be set. The flag will be cleared in advansys_board_found
-	 * for non-ISA adapters.
-	 */
-	.unchecked_isa_dma = true,
 };
 
 static int advansys_wide_init_chip(struct Scsi_Host *shost)
@@ -10923,29 +10789,21 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
 		 */
 		switch (asc_dvc_varp->bus_type) {
 #ifdef CONFIG_ISA
-		case ASC_IS_ISA:
-			shost->unchecked_isa_dma = true;
-			share_irq = 0;
-			break;
 		case ASC_IS_VL:
-			shost->unchecked_isa_dma = false;
 			share_irq = 0;
 			break;
 		case ASC_IS_EISA:
-			shost->unchecked_isa_dma = false;
 			share_irq = IRQF_SHARED;
 			break;
 #endif /* CONFIG_ISA */
 #ifdef CONFIG_PCI
 		case ASC_IS_PCI:
-			shost->unchecked_isa_dma = false;
 			share_irq = IRQF_SHARED;
 			break;
 #endif /* CONFIG_PCI */
 		default:
 			shost_printk(KERN_ERR, shost, "unknown adapter type: "
 					"%d\n", asc_dvc_varp->bus_type);
-			shost->unchecked_isa_dma = false;
 			share_irq = 0;
 			break;
 		}
@@ -10964,7 +10822,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
 		 * For Wide boards set PCI information before calling
 		 * AdvInitGetConfig().
 		 */
-		shost->unchecked_isa_dma = false;
 		share_irq = IRQF_SHARED;
 		ASC_DBG(2, "AdvInitGetConfig()\n");
 
@@ -11228,22 +11085,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
 
 	/* Register DMA Channel for Narrow boards. */
 	shost->dma_channel = NO_ISA_DMA;	/* Default to no ISA DMA. */
-#ifdef CONFIG_ISA
-	if (ASC_NARROW_BOARD(boardp)) {
-		/* Register DMA channel for ISA bus. */
-		if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
-			shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
-			ret = request_dma(shost->dma_channel, DRV_NAME);
-			if (ret) {
-				shost_printk(KERN_ERR, shost, "request_dma() "
-						"%d failed %d\n",
-						shost->dma_channel, ret);
-				goto err_unmap;
-			}
-			AscEnableIsaDma(shost->dma_channel);
-		}
-	}
-#endif /* CONFIG_ISA */
 
 	/* Register IRQ Number. */
 	ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost);
@@ -11366,79 +11207,13 @@ static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = {
 	0x0210, 0x0230, 0x0250, 0x0330
 };
 
-/*
- * The ISA IRQ number is found in bits 2 and 3 of the CfgLsw.  It decodes as:
- * 00: 10
- * 01: 11
- * 10: 12
- * 11: 15
- */
-static unsigned int advansys_isa_irq_no(PortAddr iop_base)
-{
-	unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
-	unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10;
-	if (chip_irq == 13)
-		chip_irq = 15;
-	return chip_irq;
-}
-
-static int advansys_isa_probe(struct device *dev, unsigned int id)
-{
-	int err = -ENODEV;
-	PortAddr iop_base = _asc_def_iop_base[id];
-	struct Scsi_Host *shost;
-	struct asc_board *board;
-
-	if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) {
-		ASC_DBG(1, "I/O port 0x%x busy\n", iop_base);
-		return -ENODEV;
-	}
-	ASC_DBG(1, "probing I/O port 0x%x\n", iop_base);
-	if (!AscFindSignature(iop_base))
-		goto release_region;
-	if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT))
-		goto release_region;
-
-	err = -ENOMEM;
-	shost = scsi_host_alloc(&advansys_template, sizeof(*board));
-	if (!shost)
-		goto release_region;
-
-	board = shost_priv(shost);
-	board->irq = advansys_isa_irq_no(iop_base);
-	board->dev = dev;
-	board->shost = shost;
-
-	err = advansys_board_found(shost, iop_base, ASC_IS_ISA);
-	if (err)
-		goto free_host;
-
-	dev_set_drvdata(dev, shost);
-	return 0;
-
- free_host:
-	scsi_host_put(shost);
- release_region:
-	release_region(iop_base, ASC_IOADR_GAP);
-	return err;
-}
-
-static void advansys_isa_remove(struct device *dev, unsigned int id)
+static void advansys_vlb_remove(struct device *dev, unsigned int id)
 {
 	int ioport = _asc_def_iop_base[id];
 	advansys_release(dev_get_drvdata(dev));
 	release_region(ioport, ASC_IOADR_GAP);
 }
 
-static struct isa_driver advansys_isa_driver = {
-	.probe		= advansys_isa_probe,
-	.remove		= advansys_isa_remove,
-	.driver = {
-		.owner	= THIS_MODULE,
-		.name	= DRV_NAME,
-	},
-};
-
 /*
  * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw.  It decodes as:
  * 000: invalid
@@ -11507,7 +11282,7 @@ static int advansys_vlb_probe(struct device *dev, unsigned int id)
 
 static struct isa_driver advansys_vlb_driver = {
 	.probe		= advansys_vlb_probe,
-	.remove		= advansys_isa_remove,
+	.remove		= advansys_vlb_remove,
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "advansys_vlb",
@@ -11757,15 +11532,10 @@ static int __init advansys_init(void)
 {
 	int error;
 
-	error = isa_register_driver(&advansys_isa_driver,
-				    ASC_IOADR_TABLE_MAX_IX);
-	if (error)
-		goto fail;
-
 	error = isa_register_driver(&advansys_vlb_driver,
 				    ASC_IOADR_TABLE_MAX_IX);
 	if (error)
-		goto unregister_isa;
+		goto fail;
 
 	error = eisa_driver_register(&advansys_eisa_driver);
 	if (error)
@@ -11781,8 +11551,6 @@ static int __init advansys_init(void)
 	eisa_driver_unregister(&advansys_eisa_driver);
  unregister_vlb:
 	isa_unregister_driver(&advansys_vlb_driver);
- unregister_isa:
-	isa_unregister_driver(&advansys_isa_driver);
  fail:
 	return error;
 }
@@ -11792,7 +11560,6 @@ static void __exit advansys_exit(void)
 	pci_unregister_driver(&advansys_pci_driver);
 	eisa_driver_unregister(&advansys_eisa_driver);
 	isa_unregister_driver(&advansys_vlb_driver);
-	isa_unregister_driver(&advansys_isa_driver);
 }
 
 module_init(advansys_init);
-- 
2.30.1



* [PATCH 5/8] scsi: remove the unchecked_isa_dma flag
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
                   ` (3 preceding siblings ...)
  2021-03-26  5:58 ` [PATCH 4/8] advansys: remove ISA support Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:31   ` Hannes Reinecke
  2021-03-26  5:58 ` [PATCH 6/8] block: remove BLK_BOUNCE_ISA support Christoph Hellwig
                   ` (3 subsequent siblings)
  8 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

Remove the unchecked_isa_dma flag now that all users are gone.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 Documentation/scsi/scsi_mid_low_api.rst |  4 --
 drivers/scsi/esas2r/esas2r_main.c       |  1 -
 drivers/scsi/hosts.c                    |  7 +---
 drivers/scsi/scsi_debugfs.c             |  1 -
 drivers/scsi/scsi_lib.c                 | 52 +++----------------------
 drivers/scsi/scsi_scan.c                |  6 +--
 drivers/scsi/scsi_sysfs.c               |  2 -
 drivers/scsi/sg.c                       | 10 +----
 drivers/scsi/sr_ioctl.c                 | 12 ++----
 drivers/scsi/st.c                       | 20 ++++------
 drivers/scsi/st.h                       |  2 -
 include/scsi/scsi_cmnd.h                |  7 ++--
 include/scsi/scsi_host.h                |  6 ---
 13 files changed, 25 insertions(+), 105 deletions(-)

diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
index 5bc17d012b2560..096ffe9cae0e04 100644
--- a/Documentation/scsi/scsi_mid_low_api.rst
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -1095,10 +1095,6 @@ of interest:
 		 - maximum number of commands that can be queued on devices
                    controlled by the host. Overridden by LLD calls to
                    scsi_change_queue_depth().
-    unchecked_isa_dma
-		 - 1=>only use bottom 16 MB of ram (ISA DMA addressing
-                   restriction), 0=>can use full 32 bit (or better) DMA
-                   address space
     no_async_abort
 		 - 1=>Asynchronous aborts are not supported
 		 - 0=>Timed-out commands will be aborted asynchronously
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index a9dd6345f064c9..5d9eeac6717abd 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -249,7 +249,6 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun			=
 		ESAS2R_DEFAULT_CMD_PER_LUN,
 	.present			= 0,
-	.unchecked_isa_dma		= 0,
 	.emulated			= 0,
 	.proc_name			= ESAS2R_DRVR_NAME,
 	.change_queue_depth		= scsi_change_queue_depth,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 2f162603876f9e..697c09ef259b3f 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -371,13 +371,9 @@ static struct device_type scsi_host_type = {
 struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 {
 	struct Scsi_Host *shost;
-	gfp_t gfp_mask = GFP_KERNEL;
 	int index;
 
-	if (sht->unchecked_isa_dma && privsize)
-		gfp_mask |= __GFP_DMA;
-
-	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
+	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
 	if (!shost)
 		return NULL;
 
@@ -419,7 +415,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->sg_tablesize = sht->sg_tablesize;
 	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
 	shost->cmd_per_lun = sht->cmd_per_lun;
-	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
 	shost->no_write_same = sht->no_write_same;
 	shost->host_tagset = sht->host_tagset;
 
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index c19ea7ab54cbd2..d9109771f274d1 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -8,7 +8,6 @@
 #define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
 static const char *const scsi_cmd_flags[] = {
 	SCSI_CMD_FLAG_NAME(TAGGED),
-	SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
 	SCSI_CMD_FLAG_NAME(INITIALIZED),
 };
 #undef SCSI_CMD_FLAG_NAME
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7d52a11e1b6115..c289991ffaed2f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -53,49 +53,16 @@
 #endif
 
 static struct kmem_cache *scsi_sense_cache;
-static struct kmem_cache *scsi_sense_isadma_cache;
 static DEFINE_MUTEX(scsi_sense_cache_mutex);
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
 
-static inline struct kmem_cache *
-scsi_select_sense_cache(bool unchecked_isa_dma)
-{
-	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
-}
-
-static void scsi_free_sense_buffer(bool unchecked_isa_dma,
-				   unsigned char *sense_buffer)
-{
-	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
-			sense_buffer);
-}
-
-static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
-	gfp_t gfp_mask, int numa_node)
-{
-	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
-				     gfp_mask, numa_node);
-}
-
 int scsi_init_sense_cache(struct Scsi_Host *shost)
 {
-	struct kmem_cache *cache;
 	int ret = 0;
 
 	mutex_lock(&scsi_sense_cache_mutex);
-	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
-	if (cache)
-		goto exit;
-
-	if (shost->unchecked_isa_dma) {
-		scsi_sense_isadma_cache =
-			kmem_cache_create("scsi_sense_cache(DMA)",
-				SCSI_SENSE_BUFFERSIZE, 0,
-				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
-		if (!scsi_sense_isadma_cache)
-			ret = -ENOMEM;
-	} else {
+	if (!scsi_sense_cache) {
 		scsi_sense_cache =
 			kmem_cache_create_usercopy("scsi_sense_cache",
 				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
@@ -103,7 +70,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
 		if (!scsi_sense_cache)
 			ret = -ENOMEM;
 	}
- exit:
 	mutex_unlock(&scsi_sense_cache_mutex);
 	return ret;
 }
@@ -1748,15 +1714,12 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 				unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct Scsi_Host *shost = set->driver_data;
-	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 	struct scatterlist *sg;
 	int ret = 0;
 
-	if (unchecked_isa_dma)
-		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
-	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
-						    GFP_KERNEL, numa_node);
+	cmd->sense_buffer =
+		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
 	if (!cmd->sense_buffer)
 		return -ENOMEM;
 	cmd->req.sense = cmd->sense_buffer;
@@ -1770,8 +1733,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	if (shost->hostt->init_cmd_priv) {
 		ret = shost->hostt->init_cmd_priv(shost, cmd);
 		if (ret < 0)
-			scsi_free_sense_buffer(unchecked_isa_dma,
-					       cmd->sense_buffer);
+			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
 	}
 
 	return ret;
@@ -1785,8 +1747,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 
 	if (shost->hostt->exit_cmd_priv)
 		shost->hostt->exit_cmd_priv(shost, cmd);
-	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
-			       cmd->sense_buffer);
+	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
 }
 
 static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -1821,8 +1782,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
 	}
 	blk_queue_max_hw_sectors(q, shost->max_sectors);
-	if (shost->unchecked_isa_dma)
-		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
 
@@ -1988,7 +1947,6 @@ EXPORT_SYMBOL(scsi_unblock_requests);
 void scsi_exit_queue(void)
 {
 	kmem_cache_destroy(scsi_sense_cache);
-	kmem_cache_destroy(scsi_sense_isadma_cache);
 }
 
 /**
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9af50e6f94c4c4..9b73aa506382ea 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1078,8 +1078,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 	if (!sdev)
 		goto out;
 
-	result = kmalloc(result_len, GFP_KERNEL |
-			((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
+	result = kmalloc(result_len, GFP_KERNEL);
 	if (!result)
 		goto out_free_sdev;
 
@@ -1336,8 +1335,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
 	 */
 	length = (511 + 1) * sizeof(struct scsi_lun);
 retry:
-	lun_data = kmalloc(length, GFP_KERNEL |
-			   (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
+	lun_data = kmalloc(length, GFP_KERNEL);
 	if (!lun_data) {
 		printk(ALLOC_FAILURE_MSG, __func__);
 		goto out;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b6378c8ca783ea..b71ea1a69c8b60 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -373,7 +373,6 @@ shost_rd_attr(cmd_per_lun, "%hd\n");
 shost_rd_attr(can_queue, "%d\n");
 shost_rd_attr(sg_tablesize, "%hu\n");
 shost_rd_attr(sg_prot_tablesize, "%hu\n");
-shost_rd_attr(unchecked_isa_dma, "%d\n");
 shost_rd_attr(prot_capabilities, "%u\n");
 shost_rd_attr(prot_guard_type, "%hd\n");
 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
@@ -411,7 +410,6 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
 	&dev_attr_can_queue.attr,
 	&dev_attr_sg_tablesize.attr,
 	&dev_attr_sg_prot_tablesize.attr,
-	&dev_attr_unchecked_isa_dma.attr,
 	&dev_attr_proc_name.attr,
 	&dev_attr_scan.attr,
 	&dev_attr_hstate.attr,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4383d93110f835..70f38715641eec 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -974,7 +974,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
 		 */
 		return 0;
 	case SG_GET_LOW_DMA:
-		return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
+		return put_user(0, ip);
 	case SG_GET_SCSI_ID:
 		{
 			sg_scsi_id_t v;
@@ -1777,7 +1777,6 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 
 	if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
 	    dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
-	    !sfp->parentdp->device->host->unchecked_isa_dma &&
 	    blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
 		md = NULL;
 	else
@@ -1893,7 +1892,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 	int sg_tablesize = sfp->parentdp->sg_tablesize;
 	int blk_size = buff_size, order;
 	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
-	struct sg_device *sdp = sfp->parentdp;
 
 	if (blk_size < 0)
 		return -EFAULT;
@@ -1919,9 +1917,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 			scatter_elem_sz_prev = num;
 	}
 
-	if (sdp->device->host->unchecked_isa_dma)
-		gfp_mask |= GFP_DMA;
-
 	order = get_order(num);
 retry:
 	ret_sz = 1 << (PAGE_SHIFT + order);
@@ -2547,8 +2542,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 			   "(res)sgat=%d low_dma=%d\n", k,
 			   jiffies_to_msecs(fp->timeout),
 			   fp->reserve.bufflen,
-			   (int) fp->reserve.k_use_sg,
-			   (int) sdp->device->host->unchecked_isa_dma);
+			   (int) fp->reserve.k_use_sg, 0);
 		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
 			   (int) fp->cmd_q, (int) fp->force_packid,
 			   (int) fp->keep_orphan);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 5703f8400b73ca..15c305283b6cc1 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -33,10 +33,6 @@ static int xa_test = 0;
 
 module_param(xa_test, int, S_IRUGO | S_IWUSR);
 
-/* primitive to determine whether we need to have GFP_DMA set based on
- * the status of the unchecked_isa_dma flag in the host structure */
-#define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0)
-
 static int sr_read_tochdr(struct cdrom_device_info *cdi,
 		struct cdrom_tochdr *tochdr)
 {
@@ -45,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
 	int result;
 	unsigned char *buffer;
 
-	buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+	buffer = kmalloc(32, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -75,7 +71,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
 	int result;
 	unsigned char *buffer;
 
-	buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+	buffer = kmalloc(32, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -384,7 +380,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
 {
 	Scsi_CD *cd = cdi->handle;
 	struct packet_command cgc;
-	char *buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+	char *buffer = kmalloc(32, GFP_KERNEL);
 	int result;
 
 	if (!buffer)
@@ -567,7 +563,7 @@ int sr_is_xa(Scsi_CD *cd)
 	if (!xa_test)
 		return 0;
 
-	raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd));
+	raw_sector = kmalloc(2048, GFP_KERNEL);
 	if (!raw_sector)
 		return -ENOMEM;
 	if (0 == sr_read_sector(cd, cd->ms_offset + 16,
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9ca536aae78491..3b1afe1d5b2708 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -188,7 +188,7 @@ static int st_max_sg_segs = ST_MAX_SG;
 
 static int modes_defined;
 
-static int enlarge_buffer(struct st_buffer *, int, int);
+static int enlarge_buffer(struct st_buffer *, int);
 static void clear_buffer(struct st_buffer *);
 static void normalize_buffer(struct st_buffer *);
 static int append_to_buffer(const char __user *, struct st_buffer *, int);
@@ -1289,7 +1289,7 @@ static int st_open(struct inode *inode, struct file *filp)
 	}
 
 	/* See that we have at least a one page buffer available */
-	if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) {
+	if (!enlarge_buffer(STp->buffer, PAGE_SIZE)) {
 		st_printk(KERN_WARNING, STp,
 			  "Can't allocate one page tape buffer.\n");
 		retval = (-EOVERFLOW);
@@ -1586,7 +1586,7 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
 		}
 
 		if (bufsize > STbp->buffer_size &&
-		    !enlarge_buffer(STbp, bufsize, STp->restr_dma)) {
+		    !enlarge_buffer(STbp, bufsize)) {
 			st_printk(KERN_WARNING, STp,
 				  "Can't allocate %d byte tape buffer.\n",
 				  bufsize);
@@ -3894,7 +3894,7 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned lon
 
 /* Try to allocate a new tape buffer. Calling function must not hold
    dev_arr_lock. */
-static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
+static struct st_buffer *new_tape_buffer(int max_sg)
 {
 	struct st_buffer *tb;
 
@@ -3905,7 +3905,6 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
 	}
 	tb->frp_segs = 0;
 	tb->use_sg = max_sg;
-	tb->dma = need_dma;
 	tb->buffer_size = 0;
 
 	tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *),
@@ -3922,7 +3921,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
 /* Try to allocate enough space in the tape buffer */
 #define ST_MAX_ORDER 6
 
-static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
+static int enlarge_buffer(struct st_buffer * STbuffer, int new_size)
 {
 	int segs, max_segs, b_size, order, got;
 	gfp_t priority;
@@ -3936,8 +3935,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 	max_segs = STbuffer->use_sg;
 
 	priority = GFP_KERNEL | __GFP_NOWARN;
-	if (need_dma)
-		priority |= GFP_DMA;
 
 	if (STbuffer->cleared)
 		priority |= __GFP_ZERO;
@@ -3957,7 +3954,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 		if (order == ST_MAX_ORDER)
 			return 0;
 		normalize_buffer(STbuffer);
-		return enlarge_buffer(STbuffer, new_size, need_dma);
+		return enlarge_buffer(STbuffer, new_size);
 	}
 
 	for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
@@ -4296,7 +4293,7 @@ static int st_probe(struct device *dev)
 	i = queue_max_segments(SDp->request_queue);
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
-	buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
+	buffer = new_tape_buffer(i);
 	if (buffer == NULL) {
 		sdev_printk(KERN_ERR, SDp,
 			    "st: Can't allocate new tape buffer. "
@@ -4340,7 +4337,6 @@ static int st_probe(struct device *dev)
 	tpnt->dirty = 0;
 	tpnt->in_use = 0;
 	tpnt->drv_buffer = 1;	/* Try buffering if no mode sense */
-	tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
 	tpnt->use_pf = (SDp->scsi_level >= SCSI_2);
 	tpnt->density = 0;
 	tpnt->do_auto_lock = ST_AUTO_LOCK;
@@ -4358,7 +4354,7 @@ static int st_probe(struct device *dev)
 	tpnt->nbr_partitions = 0;
 	blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
 	tpnt->long_timeout = ST_LONG_TIMEOUT;
-	tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
+	tpnt->try_dio = try_direct_io;
 
 	for (i = 0; i < ST_NBR_MODES; i++) {
 		STm = &(tpnt->modes[i]);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 95d2e7a7988dea..9d3c38bb0794ab 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -35,7 +35,6 @@ struct st_request {
 
 /* The tape buffer descriptor. */
 struct st_buffer {
-	unsigned char dma;	/* DMA-able buffer */
 	unsigned char cleared;  /* internal buffer cleared after open? */
 	unsigned short do_dio;  /* direct i/o set up? */
 	int buffer_size;
@@ -133,7 +132,6 @@ struct scsi_tape {
 	unsigned char two_fm;
 	unsigned char fast_mteom;
 	unsigned char immediate;
-	unsigned char restr_dma;
 	unsigned char scsi2_logical;
 	unsigned char default_drvbuffer;	/* 0xff = don't touch, value 3 bits */
 	unsigned char cln_mode;			/* 0 = none, otherwise sense byte nbr */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index ace15b5dc956da..0fd17a5344bd3c 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -55,11 +55,10 @@ struct scsi_pointer {
 
 /* for scmd->flags */
 #define SCMD_TAGGED		(1 << 0)
-#define SCMD_UNCHECKED_ISA_DMA	(1 << 1)
-#define SCMD_INITIALIZED	(1 << 2)
-#define SCMD_LAST		(1 << 3)
+#define SCMD_INITIALIZED	(1 << 1)
+#define SCMD_LAST		(1 << 2)
 /* flags preserved across unprep / reprep */
-#define SCMD_PRESERVED_FLAGS	(SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
+#define SCMD_PRESERVED_FLAGS	(SCMD_INITIALIZED)
 
 /* for scmd->state */
 #define SCMD_STATE_COMPLETE	0
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index e30fd963b97d0c..8343c6f9fec19e 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -424,11 +424,6 @@ struct scsi_host_template {
 	 */
 	unsigned supported_mode:2;
 
-	/*
-	 * True if this host adapter uses unchecked DMA onto an ISA bus.
-	 */
-	unsigned unchecked_isa_dma:1;
-
 	/*
 	 * True for emulated SCSI host adapters (e.g. ATAPI).
 	 */
@@ -617,7 +612,6 @@ struct Scsi_Host {
 	 */
 	unsigned nr_hw_queues;
 	unsigned active_mode:2;
-	unsigned unchecked_isa_dma:1;
 
 	/*
 	 * Host has requested that no further requests come through for the
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 6/8] block: remove BLK_BOUNCE_ISA support
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
                   ` (4 preceding siblings ...)
  2021-03-26  5:58 ` [PATCH 5/8] scsi: remove the unchecked_isa_dma flag Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:32   ` Hannes Reinecke
  2021-03-26  5:58 ` [PATCH 7/8] block: refactor the bounce buffering code Christoph Hellwig
                   ` (2 subsequent siblings)
  8 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

Remove the BLK_BOUNCE_ISA support now that all users are gone.
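
As a rough sketch (not part of the patch): a driver with a 24-bit ISA
addressing limit can no longer ask the block layer to bounce for it via
BLK_BOUNCE_ISA; it would instead express the limit through the DMA API,
or carry a local bounce buffer as aha1542 does earlier in this series.
The helper name below is made up:

#include <linux/dma-mapping.h>

/* hypothetical helper, for illustration only */
static int example_isa_set_dma_limit(struct device *dev)
{
	/* the 24-bit range that BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) used to cover */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));
}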

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bio-integrity.c     |   3 +-
 block/blk-map.c           |   4 +-
 block/blk-settings.c      |  11 ----
 block/blk.h               |   5 --
 block/bounce.c            | 124 ++++++++------------------------------
 block/scsi_ioctl.c        |   2 +-
 drivers/ata/libata-scsi.c |   3 +-
 include/linux/blkdev.h    |   7 ---
 mm/Kconfig                |   9 ++-
 9 files changed, 35 insertions(+), 133 deletions(-)

diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index dfa652122a2dc8..4b4eb8964a6f98 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -204,7 +204,6 @@ bool bio_integrity_prep(struct bio *bio)
 {
 	struct bio_integrity_payload *bip;
 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	void *buf;
 	unsigned long start, end;
 	unsigned int len, nr_pages;
@@ -238,7 +237,7 @@ bool bio_integrity_prep(struct bio *bio)
 
 	/* Allocate kernel buffer for protection data */
 	len = intervals * bi->tuple_size;
-	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
+	buf = kmalloc(len, GFP_NOIO);
 	status = BLK_STS_RESOURCE;
 	if (unlikely(buf == NULL)) {
 		printk(KERN_ERR "could not allocate integrity buffer\n");
diff --git a/block/blk-map.c b/block/blk-map.c
index 1ffef782fcf2dd..b62b52dcb61d97 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -181,7 +181,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 
 			i++;
 		} else {
-			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
+			page = alloc_page(GFP_NOIO | gfp_mask);
 			if (!page) {
 				ret = -ENOMEM;
 				goto cleanup;
@@ -486,7 +486,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		page = alloc_page(q->bounce_gfp | gfp_mask);
+		page = alloc_page(GFP_NOIO | gfp_mask);
 		if (!page)
 			goto cleanup;
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index b4aa2f37fab6f5..f9937dd2810e25 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -103,28 +103,17 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 {
 	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
-	int dma = 0;
 
-	q->bounce_gfp = GFP_NOIO;
 #if BITS_PER_LONG == 64
 	/*
 	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
 	 * some IOMMUs can handle everything, but I don't know of a
 	 * way to test this here.
 	 */
-	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
-		dma = 1;
 	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
 #else
-	if (b_pfn < blk_max_low_pfn)
-		dma = 1;
 	q->limits.bounce_pfn = b_pfn;
 #endif
-	if (dma) {
-		init_emergency_isa_pool();
-		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->limits.bounce_pfn = b_pfn;
-	}
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
diff --git a/block/blk.h b/block/blk.h
index 3b53e44b967e4e..895c9f4a5182a7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -312,13 +312,8 @@ static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
 #ifdef CONFIG_BOUNCE
-extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
-static inline int init_emergency_isa_pool(void)
-{
-	return 0;
-}
 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
diff --git a/block/bounce.c b/block/bounce.c
index 6c441f4f1cd4aa..debd5b0bd31890 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -29,7 +29,7 @@
 #define ISA_POOL_SIZE	16
 
 static struct bio_set bounce_bio_set, bounce_bio_split;
-static mempool_t page_pool, isa_page_pool;
+static mempool_t page_pool;
 
 static void init_bounce_bioset(void)
 {
@@ -89,41 +89,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 
 #endif /* CONFIG_HIGHMEM */
 
-/*
- * allocate pages in the DMA region for the ISA pool
- */
-static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
-{
-	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
-}
-
-static DEFINE_MUTEX(isa_mutex);
-
-/*
- * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
- * as the max address, so check if the pool has already been created.
- */
-int init_emergency_isa_pool(void)
-{
-	int ret;
-
-	mutex_lock(&isa_mutex);
-
-	if (mempool_initialized(&isa_page_pool)) {
-		mutex_unlock(&isa_mutex);
-		return 0;
-	}
-
-	ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
-			   mempool_free_pages, (void *) 0);
-	BUG_ON(ret);
-
-	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
-	init_bounce_bioset();
-	mutex_unlock(&isa_mutex);
-	return 0;
-}
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -159,7 +124,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	}
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool)
+static void bounce_end_io(struct bio *bio)
 {
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, orig_vec;
@@ -173,7 +138,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
 		if (bvec->bv_page != orig_vec.bv_page) {
 			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
-			mempool_free(bvec->bv_page, pool);
+			mempool_free(bvec->bv_page, &page_pool);
 		}
 		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
 	}
@@ -185,33 +150,17 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 
 static void bounce_end_io_write(struct bio *bio)
 {
-	bounce_end_io(bio, &page_pool);
-}
-
-static void bounce_end_io_write_isa(struct bio *bio)
-{
-
-	bounce_end_io(bio, &isa_page_pool);
+	bounce_end_io(bio);
 }
 
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
+static void bounce_end_io_read(struct bio *bio)
 {
 	struct bio *bio_orig = bio->bi_private;
 
 	if (!bio->bi_status)
 		copy_to_high_bio_irq(bio_orig, bio);
 
-	bounce_end_io(bio, pool);
-}
-
-static void bounce_end_io_read(struct bio *bio)
-{
-	__bounce_end_io_read(bio, &page_pool);
-}
-
-static void bounce_end_io_read_isa(struct bio *bio)
-{
-	__bounce_end_io_read(bio, &isa_page_pool);
+	bounce_end_io(bio);
 }
 
 static struct bio *bounce_clone_bio(struct bio *bio_src)
@@ -287,8 +236,8 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	return NULL;
 }
 
-static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool)
+
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -298,6 +247,20 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	bool bounce = false;
 	int sectors = 0;
 
+	/*
+	 * Data-less bio, nothing to bounce
+	 */
+	if (!bio_has_data(*bio_orig))
+		return;
+
+	/*
+	 * Just check if the bounce pfn is equal to or bigger than the highest
+	 * pfn in the system -- in that case, don't waste time iterating over
+	 * bio segments
+	 */
+	if (q->limits.bounce_pfn >= blk_max_pfn)
+		return;
+
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			sectors += from.bv_len >> 9;
@@ -327,7 +290,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		if (page_to_pfn(page) <= q->limits.bounce_pfn)
 			continue;
 
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
 		if (rw == WRITE) {
@@ -346,46 +309,11 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 
 	bio->bi_flags |= (1 << BIO_BOUNCED);
 
-	if (pool == &page_pool) {
+	if (rw == READ)
+		bio->bi_end_io = bounce_end_io_read;
+	else
 		bio->bi_end_io = bounce_end_io_write;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read;
-	} else {
-		bio->bi_end_io = bounce_end_io_write_isa;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read_isa;
-	}
 
 	bio->bi_private = *bio_orig;
 	*bio_orig = bio;
 }
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
-{
-	mempool_t *pool;
-
-	/*
-	 * Data-less bio, nothing to bounce
-	 */
-	if (!bio_has_data(*bio_orig))
-		return;
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (q->limits.bounce_pfn >= blk_max_pfn)
-			return;
-		pool = &page_pool;
-	} else {
-		BUG_ON(!mempool_initialized(&isa_page_pool));
-		pool = &isa_page_pool;
-	}
-
-	/*
-	 * slow path
-	 */
-	__blk_queue_bounce(q, bio_orig, pool);
-}
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 6599bac0a78cb0..1048b09255678c 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -431,7 +431,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
 	bytes = max(in_len, out_len);
 	if (bytes) {
-		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+		buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
 		if (!buffer)
 			return -ENOMEM;
 
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 48b8934970f36a..fd8b6febbf70c4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1043,8 +1043,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
 		blk_queue_max_segments(q, queue_max_segments(q) - 1);
 
 		sdev->dma_drain_len = ATAPI_MAX_DRAIN;
-		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len,
-				q->bounce_gfp | GFP_KERNEL);
+		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
 		if (!sdev->dma_drain_buf) {
 			ata_dev_err(dev, "drain buffer allocation failed\n");
 			return -ENOMEM;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bc6bc8383b434e..0dbb72ea373529 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -436,11 +436,6 @@ struct request_queue {
 	 */
 	int			id;
 
-	/*
-	 * queue needs bounce pages for pages above this limit
-	 */
-	gfp_t			bounce_gfp;
-
 	spinlock_t		queue_lock;
 
 	/*
@@ -847,7 +842,6 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
  *
  * BLK_BOUNCE_HIGH	: bounce all highmem pages
  * BLK_BOUNCE_ANY	: don't bounce anything
- * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
  */
 
 #if BITS_PER_LONG == 32
@@ -856,7 +850,6 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_HIGH		-1ULL
 #endif
 #define BLK_BOUNCE_ANY		(-1ULL)
-#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
 
 /*
  * default timeout for SG_IO if none specified
diff --git a/mm/Kconfig b/mm/Kconfig
index 24c045b24b9506..d0808a23e54bc8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -283,12 +283,11 @@ config PHYS_ADDR_T_64BIT
 config BOUNCE
 	bool "Enable bounce buffers"
 	default y
-	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
+	depends on BLOCK && MMU && HIGHMEM
 	help
-	  Enable bounce buffers for devices that cannot access
-	  the full range of memory available to the CPU. Enabled
-	  by default when ZONE_DMA or HIGHMEM is selected, but you
-	  may say n to override this.
+	  Enable bounce buffers for devices that cannot access the full range of
+	  memory available to the CPU. Enabled by default when HIGHMEM is
+	  selected, but you may say n to override this.
 
 config VIRT_TO_BUS
 	bool
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
                   ` (5 preceding siblings ...)
  2021-03-26  5:58 ` [PATCH 6/8] block: remove BLK_BOUNCE_ISA support Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:34   ` Hannes Reinecke
  2021-03-26  5:58 ` [PATCH 8/8] block: stop calling blk_queue_bounce for passthrough requests Christoph Hellwig
  2021-03-26 23:15 ` start removing block bounce buffering support v2 Jens Axboe
  8 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

Get rid of all the PFN arithmetic and just use an enum for the two
remaining options, and use PageHighMem for the actual bounce decision.

Add a fast path to entirely avoid the call for the common case of a queue
not using the legacy bouncing code.
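
From a driver's point of view the visible change is the argument to
blk_queue_bounce_limit(), which now takes the enum rather than a physical
address limit; queues left at the default BLK_BOUNCE_NONE skip
__blk_queue_bounce() entirely through the new inline check.  A minimal
sketch, with a made-up function name, following the new interface below:

#include <linux/blkdev.h>

/* hypothetical legacy driver hook, for illustration only */
static void example_setup_queue(struct request_queue *q)
{
	/*
	 * The limit used to be a u64 address; it is now enum blk_bounce.
	 * Queues that can reach all memory keep the default BLK_BOUNCE_NONE.
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}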

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-core.c       |  6 ++----
 block/blk-settings.c   | 42 ++++++++----------------------------------
 block/blk.h            | 16 ++++++++++++----
 block/bounce.c         | 35 +++++------------------------------
 include/linux/blkdev.h | 29 +++++++++++------------------
 5 files changed, 38 insertions(+), 90 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index fc60ff20849738..9bcdae93f6d4f7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1161,10 +1161,8 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
 	}
 
 	/*
-	 * queue's settings related to segment counting like q->bounce_pfn
-	 * may differ from that of other stacking queues.
-	 * Recalculate it to check the request correctly on this queue's
-	 * limitation.
+	 * The queue settings related to segment counting may differ from the
+	 * original queue.
 	 */
 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
 	if (rq->nr_phys_segments > queue_max_segments(q)) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f9937dd2810e25..9c009090c4b5bf 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,7 +7,6 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <linux/gcd.h>
 #include <linux/lcm.h>
 #include <linux/jiffies.h>
@@ -17,11 +16,6 @@
 #include "blk.h"
 #include "blk-wbt.h"
 
-unsigned long blk_max_low_pfn;
-EXPORT_SYMBOL(blk_max_low_pfn);
-
-unsigned long blk_max_pfn;
-
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
 	q->rq_timeout = timeout;
@@ -55,7 +49,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
-	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+	lim->bounce = BLK_BOUNCE_NONE;
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
@@ -92,28 +86,16 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @max_addr: the maximum address the device can handle
+ * @bounce: bounce limit to enforce
  *
  * Description:
- *    Different hardware can have different requirements as to what pages
- *    it can do I/O directly to. A low level driver can call
- *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @max_addr.
+ *    Force bouncing for ISA DMA ranges or highmem.
+ *
+ *    DEPRECATED, don't use in new code.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
 {
-	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
-
-#if BITS_PER_LONG == 64
-	/*
-	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
-	 * some IOMMUs can handle everything, but I don't know of a
-	 * way to test this here.
-	 */
-	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
-#else
-	q->limits.bounce_pfn = b_pfn;
-#endif
+	q->limits.bounce = bounce;
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
@@ -536,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					b->max_write_zeroes_sectors);
 	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
 					b->max_zone_append_sectors);
-	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+	t->bounce = max(t->bounce, b->bounce);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
@@ -916,11 +898,3 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 	}
 }
 EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
-
-static int __init blk_settings_init(void)
-{
-	blk_max_low_pfn = max_low_pfn - 1;
-	blk_max_pfn = max_pfn - 1;
-	return 0;
-}
-subsys_initcall(blk_settings_init);
diff --git a/block/blk.h b/block/blk.h
index 895c9f4a5182a7..8f4337c5a9e66c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
 #include <linux/blk-mq.h>
 #include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
+#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
 #include "blk-mq.h"
@@ -311,13 +312,20 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
-#ifdef CONFIG_BOUNCE
-extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-#else
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
+
+static inline bool blk_queue_may_bounce(struct request_queue *q)
+{
+	return IS_ENABLED(CONFIG_BOUNCE) &&
+		q->limits.bounce == BLK_BOUNCE_HIGH &&
+		max_low_pfn >= max_pfn;
+}
+
 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
+	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
+		__blk_queue_bounce(q, bio);	
 }
-#endif /* CONFIG_BOUNCE */
 
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
 extern int blk_iolatency_init(struct request_queue *q);
diff --git a/block/bounce.c b/block/bounce.c
index debd5b0bd31890..6bafc0d1f867a1 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/memblock.h>
 #include <linux/printk.h>
 #include <asm/tlbflush.h>
 
@@ -49,11 +48,11 @@ static void init_bounce_bioset(void)
 	bounce_bs_setup = true;
 }
 
-#if defined(CONFIG_HIGHMEM)
 static __init int init_emergency_pool(void)
 {
 	int ret;
-#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
+
+#ifndef CONFIG_MEMORY_HOTPLUG
 	if (max_pfn <= max_low_pfn)
 		return 0;
 #endif
@@ -67,9 +66,7 @@ static __init int init_emergency_pool(void)
 }
 
 __initcall(init_emergency_pool);
-#endif
 
-#ifdef CONFIG_HIGHMEM
 /*
  * highmem version, map in to vec
  */
@@ -82,13 +79,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 	kunmap_atomic(vto);
 }
 
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom)	\
-	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
-#endif /* CONFIG_HIGHMEM */
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -236,8 +226,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	return NULL;
 }
 
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -247,24 +236,10 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	bool bounce = false;
 	int sectors = 0;
 
-	/*
-	 * Data-less bio, nothing to bounce
-	 */
-	if (!bio_has_data(*bio_orig))
-		return;
-
-	/*
-	 * Just check if the bounce pfn is equal to or bigger than the highest
-	 * pfn in the system -- in that case, don't waste time iterating over
-	 * bio segments
-	 */
-	if (q->limits.bounce_pfn >= blk_max_pfn)
-		return;
-
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			sectors += from.bv_len >> 9;
-		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
+		if (PageHighMem(from.bv_page))
 			bounce = true;
 	}
 	if (!bounce)
@@ -287,7 +262,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= q->limits.bounce_pfn)
+		if (!PageHighMem(page))
 			continue;
 
 		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0dbb72ea373529..55cc8b96c84427 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -313,8 +313,17 @@ enum blk_zoned_model {
 	BLK_ZONED_HM,		/* Host-managed zoned block device */
 };
 
+/*
+ * BLK_BOUNCE_NONE:	never bounce (default)
+ * BLK_BOUNCE_HIGH:	bounce all highmem pages
+ */
+enum blk_bounce {
+	BLK_BOUNCE_NONE,
+	BLK_BOUNCE_HIGH,
+};
+
 struct queue_limits {
-	unsigned long		bounce_pfn;
+	enum blk_bounce		bounce;
 	unsigned long		seg_boundary_mask;
 	unsigned long		virt_boundary_mask;
 
@@ -835,22 +844,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
 	return q->nr_requests;
 }
 
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
- *
- * BLK_BOUNCE_HIGH	: bounce all highmem pages
- * BLK_BOUNCE_ANY	: don't bounce anything
- */
-
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH		-1ULL
-#endif
-#define BLK_BOUNCE_ANY		(-1ULL)
-
 /*
  * default timeout for SG_IO if none specified
  */
@@ -1134,7 +1127,7 @@ extern void blk_abort_request(struct request *);
  * Access functions for manipulating queue properties
  */
 extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 8/8] block: stop calling blk_queue_bounce for passthrough requests
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
                   ` (6 preceding siblings ...)
  2021-03-26  5:58 ` [PATCH 7/8] block: refactor the bounce buffering code Christoph Hellwig
@ 2021-03-26  5:58 ` Christoph Hellwig
  2021-03-29  6:37   ` Hannes Reinecke
  2021-03-26 23:15 ` start removing block bounce buffering support v2 Jens Axboe
  8 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-26  5:58 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

Instead of overloading the passthrough fast path with the deprecated
block layer bounce buffering, let the users that combine an old,
under-maintained driver with a highmem system pay the price by always
falling back to copies in that case.
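
The most visible change for callers is that blk_rq_append_bio() now takes
the bio directly rather than a struct bio ** that bouncing could swap out,
so a failed append leaves the caller owning its bio.  A minimal sketch of
a caller after this patch, with a made-up function name:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* hypothetical caller, mirroring the updated blk_rq_map_kern() below */
static int example_append_bio(struct request *rq, struct bio *bio)
{
	int ret;

	ret = blk_rq_append_bio(rq, bio);	/* bio is never replaced by a clone */
	if (ret)
		bio_put(bio);			/* not linked into the request, drop it */
	return ret;
}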

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-map.c                    | 116 ++++++++---------------------
 block/bounce.c                     |  11 +--
 drivers/nvme/host/lightnvm.c       |   2 +-
 drivers/target/target_core_pscsi.c |   4 +-
 include/linux/blkdev.h             |   2 +-
 5 files changed, 36 insertions(+), 99 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index b62b52dcb61d97..dac78376acc899 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -123,7 +123,6 @@ static int bio_uncopy_user(struct bio *bio)
 			bio_free_pages(bio);
 	}
 	kfree(bmd);
-	bio_put(bio);
 	return ret;
 }
 
@@ -132,7 +131,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 {
 	struct bio_map_data *bmd;
 	struct page *page;
-	struct bio *bio, *bounce_bio;
+	struct bio *bio;
 	int i = 0, ret;
 	int nr_pages;
 	unsigned int len = iter->count;
@@ -218,16 +217,9 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 
 	bio->bi_private = bmd;
 
-	bounce_bio = bio;
-	ret = blk_rq_append_bio(rq, &bounce_bio);
+	ret = blk_rq_append_bio(rq, bio);
 	if (ret)
 		goto cleanup;
-
-	/*
-	 * We link the bounce buffer in and could have to traverse it later, so
-	 * we have to get a ref to prevent it from being freed
-	 */
-	bio_get(bounce_bio);
 	return 0;
 cleanup:
 	if (!map_data)
@@ -242,7 +234,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		gfp_t gfp_mask)
 {
 	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
-	struct bio *bio, *bounce_bio;
+	struct bio *bio;
 	int ret;
 	int j;
 
@@ -304,49 +296,17 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 			break;
 	}
 
-	/*
-	 * Subtle: if we end up needing to bounce a bio, it would normally
-	 * disappear when its bi_end_io is run.  However, we need the original
-	 * bio for the unmap, so grab an extra reference to it
-	 */
-	bio_get(bio);
-
-	bounce_bio = bio;
-	ret = blk_rq_append_bio(rq, &bounce_bio);
+	ret = blk_rq_append_bio(rq, bio);
 	if (ret)
-		goto out_put_orig;
-
-	/*
-	 * We link the bounce buffer in and could have to traverse it
-	 * later, so we have to get a ref to prevent it from being freed
-	 */
-	bio_get(bounce_bio);
+		goto out_unmap;
 	return 0;
 
- out_put_orig:
-	bio_put(bio);
  out_unmap:
 	bio_release_pages(bio, false);
 	bio_put(bio);
 	return ret;
 }
 
-/**
- *	bio_unmap_user	-	unmap a bio
- *	@bio:		the bio being unmapped
- *
- *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
- *	process context.
- *
- *	bio_unmap_user() may sleep.
- */
-static void bio_unmap_user(struct bio *bio)
-{
-	bio_release_pages(bio, bio_data_dir(bio) == READ);
-	bio_put(bio);
-	bio_put(bio);
-}
-
 static void bio_invalidate_vmalloc_pages(struct bio *bio)
 {
 #ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
@@ -519,33 +479,27 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
  * Append a bio to a passthrough request.  Only works if the bio can be merged
  * into the request based on the driver constraints.
  */
-int blk_rq_append_bio(struct request *rq, struct bio **bio)
+int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
-	struct bio *orig_bio = *bio;
 	struct bvec_iter iter;
 	struct bio_vec bv;
 	unsigned int nr_segs = 0;
 
-	blk_queue_bounce(rq->q, bio);
+	if (WARN_ON_ONCE(rq->q->limits.bounce != BLK_BOUNCE_NONE))
+		return -EINVAL;
 
-	bio_for_each_bvec(bv, *bio, iter)
+	bio_for_each_bvec(bv, bio, iter)
 		nr_segs++;
 
 	if (!rq->bio) {
-		blk_rq_bio_prep(rq, *bio, nr_segs);
+		blk_rq_bio_prep(rq, bio, nr_segs);
 	} else {
-		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
-			if (orig_bio != *bio) {
-				bio_put(*bio);
-				*bio = orig_bio;
-			}
+		if (!ll_back_merge_fn(rq, bio, nr_segs))
 			return -EINVAL;
-		}
-
-		rq->biotail->bi_next = *bio;
-		rq->biotail = *bio;
-		rq->__data_len += (*bio)->bi_iter.bi_size;
-		bio_crypt_free_ctx(*bio);
+		rq->biotail->bi_next = bio;
+		rq->biotail = bio;
+		rq->__data_len += (bio)->bi_iter.bi_size;
+		bio_crypt_free_ctx(bio);
 	}
 
 	return 0;
@@ -566,12 +520,6 @@ EXPORT_SYMBOL(blk_rq_append_bio);
  *
  *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
- *
- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
- *    before being submitted to the device, as pages mapped may be out of
- *    reach. It's the callers responsibility to make sure this happens. The
- *    original bio must be passed back in to blk_rq_unmap_user() for proper
- *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct rq_map_data *map_data,
@@ -588,6 +536,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 
 	if (map_data)
 		copy = true;
+	else if (blk_queue_may_bounce(q))
+		copy = true;
 	else if (iov_iter_alignment(iter) & align)
 		copy = true;
 	else if (queue_virt_boundary(q))
@@ -641,25 +591,21 @@ EXPORT_SYMBOL(blk_rq_map_user);
  */
 int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *mapped_bio;
+	struct bio *next_bio;
 	int ret = 0, ret2;
 
 	while (bio) {
-		mapped_bio = bio;
-		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
-			mapped_bio = bio->bi_private;
-
 		if (bio->bi_private) {
-			ret2 = bio_uncopy_user(mapped_bio);
+			ret2 = bio_uncopy_user(bio);
 			if (ret2 && !ret)
 				ret = ret2;
 		} else {
-			bio_unmap_user(mapped_bio);
+			bio_release_pages(bio, bio_data_dir(bio) == READ);
 		}
 
-		mapped_bio = bio;
+		next_bio = bio;
 		bio = bio->bi_next;
-		bio_put(mapped_bio);
+		bio_put(next_bio);
 	}
 
 	return ret;
@@ -684,7 +630,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 {
 	int reading = rq_data_dir(rq) == READ;
 	unsigned long addr = (unsigned long) kbuf;
-	struct bio *bio, *orig_bio;
+	struct bio *bio;
 	int ret;
 
 	if (len > (queue_max_hw_sectors(q) << 9))
@@ -692,7 +638,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
+	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
+	    blk_queue_may_bounce(q))
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
 		bio = bio_map_kern(q, kbuf, len, gfp_mask);
@@ -703,14 +650,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	bio->bi_opf &= ~REQ_OP_MASK;
 	bio->bi_opf |= req_op(rq);
 
-	orig_bio = bio;
-	ret = blk_rq_append_bio(rq, &bio);
-	if (unlikely(ret)) {
-		/* request is too big */
-		bio_put(orig_bio);
-		return ret;
-	}
-
-	return 0;
+	ret = blk_rq_append_bio(rq, bio);
+	if (unlikely(ret))
+		bio_put(bio);
+	return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
diff --git a/block/bounce.c b/block/bounce.c
index 6bafc0d1f867a1..94081e013c58cc 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -180,12 +180,8 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	 *    asking for trouble and would force extra work on
 	 *    __bio_clone_fast() anyways.
 	 */
-	if (bio_is_passthrough(bio_src))
-		bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
-				  bio_segments(bio_src));
-	else
-		bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
-				       &bounce_bio_set);
+	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
+			       &bounce_bio_set);
 	bio->bi_bdev		= bio_src->bi_bdev;
 	if (bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
@@ -245,8 +241,7 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	if (!bounce)
 		return;
 
-	if (!bio_is_passthrough(*bio_orig) &&
-	    sectors < bio_sectors(*bio_orig)) {
+	if (sectors < bio_sectors(*bio_orig)) {
 		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
 		bio_chain(bio, *bio_orig);
 		submit_bio_noacct(*bio_orig);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index b705988629f224..f6ca2fbb711e98 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -660,7 +660,7 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
 	if (rqd->bio)
-		blk_rq_append_bio(rq, &rqd->bio);
+		blk_rq_append_bio(rq, rqd->bio);
 	else
 		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 3cbc074992bc86..7df4a9c9c7ffaa 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -911,7 +911,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 					" %d i: %d bio: %p, allocating another"
 					" bio\n", bio->bi_vcnt, i, bio);
 
-				rc = blk_rq_append_bio(req, &bio);
+				rc = blk_rq_append_bio(req, bio);
 				if (rc) {
 					pr_err("pSCSI: failed to append bio\n");
 					goto fail;
@@ -930,7 +930,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	}
 
 	if (bio) {
-		rc = blk_rq_append_bio(req, &bio);
+		rc = blk_rq_append_bio(req, bio);
 		if (rc) {
 			pr_err("pSCSI: failed to append bio\n");
 			goto fail;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 55cc8b96c84427..d5d320da51f8bf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -909,7 +909,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
+int blk_rq_append_bio(struct request *rq, struct bio *bio);
 extern void blk_queue_split(struct bio **);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 33+ messages in thread

* Re: start removing block bounce buffering support v2
  2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
                   ` (7 preceding siblings ...)
  2021-03-26  5:58 ` [PATCH 8/8] block: stop calling blk_queue_bounce for passthrough requests Christoph Hellwig
@ 2021-03-26 23:15 ` Jens Axboe
  2021-03-30  3:08   ` Martin K. Petersen
  8 siblings, 1 reply; 33+ messages in thread
From: Jens Axboe @ 2021-03-26 23:15 UTC (permalink / raw)
  To: Christoph Hellwig, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/25/21 11:58 PM, Christoph Hellwig wrote:
> Hi all,
> 
> this series starts to clean up and remove the impact of the legacy old
> block layer bounce buffering code.
> 
> First it removes support for ISA bouncing.  This was used by three SCSI
> drivers.  One of them actually had an active user and developer 5 years
> ago so I've converted it to use a local bounce buffer - Ondrej, can you
> test the coversion?  The next one has been known broken for years, and
> the third one looks like it has no users for the ISA support so they
> are just dropped.
> 
> It then removes support for dealing with bounce buffering highmem pages
> for passthrough requests as we can just use the copy instead of the map
> path for them.  This will reduce efficiency for such setups on highmem
> systems (e.g. usb-storage attached DVD drives), but then again that is
> what you get for using a driver not using modern interfaces on a 32-bit
> highmem system.  It does allow to streamline the common path pretty nicely.

The core parts look good to me. If we can get the SCSI side to sign off
on those changes, I can take it for 5.13.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 1/8] aha1542: use a local bounce buffer
  2021-03-26  5:58 ` [PATCH 1/8] aha1542: use a local bounce buffer Christoph Hellwig
@ 2021-03-29  6:22   ` Hannes Reinecke
  0 siblings, 0 replies; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:22 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> To remove the last user of the unchecked_isa_dma flag and thus the block
> layer ISA bounce buffering switch this driver to use its own local bounce
> buffer.  This has the effect of not needing the chain indirection and
> supporting and unlimited number of segments.  It does however limit the
> transfer size for each command to something that can be reasonable
> allocated by dma_alloc_coherent like 8K.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/scsi/aha1542.c | 105 ++++++++++++++++++++++-------------------
>  1 file changed, 57 insertions(+), 48 deletions(-)
> 
I doubt anyone will notice the reduced transfer size; these things are
slow anyway :-)

Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 2/8] Buslogic: remove ISA support
  2021-03-26  5:58 ` [PATCH 2/8] Buslogic: remove ISA support Christoph Hellwig
@ 2021-03-29  6:22   ` Hannes Reinecke
  2021-03-29 20:29   ` Khalid Aziz
  1 sibling, 0 replies; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:22 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> The ISA support in Buslogic has been broken for a long time, as all
> the I/O path expects a struct device for DMA mapping that is derived from
> the PCI device, which would simply crash for ISA adapters.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/scsi/BusLogic.c | 156 ++--------------------------------------
>  drivers/scsi/BusLogic.h |   3 -
>  drivers/scsi/Kconfig    |   2 +-
>  3 files changed, 6 insertions(+), 155 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering
  2021-03-26  5:58 ` [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering Christoph Hellwig
@ 2021-03-29  6:23   ` Hannes Reinecke
  2021-03-29 20:33   ` Khalid Aziz
  1 sibling, 0 replies; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:23 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> Warn on and don't support adapters that have a DMA bug that forces ISA-style
> bounce buffering.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/scsi/BusLogic.c | 21 ++++++---------------
>  drivers/scsi/BusLogic.h |  1 -
>  2 files changed, 6 insertions(+), 16 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 4/8] advansys: remove ISA support
  2021-03-26  5:58 ` [PATCH 4/8] advansys: remove ISA support Christoph Hellwig
@ 2021-03-29  6:31   ` Hannes Reinecke
  2021-03-30 17:26     ` Christoph Hellwig
  0 siblings, 1 reply; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:31 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> This is the last piece in the kernel requiring the block layer ISA
> bounce buffering, and it does not actually look used.  So remove it
> to see if anyone screams, in which case we'll need to find a solution
> to fix it back up.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  drivers/scsi/advansys.c | 283 ++++------------------------------------
>  1 file changed, 25 insertions(+), 258 deletions(-)
> 
> diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
> index ec5627890809e6..ccdd78ac7abd95 100644
> --- a/drivers/scsi/advansys.c
> +++ b/drivers/scsi/advansys.c
> @@ -84,8 +84,6 @@ typedef unsigned char uchar;
>  
>  #define ASC_CS_TYPE  unsigned short
>  
> -#define ASC_IS_ISA          (0x0001)
> -#define ASC_IS_ISAPNP       (0x0081)
>  #define ASC_IS_EISA         (0x0002)
>  #define ASC_IS_PCI          (0x0004)
>  #define ASC_IS_PCI_ULTRA    (0x0104)

Any particular reason why the remaining ISA defines (like
ASC_CHIP_MIN_VER_ISA etc) are being left intact?

[ .. ]
> @@ -8768,16 +8662,6 @@ static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
>  	}
>  
>  	asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
> -#ifdef CONFIG_ISA
> -	if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
> -		if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
> -			AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
> -			asc_dvc->bus_type = ASC_IS_ISAPNP;
> -		}
> -		asc_dvc->cfg->isa_dma_channel =
> -		    (uchar)AscGetIsaDmaChannel(iop_base);
> -	}
> -#endif /* CONFIG_ISA */
>  	for (i = 0; i <= ASC_MAX_TID; i++) {
>  		asc_dvc->cur_dvc_qng[i] = 0;
>  		asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;

Please remove the 'isa_dma_channel' field from struct asc_dvc_cfg, too.

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 5/8] scsi: remove the unchecked_isa_dma flag
  2021-03-26  5:58 ` [PATCH 5/8] scsi: remove the unchecked_isa_dma flag Christoph Hellwig
@ 2021-03-29  6:31   ` Hannes Reinecke
  0 siblings, 0 replies; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:31 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> Remove the unchecked_isa_dma now that all users are gone.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  Documentation/scsi/scsi_mid_low_api.rst |  4 --
>  drivers/scsi/esas2r/esas2r_main.c       |  1 -
>  drivers/scsi/hosts.c                    |  7 +---
>  drivers/scsi/scsi_debugfs.c             |  1 -
>  drivers/scsi/scsi_lib.c                 | 52 +++----------------------
>  drivers/scsi/scsi_scan.c                |  6 +--
>  drivers/scsi/scsi_sysfs.c               |  2 -
>  drivers/scsi/sg.c                       | 10 +----
>  drivers/scsi/sr_ioctl.c                 | 12 ++----
>  drivers/scsi/st.c                       | 20 ++++------
>  drivers/scsi/st.h                       |  2 -
>  include/scsi/scsi_cmnd.h                |  7 ++--
>  include/scsi/scsi_host.h                |  6 ---
>  13 files changed, 25 insertions(+), 105 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 6/8] block: remove BLK_BOUNCE_ISA support
  2021-03-26  5:58 ` [PATCH 6/8] block: remove BLK_BOUNCE_ISA support Christoph Hellwig
@ 2021-03-29  6:32   ` Hannes Reinecke
  0 siblings, 0 replies; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:32 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> Remove the BLK_BOUNCE_ISA support now that all users are gone.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/bio-integrity.c     |   3 +-
>  block/blk-map.c           |   4 +-
>  block/blk-settings.c      |  11 ----
>  block/blk.h               |   5 --
>  block/bounce.c            | 124 ++++++++------------------------------
>  block/scsi_ioctl.c        |   2 +-
>  drivers/ata/libata-scsi.c |   3 +-
>  include/linux/blkdev.h    |   7 ---
>  mm/Kconfig                |   9 ++-
>  9 files changed, 35 insertions(+), 133 deletions(-)
> Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-26  5:58 ` [PATCH 7/8] block: refactor the bounce buffering code Christoph Hellwig
@ 2021-03-29  6:34   ` Hannes Reinecke
  0 siblings, 0 replies; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:34 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> Get rid of all the PFN arithmetic and just use an enum for the two
> remaining options, and use PageHighMem for the actual bounce decision.
> 
> Add a fast path to entirely avoid the call for the common case of a queue
> not using the legacy bouncing code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/blk-core.c       |  6 ++----
>  block/blk-settings.c   | 42 ++++++++----------------------------------
>  block/blk.h            | 16 ++++++++++++----
>  block/bounce.c         | 35 +++++------------------------------
>  include/linux/blkdev.h | 29 +++++++++++------------------
>  5 files changed, 38 insertions(+), 90 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 8/8] block: stop calling blk_queue_bounce for passthrough requests
  2021-03-26  5:58 ` [PATCH 8/8] block: stop calling blk_queue_bounce for passthrough requests Christoph Hellwig
@ 2021-03-29  6:37   ` Hannes Reinecke
  0 siblings, 0 replies; 33+ messages in thread
From: Hannes Reinecke @ 2021-03-29  6:37 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/26/21 6:58 AM, Christoph Hellwig wrote:
> Instead of overloading the passthrough fast path with the deprecated
> block layer bounce buffering, let the users that combine an old,
> under-maintained driver with a highmem system pay the price by always
> falling back to copies in that case.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/blk-map.c                    | 116 ++++++++---------------------
>  block/bounce.c                     |  11 +--
>  drivers/nvme/host/lightnvm.c       |   2 +-
>  drivers/target/target_core_pscsi.c |   4 +-
>  include/linux/blkdev.h             |   2 +-
>  5 files changed, 36 insertions(+), 99 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		           Kernel Storage Architect
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 2/8] Buslogic: remove ISA support
  2021-03-26  5:58 ` [PATCH 2/8] Buslogic: remove ISA support Christoph Hellwig
  2021-03-29  6:22   ` Hannes Reinecke
@ 2021-03-29 20:29   ` Khalid Aziz
  2021-03-30 17:03     ` Christoph Hellwig
  1 sibling, 1 reply; 33+ messages in thread
From: Khalid Aziz @ 2021-03-29 20:29 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/25/21 11:58 PM, Christoph Hellwig wrote:
> The ISA support in Buslogic has been broken for a long time, as all
> the I/O path expects a struct device for DMA mapping that is derived from
> the PCI device, which would simply crash for ISA adapters.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/scsi/BusLogic.c | 156 ++--------------------------------------
>  drivers/scsi/BusLogic.h |   3 -
>  drivers/scsi/Kconfig    |   2 +-
>  3 files changed, 6 insertions(+), 155 deletions(-)
> 

Hi Chris,

This looks good. There is more code that can be removed, for instance
all of the code that supports the "IO:" driver option to specify ISA port
addresses. enum blogic_adapter_bus_type can shrink. "limited_isa" and
"probe*" members of struct blogic_probe_options can go away. You could
add those to this patch, or if you would like, I can create a follow-on
patch to remove that code.

Thanks,
Khalid


> diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
> index ccb061ab0a0ad2..c3ed03c4b3f5cb 100644
> --- a/drivers/scsi/BusLogic.c
> +++ b/drivers/scsi/BusLogic.c
> @@ -561,60 +561,6 @@ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
>  }
>  
>  
> -/*
> -  blogic_add_probeaddr_isa appends a single ISA I/O Address to the list
> -  of I/O Address and Bus Probe Information to be checked for potential BusLogic
> -  Host Adapters.
> -*/
> -
> -static void __init blogic_add_probeaddr_isa(unsigned long io_addr)
> -{
> -	struct blogic_probeinfo *probeinfo;
> -	if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
> -		return;
> -	probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++];
> -	probeinfo->adapter_type = BLOGIC_MULTIMASTER;
> -	probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
> -	probeinfo->io_addr = io_addr;
> -	probeinfo->pci_device = NULL;
> -}
> -
> -
> -/*
> -  blogic_init_probeinfo_isa initializes the list of I/O Address and
> -  Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
> -  only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
> -*/
> -
> -static void __init blogic_init_probeinfo_isa(struct blogic_adapter *adapter)
> -{
> -	/*
> -	   If BusLogic Driver Options specifications requested that ISA
> -	   Bus Probes be inhibited, do not proceed further.
> -	 */
> -	if (blogic_probe_options.noprobe_isa)
> -		return;
> -	/*
> -	   Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
> -	 */
> -	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe330)
> -		blogic_add_probeaddr_isa(0x330);
> -	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe334)
> -		blogic_add_probeaddr_isa(0x334);
> -	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe230)
> -		blogic_add_probeaddr_isa(0x230);
> -	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe234)
> -		blogic_add_probeaddr_isa(0x234);
> -	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe130)
> -		blogic_add_probeaddr_isa(0x130);
> -	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe134)
> -		blogic_add_probeaddr_isa(0x134);
> -}
> -
> -
> -#ifdef CONFIG_PCI
> -
> -
>  /*
>    blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order
>    of increasing PCI Bus and Device Number.
> @@ -667,14 +613,11 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
>  	int nonpr_mmcount = 0, mmcount = 0;
>  	bool force_scan_order = false;
>  	bool force_scan_order_checked = false;
> -	bool addr_seen[6];
>  	struct pci_dev *pci_device = NULL;
>  	int i;
>  	if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
>  		return 0;
>  	blogic_probeinfo_count++;
> -	for (i = 0; i < 6; i++)
> -		addr_seen[i] = false;
>  	/*
>  	   Iterate over the MultiMaster PCI Host Adapters.  For each
>  	   enumerated host adapter, determine whether its ISA Compatible
> @@ -744,11 +687,8 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
>  		host_adapter->io_addr = io_addr;
>  		blogic_intreset(host_adapter);
>  		if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
> -				&adapter_info, sizeof(adapter_info)) ==
> -				sizeof(adapter_info)) {
> -			if (adapter_info.isa_port < 6)
> -				addr_seen[adapter_info.isa_port] = true;
> -		} else
> +				&adapter_info, sizeof(adapter_info)) !=
> +				sizeof(adapter_info))
>  			adapter_info.isa_port = BLOGIC_IO_DISABLE;
>  		/*
>  		   Issue the Modify I/O Address command to disable the
> @@ -835,45 +775,6 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
>  	if (force_scan_order)
>  		blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex],
>  					nonpr_mmcount);
> -	/*
> -	   If no PCI MultiMaster Host Adapter is assigned the Primary
> -	   I/O Address, then the Primary I/O Address must be probed
> -	   explicitly before any PCI host adapters are probed.
> -	 */
> -	if (!blogic_probe_options.noprobe_isa)
> -		if (pr_probeinfo->io_addr == 0 &&
> -				(!blogic_probe_options.limited_isa ||
> -				 blogic_probe_options.probe330)) {
> -			pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
> -			pr_probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
> -			pr_probeinfo->io_addr = 0x330;
> -		}
> -	/*
> -	   Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
> -	   omitting the Primary I/O Address which has already been handled.
> -	 */
> -	if (!blogic_probe_options.noprobe_isa) {
> -		if (!addr_seen[1] &&
> -				(!blogic_probe_options.limited_isa ||
> -				 blogic_probe_options.probe334))
> -			blogic_add_probeaddr_isa(0x334);
> -		if (!addr_seen[2] &&
> -				(!blogic_probe_options.limited_isa ||
> -				 blogic_probe_options.probe230))
> -			blogic_add_probeaddr_isa(0x230);
> -		if (!addr_seen[3] &&
> -				(!blogic_probe_options.limited_isa ||
> -				 blogic_probe_options.probe234))
> -			blogic_add_probeaddr_isa(0x234);
> -		if (!addr_seen[4] &&
> -				(!blogic_probe_options.limited_isa ||
> -				 blogic_probe_options.probe130))
> -			blogic_add_probeaddr_isa(0x130);
> -		if (!addr_seen[5] &&
> -				(!blogic_probe_options.limited_isa ||
> -				 blogic_probe_options.probe134))
> -			blogic_add_probeaddr_isa(0x134);
> -	}
>  	/*
>  	   Iterate over the older non-compliant MultiMaster PCI Host Adapters,
>  	   noting the PCI bus location and assigned IRQ Channel.
> @@ -1078,18 +979,10 @@ static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter)
>  				}
>  			}
>  		}
> -	} else {
> -		blogic_init_probeinfo_isa(adapter);
>  	}
>  }
>  
>  
> -#else
> -#define blogic_init_probeinfo_list(adapter) \
> -		blogic_init_probeinfo_isa(adapter)
> -#endif				/* CONFIG_PCI */
> -
> -
>  /*
>    blogic_failure prints a standardized error message, and then returns false.
>  */
> @@ -1539,14 +1432,6 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
>  		else if (config.irq_ch15)
>  			adapter->irq_ch = 15;
>  	}
> -	if (adapter->adapter_bus_type == BLOGIC_ISA_BUS) {
> -		if (config.dma_ch5)
> -			adapter->dma_ch = 5;
> -		else if (config.dma_ch6)
> -			adapter->dma_ch = 6;
> -		else if (config.dma_ch7)
> -			adapter->dma_ch = 7;
> -	}
>  	/*
>  	   Determine whether Extended Translation is enabled and save it in
>  	   the Host Adapter structure.
> @@ -1686,8 +1571,7 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
>  	if (adapter->fw_ver[0] == '5')
>  		adapter->adapter_qdepth = 192;
>  	else if (adapter->fw_ver[0] == '4')
> -		adapter->adapter_qdepth = (adapter->adapter_bus_type !=
> -						BLOGIC_ISA_BUS ? 100 : 50);
> +		adapter->adapter_qdepth = 100;
>  	else
>  		adapter->adapter_qdepth = 30;
>  	if (strcmp(adapter->fw_ver, "3.31") >= 0) {
> @@ -1727,13 +1611,6 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
>  	   bios_addr is 0.
>  	 */
>  	adapter->bios_addr = ext_setupinfo.bios_addr << 12;
> -	/*
> -	   ISA Host Adapters require Bounce Buffers if there is more than
> -	   16MB memory.
> -	 */
> -	if (adapter->adapter_bus_type == BLOGIC_ISA_BUS &&
> -			(void *) high_memory > (void *) MAX_DMA_ADDRESS)
> -		adapter->need_bouncebuf = true;
>  	/*
>  	   BusLogic BT-445S Host Adapters prior to board revision E have a
>  	   hardware bug whereby when the BIOS is enabled, transfers to/from
> @@ -1839,11 +1716,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
>  	blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
>  	blogic_info("  Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
>  	if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
> -		blogic_info("  DMA Channel: ", adapter);
> -		if (adapter->dma_ch > 0)
> -			blogic_info("%d, ", adapter, adapter->dma_ch);
> -		else
> -			blogic_info("None, ", adapter);
> +		blogic_info("  DMA Channel: None, ", adapter);
>  		if (adapter->bios_addr > 0)
>  			blogic_info("BIOS Address: 0x%lX, ", adapter,
>  					adapter->bios_addr);
> @@ -1995,18 +1868,6 @@ static bool __init blogic_getres(struct blogic_adapter *adapter)
>  		return false;
>  	}
>  	adapter->irq_acquired = true;
> -	/*
> -	   Acquire exclusive access to the DMA Channel.
> -	 */
> -	if (adapter->dma_ch > 0) {
> -		if (request_dma(adapter->dma_ch, adapter->full_model) < 0) {
> -			blogic_err("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", adapter, adapter->dma_ch);
> -			return false;
> -		}
> -		set_dma_mode(adapter->dma_ch, DMA_MODE_CASCADE);
> -		enable_dma(adapter->dma_ch);
> -		adapter->dma_chan_acquired = true;
> -	}
>  	/*
>  	   Indicate the System Resource Acquisition completed successfully,
>  	 */
> @@ -2026,11 +1887,6 @@ static void blogic_relres(struct blogic_adapter *adapter)
>  	 */
>  	if (adapter->irq_acquired)
>  		free_irq(adapter->irq_ch, adapter);
> -	/*
> -	   Release exclusive access to the DMA Channel.
> -	 */
> -	if (adapter->dma_chan_acquired)
> -		free_dma(adapter->dma_ch);
>  	/*
>  	   Release any allocated memory structs not released elsewhere
>  	 */
> @@ -3694,9 +3550,7 @@ static int __init blogic_parseopts(char *options)
>  					blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
>  					return 0;
>  				}
> -			} else if (blogic_parse(&options, "NoProbeISA"))
> -				blogic_probe_options.noprobe_isa = true;
> -			else if (blogic_parse(&options, "NoProbePCI"))
> +			} else if (blogic_parse(&options, "NoProbePCI"))
>  				blogic_probe_options.noprobe_pci = true;
>  			else if (blogic_parse(&options, "NoProbe"))
>  				blogic_probe_options.noprobe = true;
> diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
> index 6182cc8a0344a8..6eaddc009b5c55 100644
> --- a/drivers/scsi/BusLogic.h
> +++ b/drivers/scsi/BusLogic.h
> @@ -237,7 +237,6 @@ struct blogic_probeinfo {
>  
>  struct blogic_probe_options {
>  	bool noprobe:1;			/* Bit 0 */
> -	bool noprobe_isa:1;		/* Bit 1 */
>  	bool noprobe_pci:1;		/* Bit 2 */
>  	bool nosort_pci:1;		/* Bit 3 */
>  	bool multimaster_first:1;	/* Bit 4 */
> @@ -997,10 +996,8 @@ struct blogic_adapter {
>  	unsigned char bus;
>  	unsigned char dev;
>  	unsigned char irq_ch;
> -	unsigned char dma_ch;
>  	unsigned char scsi_id;
>  	bool irq_acquired:1;
> -	bool dma_chan_acquired:1;
>  	bool ext_trans_enable:1;
>  	bool parity:1;
>  	bool reset_enabled:1;
> diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
> index 06b87c7f6babd3..3d114be5b662df 100644
> --- a/drivers/scsi/Kconfig
> +++ b/drivers/scsi/Kconfig
> @@ -497,7 +497,7 @@ config SCSI_HPTIOP
>  
>  config SCSI_BUSLOGIC
>  	tristate "BusLogic SCSI support"
> -	depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
> +	depends on PCI && SCSI && VIRT_TO_BUS
>  	help
>  	  This is support for BusLogic MultiMaster and FlashPoint SCSI Host
>  	  Adapters. Consult the SCSI-HOWTO, available from
> 


^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering
  2021-03-26  5:58 ` [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering Christoph Hellwig
  2021-03-29  6:23   ` Hannes Reinecke
@ 2021-03-29 20:33   ` Khalid Aziz
  1 sibling, 0 replies; 33+ messages in thread
From: Khalid Aziz @ 2021-03-29 20:33 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

On 3/25/21 11:58 PM, Christoph Hellwig wrote:
> Warn on and don't support adapters that have a DMA bug that forces ISA-style
> bounce buffering.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/scsi/BusLogic.c | 21 ++++++---------------
>  drivers/scsi/BusLogic.h |  1 -
>  2 files changed, 6 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
> index c3ed03c4b3f5cb..c8977e4bdba8c2 100644
> --- a/drivers/scsi/BusLogic.c
> +++ b/drivers/scsi/BusLogic.c
> @@ -1616,14 +1616,12 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
>  	   hardware bug whereby when the BIOS is enabled, transfers to/from
>  	   the same address range the BIOS occupies modulo 16MB are handled
>  	   incorrectly.  Only properly functioning BT-445S Host Adapters
> -	   have firmware version 3.37, so require that ISA Bounce Buffers
> -	   be used for the buggy BT-445S models if there is more than 16MB
> -	   memory.
> +	   have firmware version 3.37.
>  	 */
> -	if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 &&
> -			strcmp(adapter->fw_ver, "3.37") < 0 &&
> -			(void *) high_memory > (void *) MAX_DMA_ADDRESS)
> -		adapter->need_bouncebuf = true;
> +	if (adapter->bios_addr > 0 &&
> +	    strcmp(adapter->model, "BT-445S") == 0 &&
> +	    strcmp(adapter->fw_ver, "3.37") < 0)
> +		return blogic_failure(adapter, "Too old firmware");
>  	/*
>  	   Initialize parameters common to MultiMaster and FlashPoint
>  	   Host Adapters.
> @@ -1646,14 +1644,9 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
>  		if (adapter->drvr_opts != NULL &&
>  				adapter->drvr_opts->qdepth[tgt_id] > 0)
>  			qdepth = adapter->drvr_opts->qdepth[tgt_id];
> -		else if (adapter->need_bouncebuf)
> -			qdepth = BLOGIC_TAG_DEPTH_BB;
>  		adapter->qdepth[tgt_id] = qdepth;
>  	}
> -	if (adapter->need_bouncebuf)
> -		adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH_BB;
> -	else
> -		adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
> +	adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
>  	if (adapter->drvr_opts != NULL)
>  		adapter->common_qdepth = adapter->drvr_opts->common_qdepth;
>  	if (adapter->common_qdepth > 0 &&
> @@ -2155,7 +2148,6 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
>  	host->this_id = adapter->scsi_id;
>  	host->can_queue = adapter->drvr_qdepth;
>  	host->sg_tablesize = adapter->drvr_sglimit;
> -	host->unchecked_isa_dma = adapter->need_bouncebuf;
>  	host->cmd_per_lun = adapter->untag_qdepth;
>  }
>  
> @@ -3705,7 +3697,6 @@ static struct scsi_host_template blogic_template = {
>  #if 0
>  	.eh_abort_handler = blogic_abort,
>  #endif
> -	.unchecked_isa_dma = 1,
>  	.max_sectors = 128,
>  };
>  
> diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
> index 6eaddc009b5c55..858187af8fd1e8 100644
> --- a/drivers/scsi/BusLogic.h
> +++ b/drivers/scsi/BusLogic.h
> @@ -1010,7 +1010,6 @@ struct blogic_adapter {
>  	bool terminfo_valid:1;
>  	bool low_term:1;
>  	bool high_term:1;
> -	bool need_bouncebuf:1;
>  	bool strict_rr:1;
>  	bool scam_enabled:1;
>  	bool scam_lev2:1;
> 
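
A side note on the version test in the hunk above, as a sketch (the version
strings below are made-up examples, not from the patch): strcmp() gives the
right ordering here because the firmware versions being compared are of the
"d.dd" form already used elsewhere in the driver (e.g. the existing "3.31"
check), so lexicographic order matches numeric order:

/* illustration only: lexicographic order == numeric order for "d.dd" */
strcmp("3.31", "3.37") < 0;	/* true  -> too old, adapter rejected */
strcmp("3.37", "3.37") < 0;	/* false -> fixed firmware, accepted  */
strcmp("4.10", "3.37") < 0;	/* false -> newer firmware, accepted  */
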
Acked-by: Khalid Aziz <khalid@gonehiking.org>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: start removing block bounce buffering support v2
  2021-03-26 23:15 ` start removing block bounce buffering support v2 Jens Axboe
@ 2021-03-30  3:08   ` Martin K. Petersen
  0 siblings, 0 replies; 33+ messages in thread
From: Martin K. Petersen @ 2021-03-30  3:08 UTC (permalink / raw)
  To: Jens Axboe
  Cc: Christoph Hellwig, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary, linux-block,
	linux-scsi


Jens,

> The core parts look good to me. If we can get the SCSI side to sign off
> on those changes, I can take it for 5.13.

No objections from me, assuming the ISA vestiges pointed out are removed.

Acked-by: Martin K. Petersen <martin.petersen@oracle.com>

-- 
Martin K. Petersen	Oracle Linux Engineering

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 2/8] Buslogic: remove ISA support
  2021-03-29 20:29   ` Khalid Aziz
@ 2021-03-30 17:03     ` Christoph Hellwig
  2021-03-30 17:15       ` Khalid Aziz
  0 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-30 17:03 UTC (permalink / raw)
  To: Khalid Aziz
  Cc: Christoph Hellwig, Jens Axboe, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary, linux-block,
	linux-scsi

On Mon, Mar 29, 2021 at 02:29:21PM -0600, Khalid Aziz wrote:
> On 3/25/21 11:58 PM, Christoph Hellwig wrote:
> > The ISA support in Buslogic has been broken for a long time, as the
> > entire I/O path expects a struct device for DMA mapping that is derived from
> > the PCI device, which would simply crash for ISA adapters.
> > 
> > Signed-off-by: Christoph Hellwig <hch@lst.de>
> > ---
> >  drivers/scsi/BusLogic.c | 156 ++--------------------------------------
> >  drivers/scsi/BusLogic.h |   3 -
> >  drivers/scsi/Kconfig    |   2 +-
> >  3 files changed, 6 insertions(+), 155 deletions(-)
> > 
> 
> Hi Chris,
> 
> This looks good. There is more code that can be removed, for instance
> > all of the code that supports the "IO:" driver option to specify ISA port
> addresses. enum blogic_adapter_bus_type can shrink. "limited_isa" and
> "probe*" members of struct blogic_probe_options can go away. You could
> add those to this patch, or if you would like, I can create a follow-on
> patch to remove that code.

I've added the above suggestions.  If there is anything more you
can easily think of let me know.

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 2/8] Buslogic: remove ISA support
  2021-03-30 17:03     ` Christoph Hellwig
@ 2021-03-30 17:15       ` Khalid Aziz
  2021-03-30 17:31         ` Christoph Hellwig
  0 siblings, 1 reply; 33+ messages in thread
From: Khalid Aziz @ 2021-03-30 17:15 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Jens Axboe, Martin K. Petersen, Matthew Wilcox, Hannes Reinecke,
	Ondrej Zary, linux-block, linux-scsi

On 3/30/21 11:03 AM, Christoph Hellwig wrote:
> On Mon, Mar 29, 2021 at 02:29:21PM -0600, Khalid Aziz wrote:
>> On 3/25/21 11:58 PM, Christoph Hellwig wrote:
>>> The ISA support in Buslogic has been broken for a long time, as the
>>> entire I/O path expects a struct device for DMA mapping that is derived from
>>> the PCI device, which would simply crash for ISA adapters.
>>>
>>> Signed-off-by: Christoph Hellwig <hch@lst.de>
>>> ---
>>>  drivers/scsi/BusLogic.c | 156 ++--------------------------------------
>>>  drivers/scsi/BusLogic.h |   3 -
>>>  drivers/scsi/Kconfig    |   2 +-
>>>  3 files changed, 6 insertions(+), 155 deletions(-)
>>>
>>
>> Hi Chris,
>>
>> This looks good. There is more code that can be removed, for instance
>> all of the code that supports the "IO:" driver option to specify ISA port
>> addresses. enum blogic_adapter_bus_type can shrink. "limited_isa" and
>> "probe*" members of struct blogic_probe_options can go away. You could
>> add those to this patch, or if you would like, I can create a follow-on
>> patch to remove that code.
> 
> I've added the above suggestions.  If there is anything more you
> can easily think of let me know.
> 

Awesome! Thanks. Updates to Documentation/scsi/BusLogic.rst to match
these changes would be great. The doc currently lists "IO:" and "NoProbeISA",
which can go away. The "Supported Host Adapters:" section lists ISA and EISA
adapters, which can go away as well. There is a reference to ISA in
"QueueDepth:<integer>" - "For Host Adapters that require ISA Bounce
Buffers, the Queue Depth is automatically set by default to
BusLogic_TaggedQueueDepthBB or BusLogic_UntaggedQueueDepthBB to avoid
excessive preallocation of DMA Bounce Buffer memory." which is
irrelevant now.

Thanks,
Khalid

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 4/8] advansys: remove ISA support
  2021-03-29  6:31   ` Hannes Reinecke
@ 2021-03-30 17:26     ` Christoph Hellwig
  0 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-30 17:26 UTC (permalink / raw)
  To: Hannes Reinecke
  Cc: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary, linux-block,
	linux-scsi

On Mon, Mar 29, 2021 at 08:31:21AM +0200, Hannes Reinecke wrote:
> >  #define ASC_IS_PCI          (0x0004)
> >  #define ASC_IS_PCI_ULTRA    (0x0104)
> 
> Any particular reason why the remaining ISA defines (like
> ASC_CHIP_MIN_VER_ISA etc) are being left intact?

I can do that.

> Please remove the 'isa_dma_channel' field from struct asc_dvc_cfg, too.

Sure.  This needs a little tweak as it seems to get written back to
the eeprom, though.

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 2/8] Buslogic: remove ISA support
  2021-03-30 17:15       ` Khalid Aziz
@ 2021-03-30 17:31         ` Christoph Hellwig
  0 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-30 17:31 UTC (permalink / raw)
  To: Khalid Aziz
  Cc: Christoph Hellwig, Jens Axboe, Martin K. Petersen,
	Matthew Wilcox, Hannes Reinecke, Ondrej Zary, linux-block,
	linux-scsi

On Tue, Mar 30, 2021 at 11:15:22AM -0600, Khalid Aziz wrote:
> Awesome! Thanks. Updates to Documentation/scsi/BusLogic.rst to match
> these changes would be great. Doc currently lists "IO:" and "NoProbeISA"
> which can go away. "Supported Host Adapters: section lists ISA and EISA
> adapters that can go away as well. There is reference to ISA in
> "QueueDepth:<integer>" - "For Host Adapters that require ISA Bounce
> Buffers, the Queue Depth is automatically set by default to
> BusLogic_TaggedQueueDepthBB or BusLogic_UntaggedQueueDepthBB to avoid
> excessive preallocation of DMA Bounce Buffer memory." which is
> irrelevant now.

Thanks, I've added these changes as well.

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-31  7:29 start removing block bounce buffering support v3 Christoph Hellwig
@ 2021-03-31  7:30 ` Christoph Hellwig
  0 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-31  7:30 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi, Hannes Reinecke

Get rid of all the PFN arithmetic and just use an enum for the two
remaining options, and use PageHighMem for the actual bounce decision.

Add a fast path to entirely avoid the call for the common case of a queue
not using the legacy bouncing code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
 block/blk-core.c       |  6 ++----
 block/blk-settings.c   | 42 ++++++++----------------------------------
 block/blk.h            | 16 ++++++++++++----
 block/bounce.c         | 35 +++++------------------------------
 include/linux/blkdev.h | 29 +++++++++++------------------
 5 files changed, 38 insertions(+), 90 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index fc60ff20849738..9bcdae93f6d4f7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1161,10 +1161,8 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
 	}
 
 	/*
-	 * queue's settings related to segment counting like q->bounce_pfn
-	 * may differ from that of other stacking queues.
-	 * Recalculate it to check the request correctly on this queue's
-	 * limitation.
+	 * The queue settings related to segment counting may differ from the
+	 * original queue.
 	 */
 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
 	if (rq->nr_phys_segments > queue_max_segments(q)) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f9937dd2810e25..9c009090c4b5bf 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,7 +7,6 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <linux/gcd.h>
 #include <linux/lcm.h>
 #include <linux/jiffies.h>
@@ -17,11 +16,6 @@
 #include "blk.h"
 #include "blk-wbt.h"
 
-unsigned long blk_max_low_pfn;
-EXPORT_SYMBOL(blk_max_low_pfn);
-
-unsigned long blk_max_pfn;
-
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
 	q->rq_timeout = timeout;
@@ -55,7 +49,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
-	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+	lim->bounce = BLK_BOUNCE_NONE;
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
@@ -92,28 +86,16 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @max_addr: the maximum address the device can handle
+ * @bounce: bounce limit to enforce
  *
  * Description:
- *    Different hardware can have different requirements as to what pages
- *    it can do I/O directly to. A low level driver can call
- *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @max_addr.
+ *    Force bouncing for ISA DMA ranges or highmem.
+ *
+ *    DEPRECATED, don't use in new code.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
 {
-	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
-
-#if BITS_PER_LONG == 64
-	/*
-	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
-	 * some IOMMUs can handle everything, but I don't know of a
-	 * way to test this here.
-	 */
-	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
-#else
-	q->limits.bounce_pfn = b_pfn;
-#endif
+	q->limits.bounce = bounce;
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
@@ -536,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					b->max_write_zeroes_sectors);
 	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
 					b->max_zone_append_sectors);
-	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+	t->bounce = max(t->bounce, b->bounce);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
@@ -916,11 +898,3 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 	}
 }
 EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
-
-static int __init blk_settings_init(void)
-{
-	blk_max_low_pfn = max_low_pfn - 1;
-	blk_max_pfn = max_pfn - 1;
-	return 0;
-}
-subsys_initcall(blk_settings_init);
diff --git a/block/blk.h b/block/blk.h
index 895c9f4a5182a7..8f4337c5a9e66c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
 #include <linux/blk-mq.h>
 #include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
+#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
 #include "blk-mq.h"
@@ -311,13 +312,20 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
-#ifdef CONFIG_BOUNCE
-extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-#else
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
+
+static inline bool blk_queue_may_bounce(struct request_queue *q)
+{
+	return IS_ENABLED(CONFIG_BOUNCE) &&
+		q->limits.bounce == BLK_BOUNCE_HIGH &&
+		max_low_pfn >= max_pfn;
+}
+
 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
+	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
+		__blk_queue_bounce(q, bio);	
 }
-#endif /* CONFIG_BOUNCE */
 
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
 extern int blk_iolatency_init(struct request_queue *q);
diff --git a/block/bounce.c b/block/bounce.c
index debd5b0bd31890..6bafc0d1f867a1 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/memblock.h>
 #include <linux/printk.h>
 #include <asm/tlbflush.h>
 
@@ -49,11 +48,11 @@ static void init_bounce_bioset(void)
 	bounce_bs_setup = true;
 }
 
-#if defined(CONFIG_HIGHMEM)
 static __init int init_emergency_pool(void)
 {
 	int ret;
-#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
+
+#ifndef CONFIG_MEMORY_HOTPLUG
 	if (max_pfn <= max_low_pfn)
 		return 0;
 #endif
@@ -67,9 +66,7 @@ static __init int init_emergency_pool(void)
 }
 
 __initcall(init_emergency_pool);
-#endif
 
-#ifdef CONFIG_HIGHMEM
 /*
  * highmem version, map in to vec
  */
@@ -82,13 +79,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 	kunmap_atomic(vto);
 }
 
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom)	\
-	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
-#endif /* CONFIG_HIGHMEM */
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -236,8 +226,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	return NULL;
 }
 
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -247,24 +236,10 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	bool bounce = false;
 	int sectors = 0;
 
-	/*
-	 * Data-less bio, nothing to bounce
-	 */
-	if (!bio_has_data(*bio_orig))
-		return;
-
-	/*
-	 * Just check if the bounce pfn is equal to or bigger than the highest
-	 * pfn in the system -- in that case, don't waste time iterating over
-	 * bio segments
-	 */
-	if (q->limits.bounce_pfn >= blk_max_pfn)
-		return;
-
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			sectors += from.bv_len >> 9;
-		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
+		if (PageHighMem(from.bv_page))
 			bounce = true;
 	}
 	if (!bounce)
@@ -287,7 +262,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= q->limits.bounce_pfn)
+		if (!PageHighMem(page))
 			continue;
 
 		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0dbb72ea373529..55cc8b96c84427 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -313,8 +313,17 @@ enum blk_zoned_model {
 	BLK_ZONED_HM,		/* Host-managed zoned block device */
 };
 
+/*
+ * BLK_BOUNCE_NONE:	never bounce (default)
+ * BLK_BOUNCE_HIGH:	bounce all highmem pages
+ */
+enum blk_bounce {
+	BLK_BOUNCE_NONE,
+	BLK_BOUNCE_HIGH,
+};
+
 struct queue_limits {
-	unsigned long		bounce_pfn;
+	enum blk_bounce		bounce;
 	unsigned long		seg_boundary_mask;
 	unsigned long		virt_boundary_mask;
 
@@ -835,22 +844,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
 	return q->nr_requests;
 }
 
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
- *
- * BLK_BOUNCE_HIGH	: bounce all highmem pages
- * BLK_BOUNCE_ANY	: don't bounce anything
- */
-
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH		-1ULL
-#endif
-#define BLK_BOUNCE_ANY		(-1ULL)
-
 /*
  * default timeout for SG_IO if none specified
  */
@@ -1134,7 +1127,7 @@ extern void blk_abort_request(struct request *);
  * Access functions for manipulating queue properties
  */
 extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-- 
2.30.1
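
A minimal usage sketch of the new limit interface, not part of the patch
itself ("q" is just a placeholder for whichever request_queue one of the
remaining legacy drivers sets up):

/* sketch: a legacy driver opting in to highmem bouncing */
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

/*
 * Every other queue keeps the BLK_BOUNCE_NONE default from
 * blk_set_default_limits() and never reaches __blk_queue_bounce().
 */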


^ permalink raw reply related	[flat|nested] 33+ messages in thread

* Re: [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-24 18:07           ` Benjamin Block
@ 2021-03-24 18:13             ` Christoph Hellwig
  0 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-24 18:13 UTC (permalink / raw)
  To: Benjamin Block
  Cc: Christoph Hellwig, Matthew Wilcox, Jens Axboe, Khalid Aziz,
	Martin K. Petersen, Hannes Reinecke, Ondrej Zary, linux-block,
	linux-scsi

On Wed, Mar 24, 2021 at 07:07:40PM +0100, Benjamin Block wrote:
> But map_request() -> multipath_clone_and_map() -> dm_dispatch_clone_request() 
> doesn't call blk_mq_submit_bio() for requests that have been queued in a
> request-based mpath device. The request gets cloned and then dispatched
> on the lower queue. Or am I missing something?

Indeed.  I keep forgetting that dm-mpath bypasses the normal
submit_bio_noacct path.

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-24 17:44         ` Christoph Hellwig
@ 2021-03-24 18:07           ` Benjamin Block
  2021-03-24 18:13             ` Christoph Hellwig
  0 siblings, 1 reply; 33+ messages in thread
From: Benjamin Block @ 2021-03-24 18:07 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Matthew Wilcox, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Hannes Reinecke, Ondrej Zary, linux-block, linux-scsi

On Wed, Mar 24, 2021 at 06:44:58PM +0100, Christoph Hellwig wrote:
> On Wed, Mar 24, 2021 at 06:40:32PM +0100, Benjamin Block wrote:
> > Is blk_queue_bounce() called again when mpath submits the request to the
> > lower device? I thought when I looked at this code some time ago
> > bouncing would only be checked the first time a request is created
> > (dm-mpath), and then not again, so when we don't check for whether
> > bouncing is necessary in mpath, we still might screw the LLD - hence why
> > we might inherit this via the limits.
> 
> Every call to blk_mq_submit_bio also calls blk_queue_bounce, 

But map_request() -> multipath_clone_and_map() -> dm_dispatch_clone_request() 
doesn't call blk_mq_submit_bio() for requests that have been queued in a
request-based mpath device. The request gets cloned and then dispatched
on the lower queue. Or am I missing something?

> and
> blk_queue_bounce then checks if it needs to do anything based on the
> bounce limit and max_pfn, and if needed proceeds to check every bvec.
> 
> So for the extremely unlikely case that someone is running multipath over
> one of the few remaining drivers that need block layer bounce buffering,
> this inheritance just leads to (harmless) extra work.

Yeah, fair enough. I don't know whether anyone would care about those old
drivers; it just crossed my mind.

-- 
Best Regards, Benjamin Block  / Linux on IBM Z Kernel Development / IBM Systems
IBM Deutschland Research & Development GmbH    /    https://www.ibm.com/privacy
Vorsitz. AufsR.: Gregor Pillen         /        Geschäftsführung: Dirk Wittkopp
Sitz der Gesellschaft: Böblingen / Registergericht: AmtsG Stuttgart, HRB 243294

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-24 17:40       ` Benjamin Block
@ 2021-03-24 17:44         ` Christoph Hellwig
  2021-03-24 18:07           ` Benjamin Block
  0 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-24 17:44 UTC (permalink / raw)
  To: Benjamin Block
  Cc: Christoph Hellwig, Matthew Wilcox, Jens Axboe, Khalid Aziz,
	Martin K. Petersen, Hannes Reinecke, Ondrej Zary, linux-block,
	linux-scsi

On Wed, Mar 24, 2021 at 06:40:32PM +0100, Benjamin Block wrote:
> Is blk_queue_bounce() called again when mpath submits the request to the
> lower device? I thought when I looked at this code some time ago
> bouncing would only be checked the first time a request is created
> (dm-mpath), and then not again, so when we don't check for whether
> bouncing is necessary in mpath, we still might screw the LLD - hence why
> we might inherit this via the limits.

Every call to blk_mq_submit_bio also calls blk_queue_bounce, and
blk_queue_bounce then checks if it needs to do anything based on the
bounce limit and max_pfn, and if needed proceeds to check every bvec.

So for the extremely unlikely case that someone is running multipath over
one of the few remaining drivers that need block layer bounce buffering,
this inheritance just leads to (harmless) extra work.
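
Condensed from the blk.h and bounce.c hunks in this series, the ordering
described above is roughly the following (a sketch with comments added, no
new code):

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	/* cheap per-queue limit check first ... */
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

/*
 * ... and only __blk_queue_bounce() walks the bio with
 * bio_for_each_segment(), bouncing just the PageHighMem() bvecs.
 */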

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-18 12:53     ` Christoph Hellwig
@ 2021-03-24 17:40       ` Benjamin Block
  2021-03-24 17:44         ` Christoph Hellwig
  0 siblings, 1 reply; 33+ messages in thread
From: Benjamin Block @ 2021-03-24 17:40 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Matthew Wilcox, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Hannes Reinecke, Ondrej Zary, linux-block, linux-scsi

On Thu, Mar 18, 2021 at 01:53:40PM +0100, Christoph Hellwig wrote:
> On Thu, Mar 18, 2021 at 11:29:50AM +0000, Matthew Wilcox wrote:
> > On Thu, Mar 18, 2021 at 07:39:22AM +0100, Christoph Hellwig wrote:
> > > @@ -536,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
> > >  					b->max_write_zeroes_sectors);
> > >  	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
> > >  					b->max_zone_append_sectors);
> > > -	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
> > > +	t->bounce = min_not_zero(t->bounce, b->bounce);
> > 
> > I see how min_not_zero() made sense when it was a pfn.  Does it still
> > make sense now it's an enum?  I would have thought it'd now be 'max()',
> > given the definitions later on.
> 
> Actually, blk_stack_limits should not look at ->bounce_pfn / ->bounce
> at all.  blk_queue_bounce is only called by blk_mq_submit_bio, and
> the only stacked blk-mq driver (dm-mpath) does not need bouncing.
> 
> I'll add a patch to fix this up to the front of the series.

Is blk_queue_bounce() called again when mpath submits the request to the
lower device? I thought when I looked at this code some time ago
bouncing would only be checked the first time a request is created
(dm-mpath), and then not again, so when we don't check for whether
bouncing is necessary in mpath, we still might screw the LLD - hence why
we might inherit this via the limits.

-- 
Best Regards, Benjamin Block  / Linux on IBM Z Kernel Development / IBM Systems
IBM Deutschland Research & Development GmbH    /    https://www.ibm.com/privacy
Vorsitz. AufsR.: Gregor Pillen         /        Geschäftsführung: Dirk Wittkopp
Sitz der Gesellschaft: Böblingen / Registergericht: AmtsG Stuttgart, HRB 243294

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-18 11:29   ` Matthew Wilcox
@ 2021-03-18 12:53     ` Christoph Hellwig
  2021-03-24 17:40       ` Benjamin Block
  0 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-18 12:53 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: Christoph Hellwig, Jens Axboe, Khalid Aziz, Martin K. Petersen,
	Hannes Reinecke, Ondrej Zary, linux-block, linux-scsi

On Thu, Mar 18, 2021 at 11:29:50AM +0000, Matthew Wilcox wrote:
> On Thu, Mar 18, 2021 at 07:39:22AM +0100, Christoph Hellwig wrote:
> > @@ -536,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
> >  					b->max_write_zeroes_sectors);
> >  	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
> >  					b->max_zone_append_sectors);
> > -	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
> > +	t->bounce = min_not_zero(t->bounce, b->bounce);
> 
> I see how min_not_zero() made sense when it was a pfn.  Does it still
> make sense now it's an enum?  I would have thought it'd now be 'max()',
> given the definitions later on.

Actually, blk_stack_limits should not look at ->bounce_pfn / ->bounce
at all.  blk_queue_bounce is only called by blk_mq_submit_bio, and
the only stacked blk-mq driver (dm-mpath) does not need bouncing.

I'll add a patch to fix this up to the front of the series.

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-18  6:39 ` [PATCH 7/8] block: refactor the bounce buffering code Christoph Hellwig
@ 2021-03-18 11:29   ` Matthew Wilcox
  2021-03-18 12:53     ` Christoph Hellwig
  0 siblings, 1 reply; 33+ messages in thread
From: Matthew Wilcox @ 2021-03-18 11:29 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Jens Axboe, Khalid Aziz, Martin K. Petersen, Hannes Reinecke,
	Ondrej Zary, linux-block, linux-scsi

On Thu, Mar 18, 2021 at 07:39:22AM +0100, Christoph Hellwig wrote:
> @@ -536,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
>  					b->max_write_zeroes_sectors);
>  	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
>  					b->max_zone_append_sectors);
> -	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
> +	t->bounce = min_not_zero(t->bounce, b->bounce);

I see how min_not_zero() made sense when it was a pfn.  Does it still
make sense now it's an enum?  I would have thought it'd now be 'max()',
given the definitions later on.

> +/*
> + * BLK_BOUNCE_NONE:	never bounce (default)
> + * BLK_BOUNCE_HIGH:	bounce all highmem pages
> + */
> +enum blk_bounce {
> +	BLK_BOUNCE_NONE,
> +	BLK_BOUNCE_HIGH,
> +};
> +
>  struct queue_limits {
> -	unsigned long		bounce_pfn;
> +	enum blk_bounce		bounce;
>  	unsigned long		seg_boundary_mask;
>  	unsigned long		virt_boundary_mask;
>  
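
For just these two values, BLK_BOUNCE_NONE == 0 and BLK_BOUNCE_HIGH == 1, the
two helpers happen to agree for every combination, so max() would mainly be
about stating the stacking intent.  A quick sketch, not from the patch:

/* all combinations of the two enum values */
min_not_zero(BLK_BOUNCE_NONE, BLK_BOUNCE_HIGH)	/* -> BLK_BOUNCE_HIGH */
max(BLK_BOUNCE_NONE, BLK_BOUNCE_HIGH)		/* -> BLK_BOUNCE_HIGH */
min_not_zero(BLK_BOUNCE_NONE, BLK_BOUNCE_NONE)	/* -> BLK_BOUNCE_NONE */
max(BLK_BOUNCE_NONE, BLK_BOUNCE_NONE)		/* -> BLK_BOUNCE_NONE */
min_not_zero(BLK_BOUNCE_HIGH, BLK_BOUNCE_HIGH)	/* -> BLK_BOUNCE_HIGH */
max(BLK_BOUNCE_HIGH, BLK_BOUNCE_HIGH)		/* -> BLK_BOUNCE_HIGH */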

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 7/8] block: refactor the bounce buffering code
  2021-03-18  6:39 start removing block bounce buffering support Christoph Hellwig
@ 2021-03-18  6:39 ` Christoph Hellwig
  2021-03-18 11:29   ` Matthew Wilcox
  0 siblings, 1 reply; 33+ messages in thread
From: Christoph Hellwig @ 2021-03-18  6:39 UTC (permalink / raw)
  To: Jens Axboe, Khalid Aziz, Martin K. Petersen, Matthew Wilcox,
	Hannes Reinecke, Ondrej Zary
  Cc: linux-block, linux-scsi

Get rid of all the PFN arithmetic and just use an enum for the two
remaining options, and use PageHighMem for the actual bounce decision.

Add a fast path to entirely avoid the call for the common case of a queue
not using the legacy bouncing code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-core.c       |  6 ++----
 block/blk-settings.c   | 42 ++++++++----------------------------------
 block/blk.h            | 16 ++++++++++++----
 block/bounce.c         | 35 +++++------------------------------
 include/linux/blkdev.h | 29 +++++++++++------------------
 5 files changed, 38 insertions(+), 90 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index fc60ff20849738..9bcdae93f6d4f7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1161,10 +1161,8 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
 	}
 
 	/*
-	 * queue's settings related to segment counting like q->bounce_pfn
-	 * may differ from that of other stacking queues.
-	 * Recalculate it to check the request correctly on this queue's
-	 * limitation.
+	 * The queue settings related to segment counting may differ from the
+	 * original queue.
 	 */
 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
 	if (rq->nr_phys_segments > queue_max_segments(q)) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f9937dd2810e25..c7e26d16c59c0e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,7 +7,6 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <linux/gcd.h>
 #include <linux/lcm.h>
 #include <linux/jiffies.h>
@@ -17,11 +16,6 @@
 #include "blk.h"
 #include "blk-wbt.h"
 
-unsigned long blk_max_low_pfn;
-EXPORT_SYMBOL(blk_max_low_pfn);
-
-unsigned long blk_max_pfn;
-
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
 	q->rq_timeout = timeout;
@@ -55,7 +49,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
-	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+	lim->bounce = BLK_BOUNCE_NONE;
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
@@ -92,28 +86,16 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @max_addr: the maximum address the device can handle
+ * @bounce: bounce limit to enforce
  *
  * Description:
- *    Different hardware can have different requirements as to what pages
- *    it can do I/O directly to. A low level driver can call
- *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @max_addr.
+ *    Force bouncing for ISA DMA ranges or highmem.
+ *
+ *    DEPRECATED, don't use in new code.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
 {
-	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
-
-#if BITS_PER_LONG == 64
-	/*
-	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
-	 * some IOMMUs can handle everything, but I don't know of a
-	 * way to test this here.
-	 */
-	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
-#else
-	q->limits.bounce_pfn = b_pfn;
-#endif
+	q->limits.bounce = bounce;
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
@@ -536,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					b->max_write_zeroes_sectors);
 	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
 					b->max_zone_append_sectors);
-	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+	t->bounce = min_not_zero(t->bounce, b->bounce);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
@@ -916,11 +898,3 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 	}
 }
 EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
-
-static int __init blk_settings_init(void)
-{
-	blk_max_low_pfn = max_low_pfn - 1;
-	blk_max_pfn = max_pfn - 1;
-	return 0;
-}
-subsys_initcall(blk_settings_init);
diff --git a/block/blk.h b/block/blk.h
index 895c9f4a5182a7..8f4337c5a9e66c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
 #include <linux/blk-mq.h>
 #include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
+#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
 #include "blk-mq.h"
@@ -311,13 +312,20 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
-#ifdef CONFIG_BOUNCE
-extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-#else
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
+
+static inline bool blk_queue_may_bounce(struct request_queue *q)
+{
+	return IS_ENABLED(CONFIG_BOUNCE) &&
+		q->limits.bounce == BLK_BOUNCE_HIGH &&
+		max_low_pfn >= max_pfn;
+}
+
 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
+	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
+		__blk_queue_bounce(q, bio);	
 }
-#endif /* CONFIG_BOUNCE */
 
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
 extern int blk_iolatency_init(struct request_queue *q);
diff --git a/block/bounce.c b/block/bounce.c
index debd5b0bd31890..6bafc0d1f867a1 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/memblock.h>
 #include <linux/printk.h>
 #include <asm/tlbflush.h>
 
@@ -49,11 +48,11 @@ static void init_bounce_bioset(void)
 	bounce_bs_setup = true;
 }
 
-#if defined(CONFIG_HIGHMEM)
 static __init int init_emergency_pool(void)
 {
 	int ret;
-#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
+
+#ifndef CONFIG_MEMORY_HOTPLUG
 	if (max_pfn <= max_low_pfn)
 		return 0;
 #endif
@@ -67,9 +66,7 @@ static __init int init_emergency_pool(void)
 }
 
 __initcall(init_emergency_pool);
-#endif
 
-#ifdef CONFIG_HIGHMEM
 /*
  * highmem version, map in to vec
  */
@@ -82,13 +79,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 	kunmap_atomic(vto);
 }
 
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom)	\
-	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
-#endif /* CONFIG_HIGHMEM */
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -236,8 +226,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	return NULL;
 }
 
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -247,24 +236,10 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	bool bounce = false;
 	int sectors = 0;
 
-	/*
-	 * Data-less bio, nothing to bounce
-	 */
-	if (!bio_has_data(*bio_orig))
-		return;
-
-	/*
-	 * Just check if the bounce pfn is equal to or bigger than the highest
-	 * pfn in the system -- in that case, don't waste time iterating over
-	 * bio segments
-	 */
-	if (q->limits.bounce_pfn >= blk_max_pfn)
-		return;
-
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			sectors += from.bv_len >> 9;
-		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
+		if (PageHighMem(from.bv_page))
 			bounce = true;
 	}
 	if (!bounce)
@@ -287,7 +262,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= q->limits.bounce_pfn)
+		if (!PageHighMem(page))
 			continue;
 
 		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0dbb72ea373529..55cc8b96c84427 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -313,8 +313,17 @@ enum blk_zoned_model {
 	BLK_ZONED_HM,		/* Host-managed zoned block device */
 };
 
+/*
+ * BLK_BOUNCE_NONE:	never bounce (default)
+ * BLK_BOUNCE_HIGH:	bounce all highmem pages
+ */
+enum blk_bounce {
+	BLK_BOUNCE_NONE,
+	BLK_BOUNCE_HIGH,
+};
+
 struct queue_limits {
-	unsigned long		bounce_pfn;
+	enum blk_bounce		bounce;
 	unsigned long		seg_boundary_mask;
 	unsigned long		virt_boundary_mask;
 
@@ -835,22 +844,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
 	return q->nr_requests;
 }
 
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
- *
- * BLK_BOUNCE_HIGH	: bounce all highmem pages
- * BLK_BOUNCE_ANY	: don't bounce anything
- */
-
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH		-1ULL
-#endif
-#define BLK_BOUNCE_ANY		(-1ULL)
-
 /*
  * default timeout for SG_IO if none specified
  */
@@ -1134,7 +1127,7 @@ extern void blk_abort_request(struct request *);
  * Access functions for manipulating queue properties
  */
 extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 33+ messages in thread


Thread overview: 33+ messages
2021-03-26  5:58 start removing block bounce buffering support v2 Christoph Hellwig
2021-03-26  5:58 ` [PATCH 1/8] aha1542: use a local bounce buffer Christoph Hellwig
2021-03-29  6:22   ` Hannes Reinecke
2021-03-26  5:58 ` [PATCH 2/8] Buslogic: remove ISA support Christoph Hellwig
2021-03-29  6:22   ` Hannes Reinecke
2021-03-29 20:29   ` Khalid Aziz
2021-03-30 17:03     ` Christoph Hellwig
2021-03-30 17:15       ` Khalid Aziz
2021-03-30 17:31         ` Christoph Hellwig
2021-03-26  5:58 ` [PATCH 3/8] BusLogic: reject broken old firmware that requires ISA-style bounce buffering Christoph Hellwig
2021-03-29  6:23   ` Hannes Reinecke
2021-03-29 20:33   ` Khalid Aziz
2021-03-26  5:58 ` [PATCH 4/8] advansys: remove ISA support Christoph Hellwig
2021-03-29  6:31   ` Hannes Reinecke
2021-03-30 17:26     ` Christoph Hellwig
2021-03-26  5:58 ` [PATCH 5/8] scsi: remove the unchecked_isa_dma flag Christoph Hellwig
2021-03-29  6:31   ` Hannes Reinecke
2021-03-26  5:58 ` [PATCH 6/8] block: remove BLK_BOUNCE_ISA support Christoph Hellwig
2021-03-29  6:32   ` Hannes Reinecke
2021-03-26  5:58 ` [PATCH 7/8] block: refactor the bounce buffering code Christoph Hellwig
2021-03-29  6:34   ` Hannes Reinecke
2021-03-26  5:58 ` [PATCH 8/8] block: stop calling blk_queue_bounce for passthrough requests Christoph Hellwig
2021-03-29  6:37   ` Hannes Reinecke
2021-03-26 23:15 ` start removing block bounce buffering support v2 Jens Axboe
2021-03-30  3:08   ` Martin K. Petersen
  -- strict thread matches above, loose matches on Subject: below --
2021-03-31  7:29 start removing block bounce buffering support v3 Christoph Hellwig
2021-03-31  7:30 ` [PATCH 7/8] block: refactor the bounce buffering code Christoph Hellwig
2021-03-18  6:39 start removing block bounce buffering support Christoph Hellwig
2021-03-18  6:39 ` [PATCH 7/8] block: refactor the bounce buffering code Christoph Hellwig
2021-03-18 11:29   ` Matthew Wilcox
2021-03-18 12:53     ` Christoph Hellwig
2021-03-24 17:40       ` Benjamin Block
2021-03-24 17:44         ` Christoph Hellwig
2021-03-24 18:07           ` Benjamin Block
2021-03-24 18:13             ` Christoph Hellwig
