dmaengine.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] dmaengine: idxd: add work queue drain support
@ 2020-06-15 20:54 Dave Jiang
  2020-06-24  7:10 ` Vinod Koul
  0 siblings, 1 reply; 3+ messages in thread
From: Dave Jiang @ 2020-06-15 20:54 UTC (permalink / raw)
  To: vkoul; +Cc: Tony Luck, Dan Williams, dmaengine

Add wq drain support. When a wq is being released, it needs to wait for
all in-flight operations to complete.  A device control function
idxd_wq_drain() has been added to facilitate this. A wq drain call
is added to the char dev on release to make sure all user operations are
complete. A wq drain is also added before the wq is being disabled.

A drain command can take an unpredictable period of time. Interrupt support
for device commands is added to allow waiting on the command to
finish. If a previous command is in progress, the new submitter can block
until the current command is finished before proceeding. The interrupt
based submission will submit the command and then wait until a command
completion interrupt happens to complete. All commands are moved to the
interrupt based command submission except for the device reset during
probe, which will be polled.

Fixes: 42d279f9137a ("dmaengine: idxd: add char driver to expose submission portal to userland")

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/dma/idxd/cdev.c   |    3 +
 drivers/dma/idxd/device.c |  156 ++++++++++++++++++++-------------------------
 drivers/dma/idxd/idxd.h   |   11 ++-
 drivers/dma/idxd/init.c   |   14 ++--
 drivers/dma/idxd/irq.c    |   41 +++++-------
 drivers/dma/idxd/sysfs.c  |   22 ++----
 6 files changed, 115 insertions(+), 132 deletions(-)

diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 207555296913..cd9f01134fd3 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -117,6 +117,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
 	dev_dbg(dev, "%s called\n", __func__);
 	filep->private_data = NULL;
 
+	/* Wait for in-flight operations to complete. */
+	idxd_wq_drain(wq);
+
 	kfree(ctx);
 	mutex_lock(&wq->wq_lock);
 	idxd_wq_put(wq);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index f4f64d4678a4..8fe344412bd9 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -11,8 +11,8 @@
 #include "idxd.h"
 #include "registers.h"
 
-static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
-static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
+static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+			  u32 *status);
 
 /* Interrupt control bits */
 int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
@@ -235,21 +235,13 @@ int idxd_wq_enable(struct idxd_wq *wq)
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	u32 status;
-	int rc;
-
-	lockdep_assert_held(&idxd->dev_lock);
 
 	if (wq->state == IDXD_WQ_ENABLED) {
 		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
 		return -ENXIO;
 	}
 
-	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
-	if (rc < 0)
-		return rc;
-	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
-	if (rc < 0)
-		return rc;
+	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
 
 	if (status != IDXD_CMDSTS_SUCCESS &&
 	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
@@ -267,9 +259,7 @@ int idxd_wq_disable(struct idxd_wq *wq)
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	u32 status, operand;
-	int rc;
 
-	lockdep_assert_held(&idxd->dev_lock);
 	dev_dbg(dev, "Disabling WQ %d\n", wq->id);
 
 	if (wq->state != IDXD_WQ_ENABLED) {
@@ -278,12 +268,7 @@ int idxd_wq_disable(struct idxd_wq *wq)
 	}
 
 	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
-	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
-	if (rc < 0)
-		return rc;
-	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
-	if (rc < 0)
-		return rc;
+	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
 
 	if (status != IDXD_CMDSTS_SUCCESS) {
 		dev_dbg(dev, "WQ disable failed: %#x\n", status);
@@ -295,6 +280,23 @@ int idxd_wq_disable(struct idxd_wq *wq)
 	return 0;
 }
 
+void idxd_wq_drain(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	u32 operand;
+
+	if (wq->state != IDXD_WQ_ENABLED) {
+		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+		return;
+	}
+
+	dev_dbg(dev, "Draining WQ %d\n", wq->id);
+	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
+	dev_dbg(dev, "WQ %d drained\n", wq->id);
+}
+
 int idxd_wq_map_portal(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
@@ -356,66 +358,79 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
 	return false;
 }
 
-static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
+/*
+ * This function is only used for reset during probe and will
+ * poll for completion. Once the device is set up with interrupts,
+ * all commands will be done via interrupt completion.
+ */
+void idxd_device_init_reset(struct idxd_device *idxd)
 {
-	u32 sts, to = timeout;
-
-	lockdep_assert_held(&idxd->dev_lock);
-	sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
-	while (sts & IDXD_CMDSTS_ACTIVE && --to) {
-		cpu_relax();
-		sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
-	}
+	struct device *dev = &idxd->pdev->dev;
+	union idxd_command_reg cmd;
+	unsigned long flags;
 
-	if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
-		dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
-		*status = 0;
-		return -EBUSY;
-	}
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd = IDXD_CMD_RESET_DEVICE;
+	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
 
-	*status = sts;
-	return 0;
+	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
+	       IDXD_CMDSTS_ACTIVE)
+		cpu_relax();
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 }
 
-static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
+static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+			  u32 *status)
 {
 	union idxd_command_reg cmd;
-	int rc;
-	u32 status;
-
-	lockdep_assert_held(&idxd->dev_lock);
-	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
-	if (rc < 0)
-		return rc;
+	DECLARE_COMPLETION_ONSTACK(done);
+	unsigned long flags;
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.cmd = cmd_code;
 	cmd.operand = operand;
+	cmd.int_req = 1;
+
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	wait_event_lock_irq(idxd->cmd_waitq,
+			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
+			    idxd->dev_lock);
+
 	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
 		__func__, cmd_code, operand);
+
+	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
+	idxd->cmd_done = &done;
 	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
 
-	return 0;
+	/*
+	 * After command submitted, release lock and go to sleep until
+	 * the command completes via interrupt.
+	 */
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	wait_for_completion(&done);
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	if (status)
+		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
+	/* Wake up other pending commands */
+	wake_up(&idxd->cmd_waitq);
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 }
 
 int idxd_device_enable(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
-	int rc;
 	u32 status;
 
-	lockdep_assert_held(&idxd->dev_lock);
 	if (idxd_is_enabled(idxd)) {
 		dev_dbg(dev, "Device already enabled\n");
 		return -ENXIO;
 	}
 
-	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
-	if (rc < 0)
-		return rc;
-	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
-	if (rc < 0)
-		return rc;
+	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
 
 	/* If the command is successful or if the device was enabled */
 	if (status != IDXD_CMDSTS_SUCCESS &&
@@ -431,58 +446,29 @@ int idxd_device_enable(struct idxd_device *idxd)
 int idxd_device_disable(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
-	int rc;
 	u32 status;
 
-	lockdep_assert_held(&idxd->dev_lock);
 	if (!idxd_is_enabled(idxd)) {
 		dev_dbg(dev, "Device is not enabled\n");
 		return 0;
 	}
 
-	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
-	if (rc < 0)
-		return rc;
-	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
-	if (rc < 0)
-		return rc;
+	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);
 
 	/* If the command is successful or if the device was disabled */
 	if (status != IDXD_CMDSTS_SUCCESS &&
 	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
 		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
-		rc = -ENXIO;
-		return rc;
+		return -ENXIO;
 	}
 
 	idxd->state = IDXD_DEV_CONF_READY;
 	return 0;
 }
 
-int __idxd_device_reset(struct idxd_device *idxd)
-{
-	u32 status;
-	int rc;
-
-	rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
-	if (rc < 0)
-		return rc;
-	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
-	if (rc < 0)
-		return rc;
-
-	return 0;
-}
-
-int idxd_device_reset(struct idxd_device *idxd)
+void idxd_device_reset(struct idxd_device *idxd)
 {
-	unsigned long flags;
-	int rc;
-
-	spin_lock_irqsave(&idxd->dev_lock, flags);
-	rc = __idxd_device_reset(idxd);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
-	return rc;
+	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
 }
 
 /* Device configuration bits */
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 908c8d0ef3ab..013d68d42bc4 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -145,6 +145,7 @@ enum idxd_device_state {
 
 enum idxd_device_flag {
 	IDXD_FLAG_CONFIGURABLE = 0,
+	IDXD_FLAG_CMD_RUNNING,
 };
 
 struct idxd_device {
@@ -161,6 +162,7 @@ struct idxd_device {
 	void __iomem *reg_base;
 
 	spinlock_t dev_lock;	/* spinlock for device */
+	struct completion *cmd_done;
 	struct idxd_group *groups;
 	struct idxd_wq *wqs;
 	struct idxd_engine *engines;
@@ -183,12 +185,14 @@ struct idxd_device {
 	int nr_tokens;		/* non-reserved tokens */
 
 	union sw_err_reg sw_err;
-
+	wait_queue_head_t cmd_waitq;
 	struct msix_entry *msix_entries;
 	int num_wq_irqs;
 	struct idxd_irq_entry *irq_entries;
 
 	struct dma_device dma_dev;
+	struct workqueue_struct *wq;
+	struct work_struct work;
 };
 
 /* IDXD software descriptor */
@@ -275,10 +279,10 @@ int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
 int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
+void idxd_device_init_reset(struct idxd_device *idxd);
 int idxd_device_enable(struct idxd_device *idxd);
 int idxd_device_disable(struct idxd_device *idxd);
-int idxd_device_reset(struct idxd_device *idxd);
-int __idxd_device_reset(struct idxd_device *idxd);
+void idxd_device_reset(struct idxd_device *idxd);
 void idxd_device_cleanup(struct idxd_device *idxd);
 int idxd_device_config(struct idxd_device *idxd);
 void idxd_device_wqs_clear_state(struct idxd_device *idxd);
@@ -288,6 +292,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq);
 void idxd_wq_free_resources(struct idxd_wq *wq);
 int idxd_wq_enable(struct idxd_wq *wq);
 int idxd_wq_disable(struct idxd_wq *wq);
+void idxd_wq_drain(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 void idxd_wq_disable_cleanup(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 7778c05deb5d..51cdf77038dc 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -157,6 +157,7 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 	struct device *dev = &idxd->pdev->dev;
 	int i;
 
+	init_waitqueue_head(&idxd->cmd_waitq);
 	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
 				    sizeof(struct idxd_group), GFP_KERNEL);
 	if (!idxd->groups)
@@ -201,6 +202,10 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 		idxd->engines[i].id = i;
 	}
 
+	idxd->wq = create_workqueue(dev_name(dev));
+	if (!idxd->wq)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -296,9 +301,7 @@ static int idxd_probe(struct idxd_device *idxd)
 	int rc;
 
 	dev_dbg(dev, "%s entered and resetting device\n", __func__);
-	rc = idxd_device_reset(idxd);
-	if (rc < 0)
-		return rc;
+	idxd_device_init_reset(idxd);
 	dev_dbg(dev, "IDXD reset complete\n");
 
 	idxd_read_caps(idxd);
@@ -433,11 +436,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
 	int rc, i;
 	struct idxd_irq_entry *irq_entry;
 	int msixcnt = pci_msix_vec_count(pdev);
-	unsigned long flags;
 
-	spin_lock_irqsave(&idxd->dev_lock, flags);
 	rc = idxd_device_disable(idxd);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 	if (rc)
 		dev_err(&pdev->dev, "Disabling device failed\n");
 
@@ -453,6 +453,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
 		idxd_flush_pending_llist(irq_entry);
 		idxd_flush_work_list(irq_entry);
 	}
+
+	destroy_workqueue(idxd->wq);
 }
 
 static void idxd_remove(struct pci_dev *pdev)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 6510791b9921..6052765ca3c8 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -23,16 +23,13 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 	}
 }
 
-static int idxd_restart(struct idxd_device *idxd)
+static void idxd_device_reinit(struct work_struct *work)
 {
-	int i, rc;
-
-	lockdep_assert_held(&idxd->dev_lock);
-
-	rc = __idxd_device_reset(idxd);
-	if (rc < 0)
-		goto out;
+	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
+	struct device *dev = &idxd->pdev->dev;
+	int rc, i;
 
+	idxd_device_reset(idxd);
 	rc = idxd_device_config(idxd);
 	if (rc < 0)
 		goto out;
@@ -47,19 +44,16 @@ static int idxd_restart(struct idxd_device *idxd)
 		if (wq->state == IDXD_WQ_ENABLED) {
 			rc = idxd_wq_enable(wq);
 			if (rc < 0) {
-				dev_warn(&idxd->pdev->dev,
-					 "Unable to re-enable wq %s\n",
+				dev_warn(dev, "Unable to re-enable wq %s\n",
 					 dev_name(&wq->conf_dev));
 			}
 		}
 	}
 
-	return 0;
+	return;
 
  out:
 	idxd_device_wqs_clear_state(idxd);
-	idxd->state = IDXD_DEV_HALTED;
-	return rc;
 }
 
 irqreturn_t idxd_irq_handler(int vec, void *data)
@@ -78,7 +72,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	struct device *dev = &idxd->pdev->dev;
 	union gensts_reg gensts;
 	u32 cause, val = 0;
-	int i, rc;
+	int i;
 	bool err = false;
 
 	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
@@ -117,8 +111,8 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	}
 
 	if (cause & IDXD_INTC_CMD) {
-		/* Driver does use command interrupts */
 		val |= IDXD_INTC_CMD;
+		complete(idxd->cmd_done);
 	}
 
 	if (cause & IDXD_INTC_OCCUPY) {
@@ -145,21 +139,24 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 
 	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
 	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
-		spin_lock_bh(&idxd->dev_lock);
+		idxd->state = IDXD_DEV_HALTED;
 		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
-			rc = idxd_restart(idxd);
-			if (rc < 0)
-				dev_err(&idxd->pdev->dev,
-					"idxd restart failed, device halt.");
+			/*
+			 * If we need a software reset, we will throw the work
+			 * on a system workqueue in order to allow interrupts
+			 * for the device command completions.
+			 */
+			INIT_WORK(&idxd->work, idxd_device_reinit);
+			queue_work(idxd->wq, &idxd->work);
 		} else {
+			spin_lock_bh(&idxd->dev_lock);
 			idxd_device_wqs_clear_state(idxd);
-			idxd->state = IDXD_DEV_HALTED;
 			dev_err(&idxd->pdev->dev,
 				"idxd halted, need %s.\n",
 				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
 				"FLR" : "system reset");
+			spin_unlock_bh(&idxd->dev_lock);
 		}
-		spin_unlock_bh(&idxd->dev_lock);
 	}
 
 	idxd_unmask_msix_vector(idxd, irq_entry->id);
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 2e2c5082f322..dcba60953217 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -118,12 +118,11 @@ static int idxd_config_bus_probe(struct device *dev)
 		if (!try_module_get(THIS_MODULE))
 			return -ENXIO;
 
-		spin_lock_irqsave(&idxd->dev_lock, flags);
-
 		/* Perform IDXD configuration and enabling */
+		spin_lock_irqsave(&idxd->dev_lock, flags);
 		rc = idxd_device_config(idxd);
+		spin_unlock_irqrestore(&idxd->dev_lock, flags);
 		if (rc < 0) {
-			spin_unlock_irqrestore(&idxd->dev_lock, flags);
 			module_put(THIS_MODULE);
 			dev_warn(dev, "Device config failed: %d\n", rc);
 			return rc;
@@ -132,18 +131,15 @@ static int idxd_config_bus_probe(struct device *dev)
 		/* start device */
 		rc = idxd_device_enable(idxd);
 		if (rc < 0) {
-			spin_unlock_irqrestore(&idxd->dev_lock, flags);
 			module_put(THIS_MODULE);
 			dev_warn(dev, "Device enable failed: %d\n", rc);
 			return rc;
 		}
 
-		spin_unlock_irqrestore(&idxd->dev_lock, flags);
 		dev_info(dev, "Device %s enabled\n", dev_name(dev));
 
 		rc = idxd_register_dma_device(idxd);
 		if (rc < 0) {
-			spin_unlock_irqrestore(&idxd->dev_lock, flags);
 			module_put(THIS_MODULE);
 			dev_dbg(dev, "Failed to register dmaengine device\n");
 			return rc;
@@ -188,8 +184,8 @@ static int idxd_config_bus_probe(struct device *dev)
 
 		spin_lock_irqsave(&idxd->dev_lock, flags);
 		rc = idxd_device_config(idxd);
+		spin_unlock_irqrestore(&idxd->dev_lock, flags);
 		if (rc < 0) {
-			spin_unlock_irqrestore(&idxd->dev_lock, flags);
 			mutex_unlock(&wq->wq_lock);
 			dev_warn(dev, "Writing WQ %d config failed: %d\n",
 				 wq->id, rc);
@@ -198,13 +194,11 @@ static int idxd_config_bus_probe(struct device *dev)
 
 		rc = idxd_wq_enable(wq);
 		if (rc < 0) {
-			spin_unlock_irqrestore(&idxd->dev_lock, flags);
 			mutex_unlock(&wq->wq_lock);
 			dev_warn(dev, "WQ %d enabling failed: %d\n",
 				 wq->id, rc);
 			return rc;
 		}
-		spin_unlock_irqrestore(&idxd->dev_lock, flags);
 
 		rc = idxd_wq_map_portal(wq);
 		if (rc < 0) {
@@ -212,7 +206,6 @@ static int idxd_config_bus_probe(struct device *dev)
 			rc = idxd_wq_disable(wq);
 			if (rc < 0)
 				dev_warn(dev, "IDXD wq disable failed\n");
-			spin_unlock_irqrestore(&idxd->dev_lock, flags);
 			mutex_unlock(&wq->wq_lock);
 			return rc;
 		}
@@ -248,7 +241,6 @@ static void disable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
-	unsigned long flags;
 	int rc;
 
 	mutex_lock(&wq->wq_lock);
@@ -269,9 +261,8 @@ static void disable_wq(struct idxd_wq *wq)
 
 	idxd_wq_unmap_portal(wq);
 
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	idxd_wq_drain(wq);
 	rc = idxd_wq_disable(wq);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 
 	idxd_wq_free_resources(wq);
 	wq->client_count = 0;
@@ -287,7 +278,6 @@ static void disable_wq(struct idxd_wq *wq)
 static int idxd_config_bus_remove(struct device *dev)
 {
 	int rc;
-	unsigned long flags;
 
 	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
 
@@ -313,14 +303,14 @@ static int idxd_config_bus_remove(struct device *dev)
 		}
 
 		idxd_unregister_dma_device(idxd);
-		spin_lock_irqsave(&idxd->dev_lock, flags);
 		rc = idxd_device_disable(idxd);
 		for (i = 0; i < idxd->max_wqs; i++) {
 			struct idxd_wq *wq = &idxd->wqs[i];
 
+			mutex_lock(&wq->wq_lock);
 			idxd_wq_disable_cleanup(wq);
+			mutex_unlock(&wq->wq_lock);
 		}
-		spin_unlock_irqrestore(&idxd->dev_lock, flags);
 		module_put(THIS_MODULE);
 		if (rc < 0)
 			dev_warn(dev, "Device disable failed\n");


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] dmaengine: idxd: add work queue drain support
  2020-06-15 20:54 [PATCH] dmaengine: idxd: add work queue drain support Dave Jiang
@ 2020-06-24  7:10 ` Vinod Koul
  2020-06-25  2:05   ` Dave Jiang
  0 siblings, 1 reply; 3+ messages in thread
From: Vinod Koul @ 2020-06-24  7:10 UTC (permalink / raw)
  To: Dave Jiang; +Cc: Tony Luck, Dan Williams, dmaengine

On 15-06-20, 13:54, Dave Jiang wrote:
> Add wq drain support. When a wq is being released, it needs to wait for
> all in-flight operation to complete.  A device control function
> idxd_wq_drain() has been added to facilitate this. A wq drain call
> is added to the char dev on release to make sure all user operations are
> complete. A wq drain is also added before the wq is being disabled.
> 
> A drain command can take an unpredictable period of time. Interrupt support
> for device commands is added to allow waiting on the command to
> finish. If a previous command is in progress, the new submitter can block
> until the current command is finished before proceeding. The interrupt
> based submission will submit the command and then wait until a command
> completion interrupt happens to complete. All commands are moved to the
> interrupt based command submission except for the device reset during
> probe, which will be polled.
> 
> Fixes: 42d279f9137a ("dmaengine: idxd: add char driver to expose submission portal to userland")
> 

no empty line here

> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Reviewed-by: Tony Luck <tony.luck@intel.com>
> Reviewed-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  drivers/dma/idxd/cdev.c   |    3 +
>  drivers/dma/idxd/device.c |  156 ++++++++++++++++++++-------------------------
>  drivers/dma/idxd/idxd.h   |   11 ++-
>  drivers/dma/idxd/init.c   |   14 ++--
>  drivers/dma/idxd/irq.c    |   41 +++++-------
>  drivers/dma/idxd/sysfs.c  |   22 ++----
>  6 files changed, 115 insertions(+), 132 deletions(-)
> 
> diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
> index 207555296913..cd9f01134fd3 100644
> --- a/drivers/dma/idxd/cdev.c
> +++ b/drivers/dma/idxd/cdev.c
> @@ -117,6 +117,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
>  	dev_dbg(dev, "%s called\n", __func__);
>  	filep->private_data = NULL;
>  
> +	/* Wait for in-flight operations to complete. */
> +	idxd_wq_drain(wq);
> +
>  	kfree(ctx);
>  	mutex_lock(&wq->wq_lock);
>  	idxd_wq_put(wq);
> diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
> index f4f64d4678a4..8fe344412bd9 100644
> --- a/drivers/dma/idxd/device.c
> +++ b/drivers/dma/idxd/device.c
> @@ -11,8 +11,8 @@
>  #include "idxd.h"
>  #include "registers.h"
>  
> -static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
> -static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
> +static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
> +			  u32 *status);
>  
>  /* Interrupt control bits */
>  int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
> @@ -235,21 +235,13 @@ int idxd_wq_enable(struct idxd_wq *wq)
>  	struct idxd_device *idxd = wq->idxd;
>  	struct device *dev = &idxd->pdev->dev;
>  	u32 status;
> -	int rc;
> -
> -	lockdep_assert_held(&idxd->dev_lock);
>  
>  	if (wq->state == IDXD_WQ_ENABLED) {
>  		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
>  		return -ENXIO;
>  	}
>  
> -	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
> -	if (rc < 0)
> -		return rc;
> -	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
> -	if (rc < 0)
> -		return rc;
> +	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
>  
>  	if (status != IDXD_CMDSTS_SUCCESS &&
>  	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
> @@ -267,9 +259,7 @@ int idxd_wq_disable(struct idxd_wq *wq)
>  	struct idxd_device *idxd = wq->idxd;
>  	struct device *dev = &idxd->pdev->dev;
>  	u32 status, operand;
> -	int rc;
>  
> -	lockdep_assert_held(&idxd->dev_lock);
>  	dev_dbg(dev, "Disabling WQ %d\n", wq->id);
>  
>  	if (wq->state != IDXD_WQ_ENABLED) {
> @@ -278,12 +268,7 @@ int idxd_wq_disable(struct idxd_wq *wq)
>  	}
>  
>  	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
> -	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
> -	if (rc < 0)
> -		return rc;
> -	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
> -	if (rc < 0)
> -		return rc;
> +	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
>  
>  	if (status != IDXD_CMDSTS_SUCCESS) {
>  		dev_dbg(dev, "WQ disable failed: %#x\n", status);
> @@ -295,6 +280,23 @@ int idxd_wq_disable(struct idxd_wq *wq)
>  	return 0;
>  }
>  
> +void idxd_wq_drain(struct idxd_wq *wq)
> +{
> +	struct idxd_device *idxd = wq->idxd;
> +	struct device *dev = &idxd->pdev->dev;
> +	u32 operand;
> +
> +	if (wq->state != IDXD_WQ_ENABLED) {
> +		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
> +		return;
> +	}
> +
> +	dev_dbg(dev, "Draining WQ %d\n", wq->id);
> +	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
> +	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
> +	dev_dbg(dev, "WQ %d drained\n", wq->id);

too much debug artifacts, pls remove

> +}
> +
>  int idxd_wq_map_portal(struct idxd_wq *wq)
>  {
>  	struct idxd_device *idxd = wq->idxd;
> @@ -356,66 +358,79 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
>  	return false;
>  }
>  
> -static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
> +/*
> + * This is function is only used for reset during probe and will
> + * poll for completion. Once the device is setup with interrupts,
> + * all commands will be done via interrupt completion.
> + */
> +void idxd_device_init_reset(struct idxd_device *idxd)
>  {
> -	u32 sts, to = timeout;
> -
> -	lockdep_assert_held(&idxd->dev_lock);
> -	sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
> -	while (sts & IDXD_CMDSTS_ACTIVE && --to) {
> -		cpu_relax();
> -		sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
> -	}
> +	struct device *dev = &idxd->pdev->dev;
> +	union idxd_command_reg cmd;
> +	unsigned long flags;
>  
> -	if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
> -		dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
> -		*status = 0;
> -		return -EBUSY;
> -	}
> +	memset(&cmd, 0, sizeof(cmd));
> +	cmd.cmd = IDXD_CMD_RESET_DEVICE;
> +	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
> +	spin_lock_irqsave(&idxd->dev_lock, flags);
> +	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
>  
> -	*status = sts;
> -	return 0;
> +	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
> +	       IDXD_CMDSTS_ACTIVE)
> +		cpu_relax();
> +	spin_unlock_irqrestore(&idxd->dev_lock, flags);
>  }
>  
> -static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
> +static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
> +			  u32 *status)
>  {
>  	union idxd_command_reg cmd;
> -	int rc;
> -	u32 status;
> -
> -	lockdep_assert_held(&idxd->dev_lock);
> -	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
> -	if (rc < 0)
> -		return rc;
> +	DECLARE_COMPLETION_ONSTACK(done);
> +	unsigned long flags;
>  
>  	memset(&cmd, 0, sizeof(cmd));
>  	cmd.cmd = cmd_code;
>  	cmd.operand = operand;
> +	cmd.int_req = 1;
> +
> +	spin_lock_irqsave(&idxd->dev_lock, flags);
> +	wait_event_lock_irq(idxd->cmd_waitq,
> +			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
> +			    idxd->dev_lock);
> +
>  	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
>  		__func__, cmd_code, operand);
> +
> +	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
> +	idxd->cmd_done = &done;
>  	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
>  
> -	return 0;
> +	/*
> +	 * After command submitted, release lock and go to sleep until
> +	 * the command completes via interrupt.
> +	 */
> +	spin_unlock_irqrestore(&idxd->dev_lock, flags);
> +	wait_for_completion(&done);

waiting with locks held and interrupts disabled?
-- 
~Vinod

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] dmaengine: idxd: add work queue drain support
  2020-06-24  7:10 ` Vinod Koul
@ 2020-06-25  2:05   ` Dave Jiang
  0 siblings, 0 replies; 3+ messages in thread
From: Dave Jiang @ 2020-06-25  2:05 UTC (permalink / raw)
  To: Vinod Koul; +Cc: Tony Luck, Dan Williams, dmaengine



On 6/24/2020 12:10 AM, Vinod Koul wrote:
> On 15-06-20, 13:54, Dave Jiang wrote:
>> Add wq drain support. When a wq is being released, it needs to wait for
>> all in-flight operations to complete.  A device control function
>> idxd_wq_drain() has been added to facilitate this. A wq drain call
>> is added to the char dev on release to make sure all user operations are
>> complete. A wq drain is also added before the wq is being disabled.
>>
>> A drain command can take an unpredictable period of time. Interrupt support
>> for device commands is added to allow waiting on the command to
>> finish. If a previous command is in progress, the new submitter can block
>> until the current command is finished before proceeding. The interrupt
>> based submission will submit the command and then wait until a command
>> completion interrupt happens to complete. All commands are moved to the
>> interrupt based command submission except for the device reset during
>> probe, which will be polled.
>>
>> Fixes: 42d279f9137a ("dmaengine: idxd: add char driver to expose submission portal to userland")
>>
> 
> no empty line here
> 
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> Reviewed-by: Tony Luck <tony.luck@intel.com>
>> Reviewed-by: Dan Williams <dan.j.williams@intel.com>
>> ---
>>   drivers/dma/idxd/cdev.c   |    3 +
>>   drivers/dma/idxd/device.c |  156 ++++++++++++++++++++-------------------------
>>   drivers/dma/idxd/idxd.h   |   11 ++-
>>   drivers/dma/idxd/init.c   |   14 ++--
>>   drivers/dma/idxd/irq.c    |   41 +++++-------
>>   drivers/dma/idxd/sysfs.c  |   22 ++----
>>   6 files changed, 115 insertions(+), 132 deletions(-)
>>
>> diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
>> index 207555296913..cd9f01134fd3 100644
>> --- a/drivers/dma/idxd/cdev.c
>> +++ b/drivers/dma/idxd/cdev.c
>> @@ -117,6 +117,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
>>   	dev_dbg(dev, "%s called\n", __func__);
>>   	filep->private_data = NULL;
>>   
>> +	/* Wait for in-flight operations to complete. */
>> +	idxd_wq_drain(wq);
>> +
>>   	kfree(ctx);
>>   	mutex_lock(&wq->wq_lock);
>>   	idxd_wq_put(wq);
>> diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
>> index f4f64d4678a4..8fe344412bd9 100644
>> --- a/drivers/dma/idxd/device.c
>> +++ b/drivers/dma/idxd/device.c
>> @@ -11,8 +11,8 @@
>>   #include "idxd.h"
>>   #include "registers.h"
>>   
>> -static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
>> -static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
>> +static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
>> +			  u32 *status);
>>   
>>   /* Interrupt control bits */
>>   int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
>> @@ -235,21 +235,13 @@ int idxd_wq_enable(struct idxd_wq *wq)
>>   	struct idxd_device *idxd = wq->idxd;
>>   	struct device *dev = &idxd->pdev->dev;
>>   	u32 status;
>> -	int rc;
>> -
>> -	lockdep_assert_held(&idxd->dev_lock);
>>   
>>   	if (wq->state == IDXD_WQ_ENABLED) {
>>   		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
>>   		return -ENXIO;
>>   	}
>>   
>> -	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
>> -	if (rc < 0)
>> -		return rc;
>> -	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
>> -	if (rc < 0)
>> -		return rc;
>> +	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
>>   
>>   	if (status != IDXD_CMDSTS_SUCCESS &&
>>   	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
>> @@ -267,9 +259,7 @@ int idxd_wq_disable(struct idxd_wq *wq)
>>   	struct idxd_device *idxd = wq->idxd;
>>   	struct device *dev = &idxd->pdev->dev;
>>   	u32 status, operand;
>> -	int rc;
>>   
>> -	lockdep_assert_held(&idxd->dev_lock);
>>   	dev_dbg(dev, "Disabling WQ %d\n", wq->id);
>>   
>>   	if (wq->state != IDXD_WQ_ENABLED) {
>> @@ -278,12 +268,7 @@ int idxd_wq_disable(struct idxd_wq *wq)
>>   	}
>>   
>>   	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
>> -	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
>> -	if (rc < 0)
>> -		return rc;
>> -	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
>> -	if (rc < 0)
>> -		return rc;
>> +	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
>>   
>>   	if (status != IDXD_CMDSTS_SUCCESS) {
>>   		dev_dbg(dev, "WQ disable failed: %#x\n", status);
>> @@ -295,6 +280,23 @@ int idxd_wq_disable(struct idxd_wq *wq)
>>   	return 0;
>>   }
>>   
>> +void idxd_wq_drain(struct idxd_wq *wq)
>> +{
>> +	struct idxd_device *idxd = wq->idxd;
>> +	struct device *dev = &idxd->pdev->dev;
>> +	u32 operand;
>> +
>> +	if (wq->state != IDXD_WQ_ENABLED) {
>> +		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
>> +		return;
>> +	}
>> +
>> +	dev_dbg(dev, "Draining WQ %d\n", wq->id);
>> +	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
>> +	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
>> +	dev_dbg(dev, "WQ %d drained\n", wq->id);
> 
> too much debug artifacts, pls remove

Will fix

> 
>> +}
>> +
>>   int idxd_wq_map_portal(struct idxd_wq *wq)
>>   {
>>   	struct idxd_device *idxd = wq->idxd;
>> @@ -356,66 +358,79 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
>>   	return false;
>>   }
>>   
>> -static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
>> +/*
>> + * This function is only used for reset during probe and will
>> + * poll for completion. Once the device is setup with interrupts,
>> + * all commands will be done via interrupt completion.
>> + */
>> +void idxd_device_init_reset(struct idxd_device *idxd)
>>   {
>> -	u32 sts, to = timeout;
>> -
>> -	lockdep_assert_held(&idxd->dev_lock);
>> -	sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
>> -	while (sts & IDXD_CMDSTS_ACTIVE && --to) {
>> -		cpu_relax();
>> -		sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
>> -	}
>> +	struct device *dev = &idxd->pdev->dev;
>> +	union idxd_command_reg cmd;
>> +	unsigned long flags;
>>   
>> -	if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
>> -		dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
>> -		*status = 0;
>> -		return -EBUSY;
>> -	}
>> +	memset(&cmd, 0, sizeof(cmd));
>> +	cmd.cmd = IDXD_CMD_RESET_DEVICE;
>> +	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
>> +	spin_lock_irqsave(&idxd->dev_lock, flags);
>> +	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
>>   
>> -	*status = sts;
>> -	return 0;
>> +	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
>> +	       IDXD_CMDSTS_ACTIVE)
>> +		cpu_relax();
>> +	spin_unlock_irqrestore(&idxd->dev_lock, flags);
>>   }
>>   
>> -static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
>> +static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
>> +			  u32 *status)
>>   {
>>   	union idxd_command_reg cmd;
>> -	int rc;
>> -	u32 status;
>> -
>> -	lockdep_assert_held(&idxd->dev_lock);
>> -	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
>> -	if (rc < 0)
>> -		return rc;
>> +	DECLARE_COMPLETION_ONSTACK(done);
>> +	unsigned long flags;
>>   
>>   	memset(&cmd, 0, sizeof(cmd));
>>   	cmd.cmd = cmd_code;
>>   	cmd.operand = operand;
>> +	cmd.int_req = 1;
>> +
>> +	spin_lock_irqsave(&idxd->dev_lock, flags);
>> +	wait_event_lock_irq(idxd->cmd_waitq,
>> +			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
>> +			    idxd->dev_lock);
>> +
>>   	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
>>   		__func__, cmd_code, operand);
>> +
>> +	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
>> +	idxd->cmd_done = &done;
>>   	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
>>   
>> -	return 0;
>> +	/*
>> +	 * After command submitted, release lock and go to sleep until
>> +	 * the command completes via interrupt.
>> +	 */
>> +	spin_unlock_irqrestore(&idxd->dev_lock, flags);
>> +	wait_for_completion(&done);
> 
> waiting with locks held and interrupts disabled?

No, we release the lock here before we wait. And it gets re-acquired once we are
woken up.


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2020-06-25  2:05 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-15 20:54 [PATCH] dmaengine: idxd: add work queue drain support Dave Jiang
2020-06-24  7:10 ` Vinod Koul
2020-06-25  2:05   ` Dave Jiang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).