From: M Chetan Kumar <m.chetan.kumar@intel.com>
To: netdev@vger.kernel.org, linux-wireless@vger.kernel.org
Cc: johannes@sipsolutions.net, krishna.c.sudi@intel.com,
	m.chetan.kumar@intel.com
Subject: [PATCH 08/18] net: iosm: MBIM control device
Date: Thu,  7 Jan 2021 22:35:13 +0530
Message-ID: <20210107170523.26531-9-m.chetan.kumar@intel.com>
In-Reply-To: <20210107170523.26531-1-m.chetan.kumar@intel.com>

Implement a character device for MBIM protocol communication and
provide a simple IOCTL (IOCTL_WDM_MAX_COMMAND) that lets user space
query the maximum transfer buffer size.

Signed-off-by: M Chetan Kumar <m.chetan.kumar@intel.com>
---
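Usage note (not part of the patch itself): a minimal userspace sketch of how
the character device is expected to be exercised. Only the ioctl number and
the 4096-byte maximum reported by the driver come from this patch; the device
node name is an assumption derived from whatever name the core passes to
ipc_mbim_init().

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/ioctl.h>
  #include <linux/types.h>

  #define IOCTL_WDM_MAX_COMMAND _IOR('H', 0xA0, __u16)

  int main(void)
  {
  	__u16 max_len = 0;
  	int fd = open("/dev/iosm_mbim0", O_RDWR); /* hypothetical node name */

  	if (fd < 0) {
  		perror("open");
  		return 1;
  	}

  	/* The driver copies its wmaxcommand value (4096) to user space. */
  	if (ioctl(fd, IOCTL_WDM_MAX_COMMAND, &max_len) < 0) {
  		perror("ioctl");
  		close(fd);
  		return 1;
  	}
  	printf("max MBIM control message: %hu bytes\n", max_len);

  	/* MBIM OPEN/COMMAND/CLOSE transactions are then exchanged with
  	 * write() and read() on the same fd, typically by libmbim or
  	 * ModemManager rather than hand-written code like this.
  	 */
  	close(fd);
  	return 0;
  }
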
 drivers/net/wwan/iosm/iosm_ipc_mbim.c | 286 ++++++++++++++++++++++++++++++++++
 drivers/net/wwan/iosm/iosm_ipc_mbim.h |  25 +++
 2 files changed, 311 insertions(+)
 create mode 100644 drivers/net/wwan/iosm/iosm_ipc_mbim.c
 create mode 100644 drivers/net/wwan/iosm/iosm_ipc_mbim.h

diff --git a/drivers/net/wwan/iosm/iosm_ipc_mbim.c b/drivers/net/wwan/iosm/iosm_ipc_mbim.c
new file mode 100644
index 000000000000..885037a5642e
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mbim.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ */
+
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_mbim.h"
+#include "iosm_ipc_sio.h"
+
+#define IOCTL_WDM_MAX_COMMAND _IOR('H', 0xA0, __u16)
+#define WDM_MAX_SIZE 4096
+
+static struct mutex mbim_floc;		/* Mutex Lock for mbim read */
+static struct mutex mbim_floc_wr;	/* Mutex Lock for mbim write */
+
+/* MBIM IOCTL reporting the maximum MBIM message size to user space. */
+static long ipc_mbim_fop_unlocked_ioctl(struct file *filp, unsigned int cmd,
+					unsigned long arg)
+{
+	struct iosm_sio *ipc_mbim =
+		container_of(filp->private_data, struct iosm_sio, misc);
+
+	if (cmd != IOCTL_WDM_MAX_COMMAND ||
+	    !access_ok((void __user *)arg, sizeof(ipc_mbim->wmaxcommand)))
+		return -EINVAL;
+
+	if (copy_to_user((void __user *)arg, &ipc_mbim->wmaxcommand,
+			 sizeof(ipc_mbim->wmaxcommand)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Open the MBIM channel on the shared memory device. */
+static int ipc_mbim_fop_open(struct inode *inode, struct file *filp)
+{
+	struct iosm_sio *ipc_mbim =
+		container_of(filp->private_data, struct iosm_sio, misc);
+
+	struct iosm_sio_open_file *mbim_op = kzalloc(sizeof(*mbim_op),
+						      GFP_KERNEL);
+	if (!mbim_op)
+		return -ENOMEM;
+
+	if (test_and_set_bit(IS_OPEN, &ipc_mbim->flag)) {
+		kfree(mbim_op);
+		return -EBUSY;
+	}
+
+	ipc_mbim->channel = imem_sys_mbim_open(ipc_mbim->ipc_imem);
+
+	if (!ipc_mbim->channel) {
+		kfree(mbim_op);
+		return -EIO;
+	}
+
+	mutex_lock(&mbim_floc);
+
+	inode->i_private = mbim_op;
+	ipc_mbim->sio_fop = mbim_op;
+	mbim_op->sio_dev = ipc_mbim;
+
+	mutex_unlock(&mbim_floc);
+	return 0;
+}
+
+/* Close a shared memory control device and free the rx skbuf list. */
+static int ipc_mbim_fop_release(struct inode *inode, struct file *filp)
+{
+	struct iosm_sio_open_file *mbim_op = inode->i_private;
+
+	mutex_lock(&mbim_floc);
+	if (mbim_op->sio_dev) {
+		clear_bit(IS_OPEN, &mbim_op->sio_dev->flag);
+		imem_sys_sio_close(mbim_op->sio_dev);
+		mbim_op->sio_dev->sio_fop = NULL;
+	}
+	kfree(mbim_op);
+	mutex_unlock(&mbim_floc);
+	return 0;
+}
+
+/* Copy the data from skbuff to the user buffer */
+static ssize_t ipc_mbim_fop_read(struct file *filp, char __user *buf,
+				 size_t size, loff_t *l)
+{
+	struct iosm_sio_open_file *mbim_op = filp->f_inode->i_private;
+	struct sk_buff *skb = NULL;
+	struct iosm_sio *ipc_mbim;
+	ssize_t read_byt;
+	int ret_err;
+
+	if (!access_ok(buf, size)) {
+		ret_err = -EINVAL;
+		goto err;
+	}
+
+	mutex_lock(&mbim_floc);
+
+	if (!mbim_op->sio_dev) {
+		ret_err = -EIO;
+		goto err_free_lock;
+	}
+
+	ipc_mbim = mbim_op->sio_dev;
+
+	if (!(filp->f_flags & O_NONBLOCK))
+		set_bit(IS_BLOCKING, &ipc_mbim->flag);
+
+	/* First provide the pending skbuf to the user. */
+	if (ipc_mbim->rx_pending_buf) {
+		skb = ipc_mbim->rx_pending_buf;
+		ipc_mbim->rx_pending_buf = NULL;
+	}
+
+	/* Check rx queue until skb is available */
+	while (!skb && !(skb = skb_dequeue(&ipc_mbim->rx_list))) {
+		if (!test_bit(IS_BLOCKING, &ipc_mbim->flag)) {
+			ret_err = -EAGAIN;
+			goto err_free_lock;
+		}
+
+		/* Suspend the user app and wait a certain time for data
+		 * from CP.
+		 */
+		wait_for_completion_interruptible_timeout(&ipc_mbim->read_sem,
+				msecs_to_jiffies(IPC_READ_TIMEOUT));
+
+		if (test_bit(IS_DEINIT, &ipc_mbim->flag)) {
+			ret_err = -EPERM;
+			goto err_free_lock;
+		}
+	}
+
+	read_byt = imem_sys_sio_read(ipc_mbim, buf, size, skb);
+	mutex_unlock(&mbim_floc);
+	return read_byt;
+
+err_free_lock:
+	mutex_unlock(&mbim_floc);
+err:
+	return ret_err;
+}
+
+/* Route the user data to the shared memory layer. */
+static ssize_t ipc_mbim_fop_write(struct file *filp, const char __user *buf,
+				  size_t size, loff_t *l)
+{
+	struct iosm_sio_open_file *mbim_op = filp->f_inode->i_private;
+	struct iosm_sio *ipc_mbim;
+	bool is_blocking;
+	ssize_t write_byt;
+	int ret_err;
+
+	if (!access_ok(buf, size)) {
+		ret_err = -EINVAL;
+		goto err;
+	}
+
+	mutex_lock(&mbim_floc_wr);
+
+	if (!mbim_op->sio_dev) {
+		ret_err = -EIO;
+		goto err_free_lock;
+	}
+
+	ipc_mbim = mbim_op->sio_dev;
+
+	is_blocking = !(filp->f_flags & O_NONBLOCK);
+
+	if (test_bit(WRITE_IN_USE, &ipc_mbim->flag)) {
+		ret_err = -EAGAIN;
+		goto err_free_lock;
+	}
+	write_byt = imem_sys_sio_write(ipc_mbim, buf, size, is_blocking);
+
+	mutex_unlock(&mbim_floc_wr);
+	return write_byt;
+
+err_free_lock:
+	mutex_unlock(&mbim_floc_wr);
+err:
+	return ret_err;
+}
+
+/* Poll mechanism for applications that use nonblocking IO */
+static __poll_t ipc_mbim_fop_poll(struct file *filp, poll_table *wait)
+{
+	struct iosm_sio *ipc_mbim =
+		container_of(filp->private_data, struct iosm_sio, misc);
+	__poll_t mask = 0;
+
+	/* Just registers wait_queue hook. This doesn't really wait. */
+	poll_wait(filp, &ipc_mbim->poll_inq, wait);
+
+	if (!test_bit(WRITE_IN_USE, &ipc_mbim->flag))
+		mask |= EPOLLOUT | EPOLLWRNORM; /* writable */
+
+	/* Test the fill level of the skbuf rx queue. */
+	if (!skb_queue_empty(&ipc_mbim->rx_list) || ipc_mbim->rx_pending_buf)
+		mask |= EPOLLIN | EPOLLRDNORM; /* readable */
+
+	return mask;
+}
+
+struct iosm_sio *ipc_mbim_init(struct iosm_imem *ipc_imem, const char *name)
+{
+	struct iosm_sio *ipc_mbim = kzalloc(sizeof(*ipc_mbim), GFP_KERNEL);
+
+	static const struct file_operations fops = {
+		.owner = THIS_MODULE,
+		.open = ipc_mbim_fop_open,
+		.release = ipc_mbim_fop_release,
+		.read = ipc_mbim_fop_read,
+		.write = ipc_mbim_fop_write,
+		.poll = ipc_mbim_fop_poll,
+		.unlocked_ioctl = ipc_mbim_fop_unlocked_ioctl,
+	};
+
+	if (!ipc_mbim)
+		return NULL;
+
+	ipc_mbim->dev = ipc_imem->dev;
+	ipc_mbim->pcie = ipc_imem->pcie;
+	ipc_mbim->ipc_imem = ipc_imem;
+
+	ipc_mbim->wmaxcommand = WDM_MAX_SIZE;
+
+	mutex_init(&mbim_floc);
+	mutex_init(&mbim_floc_wr);
+	init_completion(&ipc_mbim->read_sem);
+
+	skb_queue_head_init(&ipc_mbim->rx_list);
+	init_waitqueue_head(&ipc_mbim->poll_inq);
+
+	strncpy(ipc_mbim->devname, name, sizeof(ipc_mbim->devname) - 1);
+	ipc_mbim->devname[IPC_SIO_DEVNAME_LEN - 1] = '\0';
+
+	ipc_mbim->misc.minor = MISC_DYNAMIC_MINOR;
+	ipc_mbim->misc.name = ipc_mbim->devname;
+	ipc_mbim->misc.fops = &fops;
+	ipc_mbim->misc.mode = IPC_CHAR_DEVICE_DEFAULT_MODE;
+
+	if (misc_register(&ipc_mbim->misc)) {
+		kfree(ipc_mbim);
+		return NULL;
+	}
+
+	dev_set_drvdata(ipc_mbim->misc.this_device, ipc_mbim);
+
+	return ipc_mbim;
+}
+
+void ipc_mbim_deinit(struct iosm_sio *ipc_mbim)
+{
+	misc_deregister(&ipc_mbim->misc);
+
+	set_bit(IS_DEINIT, &ipc_mbim->flag);
+	/* Ensure the IS_DEINIT flag update is visible to a reader or
+	 * writer blocked in the fops before the completions are signalled.
+	 */
+	smp_mb__after_atomic();
+
+	if (test_bit(IS_BLOCKING, &ipc_mbim->flag)) {
+		complete(&ipc_mbim->read_sem);
+		complete(&ipc_mbim->channel->ul_sem);
+	}
+
+	mutex_lock(&mbim_floc);
+	mutex_lock(&mbim_floc_wr);
+
+	ipc_pcie_kfree_skb(ipc_mbim->pcie, ipc_mbim->rx_pending_buf);
+	ipc_mbim->rx_pending_buf = NULL;
+	skb_queue_purge(&ipc_mbim->rx_list);
+
+	if (ipc_mbim->sio_fop)
+		ipc_mbim->sio_fop->sio_dev = NULL;
+
+	mutex_unlock(&mbim_floc_wr);
+	mutex_unlock(&mbim_floc);
+
+	kfree(ipc_mbim);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mbim.h b/drivers/net/wwan/iosm/iosm_ipc_mbim.h
new file mode 100644
index 000000000000..0ca0be6fd4d8
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mbim.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MBIM_H
+#define IOSM_IPC_MBIM_H
+
+/**
+ * ipc_mbim_init - Initialize and create a character device for MBIM
+ *		   communication.
+ * @ipc_imem:	Pointer to iosm_imem structure
+ * @name:	Pointer to character device name
+ *
+ * Returns: Pointer to the iosm_sio instance on success, NULL on failure.
+ */
+struct iosm_sio *ipc_mbim_init(struct iosm_imem *ipc_imem, const char *name);
+
+/**
+ * ipc_mbim_deinit - Deregister the char device and free the ipc mbim struct.
+ * @ipc_mbim:	Pointer to the ipc mbim data-struct
+ */
+void ipc_mbim_deinit(struct iosm_sio *ipc_mbim);
+
+#endif
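
For context (not part of the patch): a rough sketch of how the modem core is
expected to wire up this API during bring-up and tear-down. The 'mbim' member
of struct iosm_imem and the device name are assumptions for illustration only.

  #include <linux/errno.h>

  #include "iosm_ipc_mbim.h"

  static int ipc_imem_mbim_setup(struct iosm_imem *ipc_imem)
  {
  	/* Registers the misc char device and allocates the iosm_sio state. */
  	ipc_imem->mbim = ipc_mbim_init(ipc_imem, "iosm_mbim0");

  	return ipc_imem->mbim ? 0 : -ENODEV;
  }

  static void ipc_imem_mbim_teardown(struct iosm_imem *ipc_imem)
  {
  	/* Deregisters the char device, wakes any blocked reader and frees
  	 * the iosm_sio state.
  	 */
  	ipc_mbim_deinit(ipc_imem->mbim);
  	ipc_imem->mbim = NULL;
  }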
-- 
2.12.3


Thread overview: 29+ messages
2021-01-07 17:05 [PATCH 00/18] net: iosm: PCIe Driver for Intel M.2 Modem M Chetan Kumar
2021-01-07 17:05 ` [PATCH 01/18] net: iosm: entry point M Chetan Kumar
2021-01-07 17:05 ` [PATCH 02/18] net: iosm: irq handling M Chetan Kumar
2021-01-07 17:05 ` [PATCH 03/18] net: iosm: mmio scratchpad M Chetan Kumar
2021-01-07 17:05 ` [PATCH 04/18] net: iosm: shared memory IPC interface M Chetan Kumar
2021-01-07 17:05 ` [PATCH 05/18] net: iosm: shared memory I/O operations M Chetan Kumar
2021-01-07 17:05 ` [PATCH 06/18] net: iosm: channel configuration M Chetan Kumar
2021-01-07 17:05 ` [PATCH 07/18] net: iosm: char device for FW flash & coredump M Chetan Kumar
2021-01-07 19:35   ` Andrew Lunn
2021-01-07 17:05 ` M Chetan Kumar [this message]
2021-01-07 17:05 ` [PATCH 09/18] net: iosm: bottom half M Chetan Kumar
2021-01-07 17:05 ` [PATCH 10/18] net: iosm: multiplex IP sessions M Chetan Kumar
2021-01-07 17:05 ` [PATCH 11/18] net: iosm: encode or decode datagram M Chetan Kumar
2021-01-07 21:56   ` Andrew Lunn
2021-01-07 17:05 ` [PATCH 12/18] net: iosm: power management M Chetan Kumar
2021-01-07 17:05 ` [PATCH 13/18] net: iosm: shared memory protocol M Chetan Kumar
2021-01-07 17:05 ` [PATCH 14/18] net: iosm: protocol operations M Chetan Kumar
2021-01-07 17:05 ` [PATCH 15/18] net: iosm: uevent support M Chetan Kumar
2021-01-07 17:05 ` [PATCH 16/18] net: iosm: net driver M Chetan Kumar
2021-01-07 20:11   ` Andrew Lunn
2021-01-07 17:05 ` [PATCH 17/18] net: iosm: readme file M Chetan Kumar
2021-01-07 22:23   ` Andrew Lunn
2021-01-15  9:15     ` Johannes Berg
2021-01-17 17:26     ` Bjørn Mork
2021-01-20 19:34       ` Andrew Lunn
2021-01-20 23:32         ` Jakub Kicinski
2021-01-21  1:34           ` Dan Williams
2021-01-22 23:45             ` Andrew Lunn
2021-01-07 17:05 ` [PATCH 18/18] net: iosm: infrastructure M Chetan Kumar
