From: sunil.kovvuri@gmail.com
To: linux-kernel@vger.kernel.org, arnd@arndb.de, olof@lixom.net
Cc: linux-arm-kernel@lists.infradead.org, linux-soc@vger.kernel.org,
	andrew@lunn.ch, davem@davemloft.net,
	Linu Cherian <lcherian@marvell.com>
Subject: [PATCH v2 14/15] soc: octeontx2: Register for CGX lmac events
Date: Tue,  4 Sep 2018 17:24:49 +0530
Message-ID: <1536062090-30446-15-git-send-email-sunil.kovvuri@gmail.com>
In-Reply-To: <1536062090-30446-1-git-send-email-sunil.kovvuri@gmail.com>

From: Linu Cherian <lcherian@marvell.com>

Add support in the RVU AF driver to register for
CGX LMAC link status change events from firmware
and to manage them. Event processing will be added
in follow-up patches.

- Introduce an event queue for posting events from CGX LMACs.
  The queueing mechanism ensures that events can be posted and
  the firmware acked immediately, so event reception and event
  processing are decoupled; a minimal sketch of this pattern
  follows below.
- Events are added to the queue by the notification callback.
  The callback must be atomic, since it is called from
  interrupt context.
- Events are dequeued and processed in a worker thread.

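For reference, here is a minimal, self-contained sketch of the
producer/consumer pattern this patch follows (simplified, hypothetical
names such as evq_entry, post_event and evhandler; the actual driver
code is in the diff below): the atomic callback only allocates a node,
links it onto a spinlock-protected list and kicks a workqueue, while
the worker drains the list in process context.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct evq_entry {
	struct list_head node;
	/* event payload would be copied here */
};

static LIST_HEAD(evq_head);
static DEFINE_SPINLOCK(evq_lock);
static struct workqueue_struct *evh_wq;
static struct work_struct evh_work;

/* Consumer: runs in process context, drains the queue entry by entry */
static void evhandler(struct work_struct *work)
{
	struct evq_entry *e;
	unsigned long flags;

	for (;;) {
		/* irqsave, since the producer takes this lock from IRQ context */
		spin_lock_irqsave(&evq_lock, flags);
		e = list_first_entry_or_null(&evq_head, struct evq_entry, node);
		if (e)
			list_del(&e->node);
		spin_unlock_irqrestore(&evq_lock, flags);
		if (!e)
			break;
		/* process the event here */
		kfree(e);
	}
}

/* Producer: interrupt context, so GFP_ATOMIC and no sleeping calls */
static int post_event(void)
{
	struct evq_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return -ENOMEM;
	spin_lock(&evq_lock);
	list_add_tail(&e->node, &evq_head);
	spin_unlock(&evq_lock);
	queue_work(evh_wq, &evh_work);
	return 0;
}

static int evq_init(void)
{
	INIT_WORK(&evh_work, evhandler);
	evh_wq = alloc_workqueue("evh_wq", 0, 0);
	if (!evh_wq)
		return -ENOMEM;
	return 0;
}

The split matters because the firmware notification cannot block: the
callback does only the minimum work needed to record the event, and
anything that may sleep is deferred to the worker.
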
Signed-off-by: Linu Cherian <lcherian@marvell.com>
---
 drivers/soc/marvell/octeontx2/rvu.c     |   6 +-
 drivers/soc/marvell/octeontx2/rvu.h     |   5 ++
 drivers/soc/marvell/octeontx2/rvu_cgx.c | 101 +++++++++++++++++++++++++++++++-
 3 files changed, 108 insertions(+), 4 deletions(-)

diff --git a/drivers/soc/marvell/octeontx2/rvu.c b/drivers/soc/marvell/octeontx2/rvu.c
index faf7d0f..282982f 100644
--- a/drivers/soc/marvell/octeontx2/rvu.c
+++ b/drivers/soc/marvell/octeontx2/rvu.c
@@ -1564,10 +1564,11 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	err = rvu_register_interrupts(rvu);
 	if (err)
-		goto err_mbox;
+		goto err_cgx;
 
 	return 0;
-
+err_cgx:
+	rvu_cgx_wq_destroy(rvu);
 err_mbox:
 	rvu_mbox_destroy(rvu);
 err_hwsetup:
@@ -1589,6 +1590,7 @@ static void rvu_remove(struct pci_dev *pdev)
 	struct rvu *rvu = pci_get_drvdata(pdev);
 
 	rvu_unregister_interrupts(rvu);
+	rvu_cgx_wq_destroy(rvu);
 	rvu_mbox_destroy(rvu);
 	rvu_reset_all_blocks(rvu);
 	rvu_free_hw_resources(rvu);
diff --git a/drivers/soc/marvell/octeontx2/rvu.h b/drivers/soc/marvell/octeontx2/rvu.h
index 385f597..d169fa9 100644
--- a/drivers/soc/marvell/octeontx2/rvu.h
+++ b/drivers/soc/marvell/octeontx2/rvu.h
@@ -110,6 +110,10 @@ struct rvu {
 						  * every cgx lmac port
 						  */
 	void			**cgx_idmap; /* cgx id to cgx data map table */
+	struct			work_struct cgx_evh_work;
+	struct			workqueue_struct *cgx_evh_wq;
+	spinlock_t		cgx_evq_lock; /* cgx event queue lock */
+	struct list_head	cgx_evq_head; /* cgx event queue head */
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -150,4 +154,5 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
 
 /* CGX APIs */
 int rvu_cgx_probe(struct rvu *rvu);
+void rvu_cgx_wq_destroy(struct rvu *rvu);
 #endif /* RVU_H */
diff --git a/drivers/soc/marvell/octeontx2/rvu_cgx.c b/drivers/soc/marvell/octeontx2/rvu_cgx.c
index bf81507..2359806e 100644
--- a/drivers/soc/marvell/octeontx2/rvu_cgx.c
+++ b/drivers/soc/marvell/octeontx2/rvu_cgx.c
@@ -15,6 +15,11 @@
 #include "rvu.h"
 #include "cgx.h"
 
+struct cgx_evq_entry {
+	struct list_head evq_node;
+	struct cgx_link_event link_event;
+};
+
 static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 {
 	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
@@ -72,9 +77,95 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 	return 0;
 }
 
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+	struct rvu *rvu = data;
+	struct cgx_evq_entry *qentry;
+
+	/* post event to the event queue */
+	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+	if (!qentry)
+		return -ENOMEM;
+	qentry->link_event = *event;
+	spin_lock(&rvu->cgx_evq_lock);
+	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+	spin_unlock(&rvu->cgx_evq_lock);
+
+	/* start worker to process the events */
+	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+	return 0;
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+	struct cgx_evq_entry *qentry;
+	struct cgx_link_event *event;
+	unsigned long flags;
+
+	do {
+		/* Dequeue an event */
+		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+						  struct cgx_evq_entry,
+						  evq_node);
+		if (qentry)
+			list_del(&qentry->evq_node);
+		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+		if (!qentry)
+			break; /* nothing more to process */
+
+		event = &qentry->link_event;
+
+		/* Do nothing for now */
+		kfree(qentry);
+	} while (1);
+}
+
+static void cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+	struct cgx_event_cb cb;
+	int cgx, lmac, err;
+	void *cgxd;
+
+	spin_lock_init(&rvu->cgx_evq_lock);
+	INIT_LIST_HEAD(&rvu->cgx_evq_head);
+	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+	if (!rvu->cgx_evh_wq) {
+		dev_err(rvu->dev, "alloc workqueue failed");
+		return;
+	}
+
+	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
+	cb.data = rvu;
+
+	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+			if (err)
+				dev_err(rvu->dev,
+					"%d:%d handler register failed\n",
+					cgx, lmac);
+		}
+	}
+}
+
+void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+	if (rvu->cgx_evh_wq) {
+		flush_workqueue(rvu->cgx_evh_wq);
+		destroy_workqueue(rvu->cgx_evh_wq);
+		rvu->cgx_evh_wq = NULL;
+	}
+}
+
 int rvu_cgx_probe(struct rvu *rvu)
 {
-	int i;
+	int i, err;
 
 	/* find available cgx ports */
 	rvu->cgx_cnt = cgx_get_cgx_cnt();
@@ -93,5 +184,11 @@ int rvu_cgx_probe(struct rvu *rvu)
 		rvu->cgx_idmap[i] = cgx_get_pdata(i);
 
 	/* Map CGX LMAC interfaces to RVU PFs */
-	return rvu_map_cgx_lmac_pf(rvu);
+	err = rvu_map_cgx_lmac_pf(rvu);
+	if (err)
+		return err;
+
+	/* Register for CGX events */
+	cgx_lmac_event_handler_init(rvu);
+	return 0;
 }
-- 
2.7.4

