All of lore.kernel.org
 help / color / mirror / Atom feed
From: viktor.barna@celeno.com
To: linux-wireless@vger.kernel.org
Cc: Kalle Valo <kvalo@codeaurora.org>,
	"David S . Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>,
	Aviad Brikman <aviad.brikman@celeno.com>,
	Eliav Farber <eliav.farber@gmail.com>,
	Oleksandr Savchenko <oleksandr.savchenko@celeno.com>,
	Shay Bar <shay.bar@celeno.com>,
	Viktor Barna <viktor.barna@celeno.com>
Subject: [RFC v1 017/256] cl8k: add bus/pci/ipc.c
Date: Thu, 17 Jun 2021 15:58:24 +0000	[thread overview]
Message-ID: <20210617160223.160998-18-viktor.barna@celeno.com> (raw)
In-Reply-To: <20210617160223.160998-1-viktor.barna@celeno.com>

From: Viktor Barna <viktor.barna@celeno.com>

(Part of the driver split. Please see the cover letter for more
details.)

Signed-off-by: Viktor Barna <viktor.barna@celeno.com>
---
 .../net/wireless/celeno/cl8k/bus/pci/ipc.c    | 1278 +++++++++++++++++
 1 file changed, 1278 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/bus/pci/ipc.c

diff --git a/drivers/net/wireless/celeno/cl8k/bus/pci/ipc.c b/drivers/net/wireless/celeno/cl8k/bus/pci/ipc.c
new file mode 100644
index 000000000000..c7ba5eb09be0
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/bus/pci/ipc.c
@@ -0,0 +1,1278 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "bus/pci/ipc.h"
+#include "rx/rx.h"
+#include "bus/pci/rx_pci.h"
+#include "tx/tx.h"
+#include "bus/pci/tx_pci.h"
+#include "bus/pci/irq.h"
+#include "fw/fw_dbg.h"
+#include "reg/reg_access.h"
+#include "reg/reg_ipc.h"
+#include "enhanced_tim.h"
+#include "fw/msg_rx.h"
+#include "dbgfile.h"
+#ifdef TRACE_SUPPORT
+#include "trace.h"
+#endif
+
+#define DMA_CFM_QUEUE_SIZE 1024
+#define DMA_CFM_TOTAL_SIZE (8 * sizeof(struct cl_ipc_cfm_msg) * DMA_CFM_QUEUE_SIZE)
+
+/* Release the IPC host environment and clear the stale pointer. */
+static void ipc_env_free(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *env = cl_hw->ipc_env;
+
+       cl_hw->ipc_env = NULL;
+       kfree(env);
+}
+
+/*
+ * Drop the ring-indices element.  The indices themselves are chip memory
+ * (set up in ring_indices_alloc()), so they are zeroed, not freed, here.
+ */
+static void ring_indices_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *env = cl_hw->ipc_env;
+
+       if (!env->ring_indices_elem)
+               return;
+
+       /* Wipe the shared indices before the element goes away */
+       memset(env->ring_indices_elem->indices, 0, sizeof(struct cl_ipc_ring_indices));
+       env->ring_indices_elem->indices = NULL;
+
+       kfree(env->ring_indices_elem);
+       env->ring_indices_elem = NULL;
+}
+
+/*
+ * Free one txdesc array.  Mirrors __txdesc_alloc(): arrays smaller than a
+ * page come from the txdesc DMA pool, larger ones from dma_alloc_coherent().
+ */
+static void _txdesc_dealloc(struct cl_hw *cl_hw,
+                           struct txdesc *txdesc,
+                           __le32 dma_addr,
+                           u32 desc_num)
+{
+       u32 size = desc_num * sizeof(struct txdesc);
+       dma_addr_t phys_dma_addr = le32_to_cpu(dma_addr);
+
+       if (size >= PAGE_SIZE)
+               dma_free_coherent(cl_hw->chip->dev, size, txdesc, phys_dma_addr);
+       else
+               dma_pool_free(cl_hw->txdesc_pool, txdesc, phys_dma_addr);
+}
+
+/*
+ * Free every txdesc array and destroy the backing DMA pool.
+ *
+ * This is reached through ipc_elems_dealloc() on ipc_elems_alloc() error
+ * paths, where tx_queues_alloc() may not have run yet and queues_dma_addr
+ * is still NULL; the original code dereferenced it unconditionally.  Skip
+ * straight to the pool teardown in that case (dma_pool_destroy(NULL) is a
+ * no-op).
+ */
+static void txdesc_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_tx_queues *tx_queues = &cl_hw->ipc_env->tx_queues;
+       struct tx_queues_dma_addr *queues_dma_addr = tx_queues->queues_dma_addr;
+       u32 i;
+
+       if (!queues_dma_addr)
+               goto destroy_pool;
+
+       if (queues_dma_addr->bcmc) {
+               _txdesc_dealloc(cl_hw, tx_queues->ipc_txdesc_bcmc, queues_dma_addr->bcmc,
+                               IPC_TXDESC_CNT_BCMC);
+               queues_dma_addr->bcmc = 0;
+       }
+
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++) {
+               if (!queues_dma_addr->single[i])
+                       continue;
+
+               _txdesc_dealloc(cl_hw, tx_queues->ipc_txdesc_single[i],
+                               queues_dma_addr->single[i], IPC_TXDESC_CNT_SINGLE);
+               queues_dma_addr->single[i] = 0;
+       }
+
+       for (i = 0; i < IPC_MAX_BA_SESSIONS; i++) {
+               if (!queues_dma_addr->agg[i])
+                       continue;
+
+               _txdesc_dealloc(cl_hw, tx_queues->ipc_txdesc_agg[i],
+                               queues_dma_addr->agg[i], cl_hw->max_agg_tx_q_size);
+               queues_dma_addr->agg[i] = 0;
+       }
+
+destroy_pool:
+       dma_pool_destroy(cl_hw->txdesc_pool);
+       cl_hw->txdesc_pool = NULL;
+}
+
+/* Free the coherent block holding the per-queue txdesc DMA addresses. */
+static void tx_queues_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_tx_queues *tx_queues = &cl_hw->ipc_env->tx_queues;
+
+       if (!tx_queues->queues_dma_addr)
+               return;
+
+       dma_free_coherent(cl_hw->chip->dev, sizeof(struct tx_queues_dma_addr),
+                         (void *)tx_queues->queues_dma_addr,
+                         tx_queues->dma_addr);
+       tx_queues->queues_dma_addr = NULL;
+}
+
+/* Unmap one Rx element from the device and release its skb. */
+static void rx_dealloc_skb(struct cl_hw *cl_hw, struct cl_rx_elem *rx_elem,
+                          u16 len)
+{
+       dma_unmap_single(cl_hw->chip->dev, rx_elem->dma_addr, len, DMA_FROM_DEVICE);
+
+       kfree_skb(rx_elem->skb);
+       rx_elem->skb = NULL;
+}
+
+/* Free a coherent Rx payload-address array (one u32 slot per descriptor). */
+static void _rx_dealloc_buff(struct cl_hw *cl_hw,
+                            u32 *rxbuf,
+                            __le32 dma_addr,
+                            u32 desc_num)
+{
+       dma_free_coherent(cl_hw->chip->dev, desc_num * sizeof(u32), rxbuf,
+                         le32_to_cpu(dma_addr));
+}
+
+/* Release the RXM and FW payload-address arrays, if they were allocated. */
+static void rx_dealloc_buff(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_ipc_host_rxbuf *rxm = &ipc_env->rx_hostbuf_array[CL_RX_BUF_RXM];
+       struct cl_ipc_host_rxbuf *fw = &ipc_env->rx_hostbuf_array[CL_RX_BUF_FW];
+
+       if (rxm->dma_payload_base_addr)
+               _rx_dealloc_buff(cl_hw, rxm->dma_payload_addr,
+                                rxm->dma_payload_base_addr, IPC_RXBUF_CNT_RXM);
+
+       if (fw->dma_payload_base_addr)
+               _rx_dealloc_buff(cl_hw, fw->dma_payload_addr,
+                                fw->dma_payload_base_addr, IPC_RXBUF_CNT_FW);
+}
+
+/*
+ * Tear down all Rx state: per-element skbs, the element array and the
+ * coherent payload buffers.
+ * NOTE(review): elements flagged "passed" are skipped — their skb appears
+ * to have been handed off elsewhere; confirm ownership with the Rx path.
+ */
+static void rx_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_rx_elem *rx_elem = cl_hw->rx_elems;
+       u16 len_rxm = cl_hw->conf->ci_ipc_rxbuf_size[CL_RX_BUF_RXM];
+       u16 len_fw = cl_hw->conf->ci_ipc_rxbuf_size[CL_RX_BUF_FW];
+       int i;
+
+       if (!rx_elem)
+               return;
+
+       for (i = 0; i < IPC_RXBUF_CNT_RXM; i++, rx_elem++) {
+               if (rx_elem->skb && !rx_elem->passed)
+                       rx_dealloc_skb(cl_hw, rx_elem, len_rxm);
+       }
+
+       for (i = 0; i < IPC_RXBUF_CNT_FW; i++, rx_elem++) {
+               if (rx_elem->skb && !rx_elem->passed)
+                       rx_dealloc_skb(cl_hw, rx_elem, len_fw);
+       }
+
+       kfree(cl_hw->rx_elems);
+       cl_hw->rx_elems = NULL;
+
+       rx_dealloc_buff(cl_hw);
+}
+
+/* Return every e2a message buffer to its pool, then destroy pool + array. */
+static void msg_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_e2a_msg_elem *msg_elem = cl_hw->e2a_msg_elems;
+       int i;
+
+       if (!cl_hw->e2a_msg_elems || !cl_hw->e2a_msg_pool)
+               return;
+
+       for (i = 0; i < IPC_E2A_MSG_BUF_CNT; i++, msg_elem++) {
+               if (!msg_elem->msgbuf_ptr)
+                       continue;
+
+               dma_pool_free(cl_hw->e2a_msg_pool, msg_elem->msgbuf_ptr,
+                             msg_elem->dma_addr);
+               msg_elem->msgbuf_ptr = NULL;
+       }
+
+       dma_pool_destroy(cl_hw->e2a_msg_pool);
+       cl_hw->e2a_msg_pool = NULL;
+
+       kfree(cl_hw->e2a_msg_elems);
+       cl_hw->e2a_msg_elems = NULL;
+}
+
+/* Return every radar pulse buffer to its pool, then destroy pool + array. */
+static void radar_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_radar_elem *radar_elem = cl_hw->radar_elems;
+       int i;
+
+       if (!cl_hw->radar_pool || !cl_hw->radar_elems)
+               return;
+
+       for (i = 0; i < IPC_RADAR_BUF_CNT; i++, radar_elem++) {
+               if (!radar_elem->radarbuf_ptr)
+                       continue;
+
+               dma_pool_free(cl_hw->radar_pool, radar_elem->radarbuf_ptr,
+                             radar_elem->dma_addr);
+               radar_elem->radarbuf_ptr = NULL;
+       }
+
+       dma_pool_destroy(cl_hw->radar_pool);
+       cl_hw->radar_pool = NULL;
+
+       kfree(cl_hw->radar_elems);
+       cl_hw->radar_elems = NULL;
+}
+
+/* Return every debug message buffer to its pool, then destroy pool + array. */
+static void dbg_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_dbg_elem *dbg_elem = cl_hw->dbg_elems;
+       int i;
+
+       if (!cl_hw->dbg_pool || !cl_hw->dbg_elems)
+               return;
+
+       for (i = 0; i < IPC_DBG_BUF_CNT; i++, dbg_elem++) {
+               if (!dbg_elem->dbgbuf_ptr)
+                       continue;
+
+               dma_pool_free(cl_hw->dbg_pool, dbg_elem->dbgbuf_ptr,
+                             dbg_elem->dma_addr);
+               dbg_elem->dbgbuf_ptr = NULL;
+       }
+
+       dma_pool_destroy(cl_hw->dbg_pool);
+       cl_hw->dbg_pool = NULL;
+
+       kfree(cl_hw->dbg_elems);
+       cl_hw->dbg_elems = NULL;
+}
+
+/*
+ * Free the DMA-coherent confirmation area.
+ *
+ * Unlike its sibling *_dealloc() helpers this one had no NULL guard, yet it
+ * is reached via ipc_elems_dealloc() on ipc_elems_alloc() error paths where
+ * cfm_alloc() may never have run.  Bail out early instead of passing a NULL
+ * virtual address to dma_free_coherent().
+ */
+static void cfm_dealloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+
+       if (!ipc_env->cfm_virt_base_addr)
+               return;
+
+       dma_free_coherent(cl_hw->chip->dev,
+                         DMA_CFM_TOTAL_SIZE,
+                         ipc_env->cfm_virt_base_addr,
+                         ipc_env->cfm_dma_base_addr);
+
+       ipc_env->cfm_dma_base_addr = 0;
+       ipc_env->cfm_virt_base_addr = NULL;
+}
+
+/* Free the debug-information buffer if it was ever allocated. */
+static void dbg_info_dealloc(struct cl_hw *cl_hw)
+{
+       struct dbg_info *buf = cl_hw->dbginfo.buf;
+
+       if (!buf)
+               return;
+
+       dma_free_coherent(cl_hw->chip->dev, cl_hw->dbginfo.bufsz, buf,
+                         cl_hw->dbginfo.dma_addr);
+       cl_hw->dbginfo.buf = NULL;
+}
+
+/*
+ * Tear down every IPC element set up by ipc_elems_alloc().
+ * Also runs on ipc_elems_alloc() error paths, so each helper must tolerate
+ * state that was never allocated.
+ * NOTE(review): as written, txdesc_dealloc() and cfm_dealloc() do not fully
+ * guard against never-allocated state — verify the partial-failure paths.
+ */
+static void ipc_elems_dealloc(struct cl_hw *cl_hw)
+{
+       ring_indices_dealloc(cl_hw);
+       txdesc_dealloc(cl_hw);
+       tx_queues_dealloc(cl_hw);
+       rx_dealloc(cl_hw);
+       msg_dealloc(cl_hw);
+       radar_dealloc(cl_hw);
+       dbg_dealloc(cl_hw);
+       cfm_dealloc(cl_hw);
+       dbg_info_dealloc(cl_hw);
+}
+
+/*
+ * Hook this TCV up to its slot in the chip-level ring-indices area.
+ * TCV1 uses the entry immediately after TCV0's.
+ */
+static int ring_indices_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_chip *chip = cl_hw->chip;
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       bool tcv0 = cl_hw_is_tcv0(cl_hw);
+
+       ipc_env->ring_indices_elem = kzalloc(sizeof(*ipc_env->ring_indices_elem), GFP_KERNEL);
+       if (!ipc_env->ring_indices_elem)
+               return -ENOMEM;
+
+       if (tcv0) {
+               ipc_env->ring_indices_elem->indices = chip->ring_indices.params;
+               ipc_env->ring_indices_elem->dma_addr = chip->ring_indices.dma_addr;
+       } else {
+               ipc_env->ring_indices_elem->indices = chip->ring_indices.params + 1;
+               ipc_env->ring_indices_elem->dma_addr =
+                       (u32)chip->ring_indices.dma_addr + sizeof(struct cl_ipc_ring_indices);
+       }
+
+       /* Start from a clean slate */
+       memset(ipc_env->ring_indices_elem->indices, 0, sizeof(struct cl_ipc_ring_indices));
+
+       return 0;
+}
+
+/* Allocate the coherent block holding per-queue txdesc DMA addresses. */
+static int tx_queues_alloc(struct cl_hw *cl_hw)
+{
+       dma_addr_t phys_dma_addr;
+       struct tx_queues_dma_addr *buf =
+               dma_alloc_coherent(cl_hw->chip->dev, sizeof(*buf),
+                                  &phys_dma_addr, GFP_KERNEL);
+
+       if (!buf)
+               return -ENOMEM;
+
+       cl_hw->ipc_env->tx_queues.queues_dma_addr = buf;
+       cl_hw->ipc_env->tx_queues.dma_addr = phys_dma_addr;
+
+       return 0;
+}
+
+/*
+ * Allocate one txdesc array of desc_num entries and report its DMA address.
+ * Sub-page arrays come from the txdesc DMA pool, larger ones from
+ * dma_alloc_coherent() (matching _txdesc_dealloc()).
+ *
+ * Fix: "size" is u32 (unsigned), so log it with %u rather than %d.
+ */
+static int __txdesc_alloc(struct cl_hw *cl_hw,
+                         struct txdesc **txdesc,
+                         u32 *dma_addr,
+                         u32 desc_num)
+{
+       dma_addr_t phys_dma_addr;
+       u32 size = desc_num * sizeof(struct txdesc);
+
+       if (size < PAGE_SIZE) {
+               *txdesc = dma_pool_alloc(cl_hw->txdesc_pool, GFP_KERNEL, &phys_dma_addr);
+               if (!(*txdesc)) {
+                       cl_dbg_err(cl_hw, "dma_pool_alloc failed size=%u\n", size);
+                       return -ENOMEM;
+               }
+       } else {
+               *txdesc = dma_alloc_coherent(cl_hw->chip->dev, size, &phys_dma_addr, GFP_KERNEL);
+               if (!(*txdesc)) {
+                       cl_dbg_err(cl_hw, "dma_alloc_coherent failed size=%u\n", size);
+                       return -ENOMEM;
+               }
+       }
+
+       *dma_addr = cpu_to_le32(phys_dma_addr);
+       /* dma_pool_alloc() does not zero its buffers, so clear explicitly */
+       memset(*txdesc, 0, size);
+
+       return 0;
+}
+
+/*
+ * Allocate an ipc txdesc array for every queue (bcmc, singles, aggregations)
+ * and record each DMA address for the firmware.  Stops at the first failure.
+ */
+static int _txdesc_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_tx_queues *tx_queues = &cl_hw->ipc_env->tx_queues;
+       struct tx_queues_dma_addr *dma = tx_queues->queues_dma_addr;
+       int ret;
+       u32 i;
+
+       cl_hw->max_agg_tx_q_size = LMAC_TXDESC_AGG_Q_SIZE_MAX;
+
+       ret = __txdesc_alloc(cl_hw, &tx_queues->ipc_txdesc_bcmc,
+                            &dma->bcmc, IPC_TXDESC_CNT_BCMC);
+
+       for (i = 0; !ret && i < MAX_SINGLE_QUEUES; i++)
+               ret = __txdesc_alloc(cl_hw, &tx_queues->ipc_txdesc_single[i],
+                                    &dma->single[i], IPC_TXDESC_CNT_SINGLE);
+
+       for (i = 0; !ret && i < IPC_MAX_BA_SESSIONS; i++)
+               ret = __txdesc_alloc(cl_hw, &tx_queues->ipc_txdesc_agg[i],
+                                    &dma->agg[i], cl_hw->max_agg_tx_q_size);
+
+       return ret;
+}
+
+/* Create the txdesc DMA pool, then allocate all per-queue descriptor arrays. */
+static int txdesc_alloc(struct cl_hw *cl_hw)
+{
+       /* Pool entries are sized for a single-queue descriptor array */
+       u32 pool_size = IPC_TXDESC_CNT_SINGLE * sizeof(struct txdesc);
+
+       cl_hw->txdesc_pool = dma_pool_create("cl_txdesc_pool", cl_hw->chip->dev, pool_size,
+                                            cache_line_size(), 0);
+       if (!cl_hw->txdesc_pool) {
+               cl_dbg_verbose(cl_hw, "dma_pool_create failed !!!\n");
+               return -ENOMEM;
+       }
+
+       return _txdesc_alloc(cl_hw);
+}
+
+/*
+ * Allocate each Rx element's DMA buffer and push its address to firmware,
+ * first for the RXM ring, then for the FW ring.
+ */
+static int rx_skb_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_rx_elem *rx_elem = cl_hw->rx_elems;
+       u16 size_rxm = cl_hw->conf->ci_ipc_rxbuf_size[CL_RX_BUF_RXM];
+       u16 size_fw = cl_hw->conf->ci_ipc_rxbuf_size[CL_RX_BUF_FW];
+       int i;
+
+       for (i = 0; i < IPC_RXBUF_CNT_RXM; i++, rx_elem++) {
+               if (cl_ipc_rx_elem_alloc(cl_hw, rx_elem, size_rxm)) {
+                       cl_dbg_verbose(cl_hw, "RXM rx_elem allocation failed !!!\n");
+                       return -ENOMEM;
+               }
+
+               cl_ipc_rxbuf_push(ipc_env, rx_elem, i, i, CL_RX_BUF_RXM);
+       }
+
+       for (i = 0; i < IPC_RXBUF_CNT_FW; i++, rx_elem++) {
+               if (cl_ipc_rx_elem_alloc(cl_hw, rx_elem, size_fw)) {
+                       cl_dbg_verbose(cl_hw, "FW rx_elem allocation failed !!!\n");
+                       return -ENOMEM;
+               }
+
+               cl_ipc_rxbuf_push(ipc_env, rx_elem, i, i, CL_RX_BUF_FW);
+       }
+
+       return 0;
+}
+
+/* Allocate a coherent array of desc_num u32 slots and return its DMA address. */
+static int _rx_buf_alloc(struct cl_hw *cl_hw, u32 **rxbuf, u32 *dma_addr, u32 desc_num)
+{
+       u32 size = desc_num * sizeof(u32);
+       dma_addr_t phys_dma_addr;
+
+       *rxbuf = dma_alloc_coherent(cl_hw->chip->dev, size, &phys_dma_addr,
+                                   GFP_KERNEL);
+       if (!*rxbuf)
+               return -ENOMEM;
+
+       *dma_addr = cpu_to_le32(phys_dma_addr);
+       memset(*rxbuf, 0, size);
+
+       return 0;
+}
+
+/* Wire up the rxdesc pointers and allocate both payload-address arrays. */
+static int rx_buf_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_ipc_host_rxbuf *rxm = &ipc_env->rx_hostbuf_array[CL_RX_BUF_RXM];
+       struct cl_ipc_host_rxbuf *fw = &ipc_env->rx_hostbuf_array[CL_RX_BUF_FW];
+       int ret;
+
+       rxm->ipc_host_rxdesc_ptr = ipc_env->ipc_host_rxdesc_rxm;
+       fw->ipc_host_rxdesc_ptr = ipc_env->ipc_host_rxdesc_fw;
+
+       /* RXM payload-address array */
+       ret = _rx_buf_alloc(cl_hw, (u32 **)&rxm->dma_payload_addr,
+                           &rxm->dma_payload_base_addr, IPC_RXBUF_CNT_RXM);
+       if (ret)
+               return ret;
+
+       /* FW payload-address array */
+       return _rx_buf_alloc(cl_hw, (u32 **)&fw->dma_payload_addr,
+                            &fw->dma_payload_base_addr, IPC_RXBUF_CNT_FW);
+}
+
+/*
+ * Allocate the Rx payload buffers, the element array and the Rx skbs.
+ *
+ * Uses kcalloc() instead of open-coded kzalloc(n * size): it zeroes the
+ * memory and guards the count * size multiplication against overflow.
+ */
+static int rx_alloc(struct cl_hw *cl_hw)
+{
+       u32 total_rx_elems = IPC_RXBUF_CNT_RXM + IPC_RXBUF_CNT_FW;
+       int ret = rx_buf_alloc(cl_hw);
+
+       if (ret)
+               return ret;
+
+       cl_hw->rx_elems = kcalloc(total_rx_elems, sizeof(struct cl_rx_elem),
+                                 GFP_KERNEL);
+       if (!cl_hw->rx_elems)
+               return -ENOMEM;
+
+       return rx_skb_alloc(cl_hw);
+}
+
+/* Pull one e2a message buffer from the pool and attach it to msg_elem. */
+static int _msg_alloc(struct cl_hw *cl_hw, struct cl_e2a_msg_elem *msg_elem)
+{
+       struct cl_ipc_e2a_msg *msg;
+       dma_addr_t dma_addr;
+
+       msg = dma_pool_alloc(cl_hw->e2a_msg_pool, GFP_KERNEL, &dma_addr);
+       if (!msg)
+               return -ENOMEM;
+
+       /* Clear the message pattern */
+       msg->pattern = 0;
+
+       /* Keep the virtual pointer for freeing and the DMA address for HW */
+       msg_elem->msgbuf_ptr = msg;
+       msg_elem->dma_addr = dma_addr;
+
+       return 0;
+}
+
+/*
+ * Allocate the e2a message element array and its DMA pool, then populate
+ * and push every message buffer to the firmware.
+ *
+ * Uses kcalloc() instead of kzalloc(cnt * size): same zeroing, plus an
+ * overflow-checked multiplication.
+ */
+static int msg_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_e2a_msg_elem *msg_elem;
+       u32 i;
+
+       cl_hw->e2a_msg_elems = kcalloc(IPC_E2A_MSG_BUF_CNT,
+                                      sizeof(struct cl_e2a_msg_elem),
+                                      GFP_KERNEL);
+       if (!cl_hw->e2a_msg_elems)
+               return -ENOMEM;
+
+       cl_hw->e2a_msg_pool = dma_pool_create("dma_pool_msg",
+                                             cl_hw->chip->dev,
+                                             sizeof(struct cl_ipc_e2a_msg),
+                                             cache_line_size(),
+                                             0);
+       if (!cl_hw->e2a_msg_pool) {
+               cl_dbg_verbose(cl_hw, "dma_pool_create failed !!!\n");
+               return -ENOMEM;
+       }
+
+       /* Initialize the msg buffers in the global IPC array. */
+       for (i = 0, msg_elem = cl_hw->e2a_msg_elems;
+            i < IPC_E2A_MSG_BUF_CNT; msg_elem++, i++) {
+               if (_msg_alloc(cl_hw, msg_elem)) {
+                       cl_dbg_verbose(cl_hw, "msg allocation failed !!!\n");
+                       return -ENOMEM;
+               }
+
+               cl_ipc_msgbuf_push(ipc_env, (ptrdiff_t)msg_elem, msg_elem->dma_addr);
+       }
+
+       return 0;
+}
+
+/* Pull one radar pulse buffer from the pool and attach it to radar_elem. */
+static int _radar_alloc(struct cl_hw *cl_hw, struct cl_radar_elem *radar_elem)
+{
+       struct cl_radar_pulse_array *radar;
+       dma_addr_t dma_addr;
+
+       radar = dma_pool_alloc(cl_hw->radar_pool, GFP_KERNEL, &dma_addr);
+       if (!radar)
+               return -ENOMEM;
+
+       /* Start with an empty pulse array */
+       radar->cnt = 0;
+
+       /* Keep the virtual pointer for freeing and the DMA address for HW */
+       radar_elem->radarbuf_ptr = radar;
+       radar_elem->dma_addr = dma_addr;
+
+       return 0;
+}
+
+/*
+ * Allocate the radar element array and its DMA pool, then populate and
+ * push every radar buffer to the firmware.
+ *
+ * Uses kcalloc() instead of kzalloc(cnt * size): same zeroing, plus an
+ * overflow-checked multiplication.
+ */
+static int radar_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_radar_elem *radar_elem;
+       u32 i;
+
+       cl_hw->radar_elems = kcalloc(IPC_RADAR_BUF_CNT,
+                                    sizeof(struct cl_radar_elem),
+                                    GFP_KERNEL);
+       if (!cl_hw->radar_elems)
+               return -ENOMEM;
+
+       cl_hw->radar_pool = dma_pool_create("dma_pool_radar",
+                                           cl_hw->chip->dev,
+                                           sizeof(struct cl_radar_pulse_array),
+                                           cache_line_size(),
+                                           0);
+       if (!cl_hw->radar_pool) {
+               cl_dbg_verbose(cl_hw, "dma_pool_create failed !!!\n");
+               return -ENOMEM;
+       }
+
+       /* Initialize the radar buffers in the global IPC array. */
+       for (i = 0, radar_elem = cl_hw->radar_elems;
+            i < IPC_RADAR_BUF_CNT; radar_elem++, i++) {
+               if (_radar_alloc(cl_hw, radar_elem)) {
+                       cl_dbg_verbose(cl_hw, "radar allocation failed !!!\n");
+                       return -ENOMEM;
+               }
+
+               cl_ipc_radarbuf_push(ipc_env, (ptrdiff_t)radar_elem, radar_elem->dma_addr);
+       }
+
+       return 0;
+}
+
+/* Pull one debug message buffer from the pool and attach it to dbg_elem. */
+static int _dbg_alloc(struct cl_hw *cl_hw, struct cl_dbg_elem *dbg_elem)
+{
+       struct cl_ipc_dbg_msg *dbg_msg;
+       dma_addr_t dma_addr;
+
+       dbg_msg = dma_pool_alloc(cl_hw->dbg_pool, GFP_KERNEL, &dma_addr);
+       if (!dbg_msg)
+               return -ENOMEM;
+
+       /* Clear the message pattern */
+       dbg_msg->pattern = 0;
+
+       /* Keep the virtual pointer for freeing and the DMA address for HW */
+       dbg_elem->dbgbuf_ptr = dbg_msg;
+       dbg_elem->dma_addr = dma_addr;
+
+       return 0;
+}
+
+/*
+ * Allocate the debug element array and its DMA pool, then populate and
+ * push every debug buffer to the firmware.
+ *
+ * Uses kcalloc() instead of kzalloc(cnt * size): same zeroing, plus an
+ * overflow-checked multiplication.
+ */
+static int dbg_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_dbg_elem *dbg_elem;
+       u32 i;
+
+       cl_hw->dbg_elems = kcalloc(IPC_DBG_BUF_CNT,
+                                  sizeof(struct cl_dbg_elem),
+                                  GFP_KERNEL);
+       if (!cl_hw->dbg_elems)
+               return -ENOMEM;
+
+       cl_hw->dbg_pool = dma_pool_create("dma_pool_dbg",
+                                         cl_hw->chip->dev,
+                                         sizeof(struct cl_ipc_dbg_msg),
+                                         cache_line_size(),
+                                         0);
+       if (!cl_hw->dbg_pool) {
+               cl_dbg_verbose(cl_hw, "dma_pool_create failed !!!\n");
+               return -ENOMEM;
+       }
+
+       /* Initialize the dbg buffers in the global IPC array. */
+       for (i = 0, dbg_elem = cl_hw->dbg_elems;
+            i < IPC_DBG_BUF_CNT; dbg_elem++, i++) {
+               if (_dbg_alloc(cl_hw, dbg_elem)) {
+                       cl_dbg_verbose(cl_hw, "dbgelem allocation failed !!!\n");
+                       return -ENOMEM;
+               }
+
+               cl_ipc_dbgbuf_push(ipc_env, (ptrdiff_t)dbg_elem, dbg_elem->dma_addr);
+       }
+
+       return 0;
+}
+
+/*
+ * Allocate and zero the DMA-coherent confirmation area.
+ *
+ * The original code cleared the buffer twice: the full DMA_CFM_TOTAL_SIZE
+ * and then the first IPC_CFM_SIZE bytes again.  The second memset() was a
+ * strict subset of the first and is dropped.
+ */
+static int cfm_alloc(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       dma_addr_t dma_addr;
+       u8 *host_virt_addr;
+
+       host_virt_addr = dma_alloc_coherent(cl_hw->chip->dev,
+                                           DMA_CFM_TOTAL_SIZE,
+                                           &dma_addr,
+                                           GFP_KERNEL);
+       if (!host_virt_addr)
+               return -ENOMEM;
+
+       memset(host_virt_addr, 0, DMA_CFM_TOTAL_SIZE);
+
+       ipc_env->cfm_dma_base_addr = dma_addr;
+       ipc_env->cfm_virt_base_addr = host_virt_addr;
+
+       return 0;
+}
+
+/* Allocate and initialize the coherent debug-information buffer. */
+static int _dbg_info_alloc(struct cl_hw *cl_hw)
+{
+       u32 len = sizeof(struct dbg_info);
+       dma_addr_t dma_addr;
+       struct dbg_info *buf;
+
+       buf = dma_alloc_coherent(cl_hw->chip->dev, len, &dma_addr, GFP_KERNEL);
+       if (!buf) {
+               cl_dbg_verbose(cl_hw, "buffer alloc of size %u failed\n", len);
+               return -ENOMEM;
+       }
+
+       memset(buf, 0, len);
+       buf->u.type = DBG_INFO_UNSET;
+
+       cl_hw->dbginfo.buf = buf;
+       cl_hw->dbginfo.dma_addr = dma_addr;
+       cl_hw->dbginfo.bufsz = len;
+
+       return 0;
+}
+
+/* Set up the debug-information buffer and advertise it to the firmware. */
+static int dbg_info_alloc(struct cl_hw *cl_hw)
+{
+       int ret = _dbg_info_alloc(cl_hw);
+
+       if (ret) {
+               cl_dbg_verbose(cl_hw, "dbginfo allocation failed !!!\n");
+               return -ENOMEM;
+       }
+
+       cl_ipc_dbginfobuf_push(cl_hw->ipc_env, cl_hw->dbginfo.dma_addr);
+
+       return 0;
+}
+
+/*
+ * Allocate every element shared with the firmware.  The helpers run in
+ * order and short-circuit on the first failure, after which everything
+ * already set up is released again.
+ */
+static int ipc_elems_alloc(struct cl_hw *cl_hw)
+{
+       if (ring_indices_alloc(cl_hw) ||
+           tx_queues_alloc(cl_hw) ||
+           txdesc_alloc(cl_hw) ||
+           rx_alloc(cl_hw) ||
+           msg_alloc(cl_hw) ||
+           radar_alloc(cl_hw) ||
+           dbg_alloc(cl_hw) ||
+           cfm_alloc(cl_hw) ||
+           dbg_info_alloc(cl_hw)) {
+               ipc_elems_dealloc(cl_hw);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/*
+ * Process one firmware debug-string buffer.
+ *
+ * Returns 0 when a valid message was consumed (and the buffer re-pushed to
+ * the firmware), non-zero when there is nothing further to process.
+ *
+ * Fix: the return type was u8 while both error paths stored -1 into it,
+ * silently truncating the status to 255.  Use int so negative codes keep
+ * their sign; the caller only tests for non-zero, so behavior is unchanged.
+ */
+static int cl_ipc_dbgfile_handler(struct cl_hw *cl_hw, ptrdiff_t hostid)
+{
+       struct cl_dbg_elem *dbg_elem = (struct cl_dbg_elem *)hostid;
+       struct cl_ipc_dbg_msg *dbg_msg;
+       int ret = 0;
+
+       /* Retrieve the message structure */
+       dbg_msg = (struct cl_ipc_dbg_msg *)dbg_elem->dbgbuf_ptr;
+
+       if (!dbg_msg) {
+               ret = -1;
+               cl_dbg_err(cl_hw, "dbgbuf_ptr is NULL!!!!\n");
+               goto dbg_push;
+       }
+
+       /* Look for pattern which means that this hostbuf has been used for a MSG */
+       if (le32_to_cpu(dbg_msg->pattern) != IPC_DBG_VALID_PATTERN) {
+               ret = -1;
+               goto dbg_no_push;
+       }
+
+       /* Reset the msg element and re-use it */
+       dbg_msg->pattern = 0;
+
+       /* Display the firmware string */
+       cl_dbgfile_print_fw_str(cl_hw, dbg_msg->string, IPC_DBG_PARAM_SIZE);
+
+dbg_push:
+       /* make sure memory is written before push to HW */
+       wmb();
+
+       /* Push back the buffer to the firmware */
+       cl_ipc_dbgbuf_push(cl_hw->ipc_env, (ptrdiff_t)dbg_elem, dbg_elem->dma_addr);
+
+dbg_no_push:
+       return ret;
+}
+
+/*
+ * Tasklet draining the firmware debug-string buffers.
+ *
+ * Repeatedly hands the hostbuf at dbg_host_idx to cl_ipc_dbgfile_handler()
+ * until the handler reports a stop condition (NULL buffer or pattern not
+ * yet valid).
+ * NOTE(review): assumes cl_ipc_dbgbuf_push() advances dbg_host_idx on each
+ * consumed buffer, otherwise this loop would spin on one element — confirm.
+ */
+static void cl_ipc_dbgfile_tasklet(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_ipc_hostbuf *dbg_array = ipc_env->dbg_hostbuf_array;
+       int dbg_handled = 0; /* count of consumed buffers, used for tracing only */
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_dbgfile_tasklet_start(cl_hw->idx);
+#endif
+
+       while (!cl_ipc_dbgfile_handler(cl_hw, dbg_array[ipc_env->dbg_host_idx].hostid))
+               dbg_handled++;
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_dbgfile_tasklet_end(cl_hw->idx, dbg_handled);
+#endif
+
+       /* Re-enable the DBG interrupt unless the HW is being stopped */
+       if (!test_bit(CL_DEV_STOP_HW, &cl_hw->drv_flags))
+               cl_irq_enable(cl_hw, cl_hw->ipc_e2a_irq.dbg);
+}
+
+/* Register all IPC tasklets; each one receives cl_hw as its context. */
+static void ipc_tasklet_init(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_host_env *env = cl_hw->ipc_env;
+       unsigned long data = (unsigned long)cl_hw;
+
+       tasklet_init(&env->rxdesc_tasklet, cl_rx_pci_desc_tasklet, data);
+       tasklet_init(&env->tx_single_cfm_tasklet, cl_tx_pci_single_cfm_tasklet, data);
+       tasklet_init(&env->tx_agg_cfm_tasklet, cl_tx_pci_agg_cfm_tasklet, data);
+       tasklet_init(&env->msg_tasklet, cl_msg_rx_tasklet, data);
+       tasklet_init(&env->dbg_tasklet, cl_ipc_dbgfile_tasklet, data);
+}
+
+/*
+ * Allocate the IPC host environment.
+ *
+ * kzalloc() already returns zeroed memory, so the original word-by-word
+ * clearing loop (justified by a misalignment concern that does not apply
+ * to a freshly kmalloc'ed buffer) was redundant and is removed.
+ */
+static int ipc_env_init(struct cl_hw *cl_hw)
+{
+       BUILD_BUG_ON_NOT_POWER_OF_2(IPC_RXBUF_CNT_RXM);
+       BUILD_BUG_ON_NOT_POWER_OF_2(IPC_RXBUF_CNT_FW);
+
+       cl_hw->ipc_env = kzalloc(sizeof(*cl_hw->ipc_env), GFP_KERNEL);
+       if (!cl_hw->ipc_env)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* True when the configured ela mode enables the logic analyzer. */
+static bool is_la_enabled(struct cl_chip *chip)
+{
+       s8 *mode = chip->conf->ce_ela_mode;
+
+       if (!strcmp(mode, "default"))
+               return true;
+
+       return !strncmp(mode, "lcu_mac", 7) || !strncmp(mode, "lcu_phy", 7);
+}
+
+/*
+ * Initialize the environment shared with the firmware, which lives in BAR0
+ * shared RAM (SHARED_RAM_START_ADDR); TCV1's copy sits right after TCV0's.
+ *
+ * NOTE(review): shared_env points into mapped PCI BAR space; the word-by-
+ * word clear (instead of memset()) looks deliberate for this mapping —
+ * confirm whether memset_io()/iowrite32() should be used for these
+ * accesses.
+ */
+static void ipc_shared_env_init(struct cl_hw *cl_hw)
+{
+       struct cl_chip *chip = cl_hw->chip;
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_ipc_shared_env *shared_env =
+               (struct cl_ipc_shared_env *)(chip->pci_bar0_virt_addr + SHARED_RAM_START_ADDR);
+       u32 *dst, i;
+
+       /* The shared environment of TCV1 is located after the shared environment of TCV0. */
+       if (cl_hw_is_tcv1(cl_hw))
+               shared_env++;
+
+       dst = (u32 *)(shared_env);
+
+       /* Reset the shared environment */
+       for (i = 0; i < sizeof(struct cl_ipc_shared_env); i += sizeof(u32))
+               *dst++ = 0;
+
+       /* max_retry is forced to 0 in production mode */
+       shared_env->la_enable = is_la_enabled(chip);
+       shared_env->max_retry = cl_hw->chip->conf->ce_production_mode ?
+               0 : cpu_to_le16(cl_hw->conf->ce_max_retry);
+       shared_env->lft_limit_ms = cpu_to_le16(CL_TX_LIFETIME_MS);
+       shared_env->phy_dev = cpu_to_le16(chip->conf->ci_phy_dev);
+
+       /* Initialize the shared environment pointer */
+       ipc_env->shared = shared_env;
+}
+
+/*
+ * Select the e2a interrupt bit map: TCV0 talks to the LMAC (L2H bits),
+ * any other TCV to the SMAC (S2H bits).
+ */
+static void ipc_e2a_irq_init(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_e2a_irq *irq = &cl_hw->ipc_e2a_irq;
+       bool lmac = cl_hw_is_tcv0(cl_hw);
+
+       irq->dbg = lmac ? IPC_IRQ_L2H_DBG : IPC_IRQ_S2H_DBG;
+       irq->msg = lmac ? IPC_IRQ_L2H_MSG : IPC_IRQ_S2H_MSG;
+       irq->rxdesc = lmac ? IPC_IRQ_L2H_RXDESC : IPC_IRQ_S2H_RXDESC;
+       irq->txcfm = lmac ? IPC_IRQ_L2H_TXCFM : IPC_IRQ_S2H_TXCFM;
+       irq->radar = lmac ? IPC_IRQ_L2H_RADAR : IPC_IRQ_S2H_RADAR;
+       irq->txdesc_ind = lmac ? IPC_IRQ_L2H_TXDESC_IND : IPC_IRQ_S2H_TXDESC_IND;
+       irq->tbtt = lmac ? IPC_IRQ_L2H_TBTT : IPC_IRQ_S2H_TBTT;
+       irq->sync = lmac ? IPC_IRQ_L2H_SYNC : IPC_IRQ_S2H_SYNC;
+       irq->all = lmac ? IPC_IRQ_L2H_ALL : IPC_IRQ_S2H_ALL;
+}
+
+/*
+ * One-time IPC bring-up: environment allocation, IRQ map selection,
+ * shared-memory setup, element allocation and tasklet registration.
+ */
+int cl_ipc_init(struct cl_hw *cl_hw)
+{
+       int ret;
+
+       ret = ipc_env_init(cl_hw);
+       if (ret)
+               return ret;
+
+       ipc_e2a_irq_init(cl_hw);
+
+       cl_hw->ipc_host2xmac_trigger_set = cl_hw_is_tcv0(cl_hw) ?
+               ipc_host_2_lmac_trigger_set : ipc_host_2_smac_trigger_set;
+
+       ipc_shared_env_init(cl_hw);
+
+       ret = ipc_elems_alloc(cl_hw);
+       if (ret) {
+               ipc_env_free(cl_hw);
+               return ret;
+       }
+
+       ipc_tasklet_init(cl_hw);
+
+       return 0;
+}
+
+static void ring_indices_reset(struct cl_hw *cl_hw)
+{
+       /* Clear the ring indices shared with the firmware, then the
+        * host-side rxdesc read-index followers for both rx buffer types.
+        */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+
+       memset(ipc_env->ring_indices_elem->indices, 0,
+              sizeof(*ipc_env->ring_indices_elem->indices));
+
+       /* Reset host desc read idx follower */
+       ipc_env->host_rxdesc_read_idx[CL_RX_BUF_RXM] = 0;
+       ipc_env->host_rxdesc_read_idx[CL_RX_BUF_FW] = 0;
+}
+
+static void _txdesc_reset(struct txdesc **txdesc, u32 desc_num)
+{
+       /* Zero an array of desc_num tx descriptors */
+       memset(*txdesc, 0, desc_num * sizeof(**txdesc));
+}
+
+static void txdesc_reset(struct cl_hw *cl_hw)
+{
+       /* Clear the bcmc, single and aggregation tx descriptor arrays */
+       struct cl_ipc_tx_queues *queues = &cl_hw->ipc_env->tx_queues;
+       u32 q;
+
+       _txdesc_reset(&queues->ipc_txdesc_bcmc, IPC_TXDESC_CNT_BCMC);
+
+       for (q = 0; q < MAX_SINGLE_QUEUES; q++)
+               _txdesc_reset(&queues->ipc_txdesc_single[q], IPC_TXDESC_CNT_SINGLE);
+
+       for (q = 0; q < IPC_MAX_BA_SESSIONS; q++)
+               _txdesc_reset(&queues->ipc_txdesc_agg[q], cl_hw->max_agg_tx_q_size);
+}
+
+static void rx_skb_reset(struct cl_hw *cl_hw)
+{
+       /*
+        * Re-push the DMA addresses of the pre-allocated Rx elements so the
+        * firmware sees them again after a reset.
+        * NOTE(review): despite the original comment, nothing is allocated
+        * here - cl_ipc_rxbuf_push() only publishes existing rx_elems.
+        */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_rx_elem *rx_elem = cl_hw->rx_elems;
+       int i = 0;
+
+       /* Push RXM buffers */
+       for (i = 0; i < IPC_RXBUF_CNT_RXM; rx_elem++, i++)
+               cl_ipc_rxbuf_push(ipc_env, rx_elem, i, i, CL_RX_BUF_RXM);
+
+       /* Push FW buffers - rx_elem keeps advancing past the RXM entries */
+       for (i = 0; i < IPC_RXBUF_CNT_FW; rx_elem++, i++)
+               cl_ipc_rxbuf_push(ipc_env, rx_elem, i, i, CL_RX_BUF_FW);
+}
+
+static void _rx_buf_reset(u32 **rxbuf, u32 desc_num)
+{
+       /* Zero an array of desc_num 32-bit DMA payload addresses */
+       memset(*rxbuf, 0, desc_num * sizeof(u32));
+}
+
+static void rx_buf_reset(struct cl_hw *cl_hw)
+{
+       /* Clear the RXM and FW rx DMA payload address arrays */
+       struct cl_ipc_host_env *env = cl_hw->ipc_env;
+
+       _rx_buf_reset((u32 **)&env->rx_hostbuf_array[CL_RX_BUF_RXM].dma_payload_addr,
+                     IPC_RXBUF_CNT_RXM);
+       _rx_buf_reset((u32 **)&env->rx_hostbuf_array[CL_RX_BUF_FW].dma_payload_addr,
+                     IPC_RXBUF_CNT_FW);
+}
+
+static void rx_reset(struct cl_hw *cl_hw)
+{
+       /* Clear the rx DMA address arrays, then re-push every rx element */
+       rx_buf_reset(cl_hw);
+       rx_skb_reset(cl_hw);
+}
+
+static void msg_reset(struct cl_hw *cl_hw)
+{
+       /* Re-arm every e2a message buffer: clear its pattern and hand it
+        * back to the firmware through the shared array.
+        */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_e2a_msg_elem *elem = cl_hw->e2a_msg_elems;
+       u32 i;
+
+       ipc_env->e2a_msg_host_idx = 0;
+
+       for (i = 0; i < IPC_E2A_MSG_BUF_CNT; i++, elem++) {
+               elem->msgbuf_ptr->pattern = 0;
+               cl_ipc_msgbuf_push(ipc_env, (ptrdiff_t)elem, elem->dma_addr);
+       }
+}
+
+static void radar_reset(struct cl_hw *cl_hw)
+{
+       /* Re-arm every radar event buffer: clear its counter and hand it
+        * back to the firmware through the shared array.
+        */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_radar_elem *elem = cl_hw->radar_elems;
+       u32 i;
+
+       ipc_env->radar_host_idx = 0;
+
+       for (i = 0; i < IPC_RADAR_BUF_CNT; i++, elem++) {
+               elem->radarbuf_ptr->cnt = 0;
+               cl_ipc_radarbuf_push(ipc_env, (ptrdiff_t)elem, elem->dma_addr);
+       }
+}
+
+static void dbg_reset(struct cl_hw *cl_hw)
+{
+       /* Re-arm every debug message buffer: clear its pattern and hand it
+        * back to the firmware through the shared array.
+        */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_dbg_elem *elem = cl_hw->dbg_elems;
+       u32 i;
+
+       ipc_env->dbg_host_idx = 0;
+
+       for (i = 0; i < IPC_DBG_BUF_CNT; i++, elem++) {
+               elem->dbgbuf_ptr->pattern = 0;
+               cl_ipc_dbgbuf_push(ipc_env, (ptrdiff_t)elem, elem->dma_addr);
+       }
+}
+
+static void cfm_reset(struct cl_hw *cl_hw)
+{
+       /* Rewind the confirmation queue index and clear its DMA area */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+
+       ipc_env->cfm_used_idx = 0;
+       memset(ipc_env->cfm_virt_base_addr, 0, IPC_CFM_SIZE);
+}
+
+static void dbg_info_reset(struct cl_hw *cl_hw)
+{
+       /* Clear the debug-info buffer, mark its type as unset and
+        * re-publish its DMA address to the firmware.
+        */
+       struct dbg_info *buf = cl_hw->dbginfo.buf;
+
+       memset(buf, 0, sizeof(struct dbg_info));
+       buf->u.type = DBG_INFO_UNSET;
+
+       cl_ipc_dbginfobuf_push(cl_hw->ipc_env, cl_hw->dbginfo.dma_addr);
+}
+
+static void ipc_elems_reset(struct cl_hw *cl_hw)
+{
+       /* Reset every IPC element type; no memory is re-allocated here */
+       ring_indices_reset(cl_hw);
+       txdesc_reset(cl_hw);
+       rx_reset(cl_hw);
+       msg_reset(cl_hw);
+       radar_reset(cl_hw);
+       dbg_reset(cl_hw);
+       cfm_reset(cl_hw);
+       dbg_info_reset(cl_hw);
+       cl_enhanced_tim_reset(cl_hw);
+}
+
+void cl_ipc_recovery(struct cl_hw *cl_hw)
+{
+       /* Re-initialize the shared environment and reset all IPC elements
+        * after a firmware recovery; existing allocations are reused.
+        */
+       ipc_shared_env_init(cl_hw);
+       ipc_elems_reset(cl_hw);
+}
+
+void cl_ipc_reset(struct cl_hw *cl_hw)
+{
+       /* Rewind the confirmation ring on both the shared (firmware-visible)
+        * and host sides.
+        */
+       cl_hw->ipc_env->shared->cfm_read_pointer = 0;
+       cl_hw->ipc_env->cfm_used_idx = 0;
+}
+
+void cl_ipc_deinit(struct cl_hw *cl_hw)
+{
+       /* Counterpart of cl_ipc_init(): release the IPC elements, then the
+        * host environment itself.
+        */
+       ipc_elems_dealloc(cl_hw);
+       ipc_env_free(cl_hw);
+}
+
+void cl_ipc_stop(struct cl_hw *cl_hw)
+{
+       /* Kill all IPC tasklets; tasklet_kill() waits for a running handler
+        * to complete, so none is in flight once this returns.
+        */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+
+       tasklet_kill(&ipc_env->rxdesc_tasklet);
+       tasklet_kill(&ipc_env->tx_single_cfm_tasklet);
+       tasklet_kill(&ipc_env->tx_agg_cfm_tasklet);
+       tasklet_kill(&ipc_env->msg_tasklet);
+       tasklet_kill(&ipc_env->dbg_tasklet);
+}
+
+int cl_ipc_rx_elem_alloc(struct cl_hw *cl_hw, struct cl_rx_elem *rx_elem, u32 size)
+{
+       /*
+        * Allocate an Rx skb and map it for DMA, recording both in rx_elem.
+        *
+        * Returns 0 on success or -ENOMEM on skb allocation / DMA mapping
+        * failure.  On any failure rx_elem->skb and rx_elem->dma_addr are
+        * cleared so the cleanup path never sees a stale (freed) skb or a
+        * dangling DMA address.  (The original code returned a bare -1 on
+        * mapping failure and left rx_elem untouched in that path.)
+        */
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+       struct hw_rxhdr *rxhdr;
+
+       rx_elem->passed = 0;
+
+       skb = dev_alloc_skb(size);
+
+       if (unlikely(!skb)) {
+               cl_dbg_verbose(cl_hw, "skb alloc failed (size %u)\n", size);
+               rx_elem->skb = NULL;
+               rx_elem->dma_addr = (dma_addr_t)0;
+               return -ENOMEM;
+       }
+
+       /* Reserve room for RX vector */
+       skb_reserve(skb, IPC_RXBUF_EXTRA_HEADROOM);
+
+       /* Clear the pattern so a stale header cannot be mistaken for a
+        * completed transfer.
+        */
+       rxhdr = (struct hw_rxhdr *)skb->data;
+       rxhdr->pattern = 0;
+
+       dma_addr = dma_map_single(cl_hw->chip->dev, skb->data, size, DMA_FROM_DEVICE);
+
+       if (unlikely(dma_mapping_error(cl_hw->chip->dev, dma_addr))) {
+               cl_dbg_verbose(cl_hw, "dma_mapping_error\n");
+               kfree_skb(skb);
+               rx_elem->skb = NULL;
+               rx_elem->dma_addr = (dma_addr_t)0;
+               return -ENOMEM;
+       }
+
+       rx_elem->skb = skb;
+       rx_elem->dma_addr = dma_addr;
+
+       cl_rx_skb_alloc_handler(skb);
+
+       return 0;
+}
+
+void cl_ipc_msgbuf_push(struct cl_ipc_host_env *ipc_env, ptrdiff_t hostid, dma_addr_t hostbuf)
+{
+       /*
+        * Hand a pre-allocated MSG buffer descriptor to the firmware.
+        * Only called at init time: messages are handled in place, so each
+        * buffer becomes reusable as soon as it has been processed and never
+        * needs to be re-allocated.
+        */
+       struct cl_ipc_shared_env *shared_env = ipc_env->shared;
+       u8 idx = ipc_env->e2a_msg_host_idx;
+
+       /* Remember hostid and hostbuf on the host side */
+       ipc_env->e2a_msg_hostbuf_array[idx].hostid = hostid;
+       ipc_env->e2a_msg_hostbuf_array[idx].dma_addr = hostbuf;
+
+       /* Publish the DMA address in the ipc shared memory */
+       shared_env->e2a_msg_hostbuf_addr[idx] = cpu_to_le32(hostbuf);
+
+       /* Advance the write index, wrapping at IPC_E2A_MSG_BUF_CNT */
+       ipc_env->e2a_msg_host_idx = (idx + 1) % IPC_E2A_MSG_BUF_CNT;
+}
+
+void cl_ipc_rxbuf_push(struct cl_ipc_host_env *ipc_env, struct cl_rx_elem *rx_elem,
+                      u32 rxdesc_read_idx, u32 host_read_idx, enum rx_buf_type type)
+{
+       /*
+        * Push a pre-allocated buffer descriptor for Rx packet.
+        * This function is called to supply the firmware with new empty buffer.
+        */
+       struct cl_ipc_ring_indices *indices = ipc_env->ring_indices_elem->indices;
+       struct cl_ipc_host_rxbuf *host_rxbuf = &ipc_env->rx_hostbuf_array[type];
+
+       /* Save the hostid and the hostbuf in global array */
+       host_rxbuf->ipc_host_rxdesc_ptr[host_read_idx] = (ptrdiff_t *)rx_elem;
+       host_rxbuf->dma_payload_addr[host_read_idx] = rx_elem->dma_addr;
+
+       /* Publish the new firmware-visible read index (little-endian).
+        * NOTE(review): the callers visible in this file pass the same value
+        * for rxdesc_read_idx and host_read_idx - confirm whether they can
+        * legitimately differ.
+        */
+       indices->rxdesc_read_idx[type] = cpu_to_le32(rxdesc_read_idx + 1);
+}
+
+void cl_ipc_radarbuf_push(struct cl_ipc_host_env *ipc_env, ptrdiff_t hostid, dma_addr_t hostbuf)
+{
+       /*
+        * Hand a pre-allocated radar event buffer descriptor to the
+        * embedded side.  Called from the host IRQ handler to supply a new
+        * empty buffer.
+        */
+       struct cl_ipc_shared_env *shared_env = ipc_env->shared;
+       u8 idx = ipc_env->radar_host_idx;
+
+       /* Remember hostid and hostbuf on the host side */
+       ipc_env->radar_hostbuf_array[idx].hostid = hostid;
+       ipc_env->radar_hostbuf_array[idx].dma_addr = hostbuf;
+
+       /* Publish the DMA address in the ipc shared memory */
+       shared_env->radarbuf_hostbuf[idx] = cpu_to_le32(hostbuf);
+
+       /* Advance the write index, wrapping at IPC_RADAR_BUF_CNT */
+       ipc_env->radar_host_idx = (idx + 1) % IPC_RADAR_BUF_CNT;
+}
+
+void cl_ipc_dbgbuf_push(struct cl_ipc_host_env *ipc_env, ptrdiff_t hostid, dma_addr_t hostbuf)
+{
+       /*
+        * Hand a pre-allocated debug message buffer to the firmware.
+        * Only called at init time: debug messages are handled in place, so
+        * each buffer becomes reusable as soon as it has been processed and
+        * never needs to be re-allocated.
+        */
+       struct cl_ipc_shared_env *shared_env = ipc_env->shared;
+       u8 idx = ipc_env->dbg_host_idx;
+
+       /* Remember hostid and hostbuf on the host side */
+       ipc_env->dbg_hostbuf_array[idx].hostid = hostid;
+       ipc_env->dbg_hostbuf_array[idx].dma_addr = hostbuf;
+
+       /* Publish the DMA address in the ipc shared memory */
+       shared_env->dbg_hostbuf_addr[idx] = cpu_to_le32(hostbuf);
+
+       /* Advance the write index, wrapping at IPC_DBG_BUF_CNT */
+       ipc_env->dbg_host_idx = (idx + 1) % IPC_DBG_BUF_CNT;
+}
+
+void cl_ipc_dbginfobuf_push(struct cl_ipc_host_env *ipc_env, dma_addr_t infobuf)
+{
+       /* Push the pre-allocated logic analyzer and debug information buffer */
+       struct cl_ipc_shared_env *shared_env = ipc_env->shared;
+
+       /* Copy the hostbuf (DMA address) in the ipc shared memory */
+       shared_env->dbginfo_addr = cpu_to_le32(infobuf);
+       /* Copy the hostbuf size in the ipc shared memory */
+       shared_env->dbginfo_size = cpu_to_le32(DBG_DUMP_BUFFER_SIZE);
+}
--
2.30.0

________________________________
The information transmitted is intended only for the person or entity to which it is addressed and may contain confidential and/or privileged material. Any retransmission, dissemination, copying or other use of, or taking of any action in reliance upon this information is prohibited. If you received this in error, please contact the sender and delete the material from any computer. Nothing contained herein shall be deemed as a representation, warranty or a commitment by Celeno. No warranties are expressed or implied, including, but not limited to, any implied warranties of non-infringement, merchantability and fitness for a particular purpose.
________________________________


  parent reply	other threads:[~2021-06-17 16:03 UTC|newest]

Thread overview: 262+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-06-17 15:58 [RFC v1 000/256] wireless: cl8k driver for Celeno IEEE 802.11ax devices viktor.barna
2021-06-17 15:58 ` [RFC v1 001/256] celeno: add Kconfig viktor.barna
2021-06-17 15:58 ` [RFC v1 002/256] celeno: add Makefile viktor.barna
2021-06-17 15:58 ` [RFC v1 003/256] cl8k: add Kconfig viktor.barna
2021-06-17 15:58 ` [RFC v1 004/256] cl8k: add Makefile viktor.barna
2021-06-17 15:58 ` [RFC v1 005/256] cl8k: add afe.c viktor.barna
2021-06-17 15:58 ` [RFC v1 006/256] cl8k: add afe.h viktor.barna
2021-06-17 15:58 ` [RFC v1 007/256] cl8k: add agc_params.c viktor.barna
2021-06-17 15:58 ` [RFC v1 008/256] cl8k: add agc_params.h viktor.barna
2021-06-17 15:58 ` [RFC v1 009/256] cl8k: add ampdu.c viktor.barna
2021-06-17 15:58 ` [RFC v1 010/256] cl8k: add ampdu.h viktor.barna
2021-06-17 15:58 ` [RFC v1 011/256] cl8k: add ate.c viktor.barna
2021-06-17 15:58 ` [RFC v1 012/256] cl8k: add ate.h viktor.barna
2021-06-17 15:58 ` [RFC v1 013/256] cl8k: add band.c viktor.barna
2021-06-17 15:58 ` [RFC v1 014/256] cl8k: add band.h viktor.barna
2021-06-17 15:58 ` [RFC v1 015/256] cl8k: add bf.c viktor.barna
2021-06-17 15:58 ` [RFC v1 016/256] cl8k: add bf.h viktor.barna
2021-06-17 15:58 ` viktor.barna [this message]
2021-06-17 15:58 ` [RFC v1 018/256] cl8k: add bus/pci/ipc.h viktor.barna
2021-06-17 15:58 ` [RFC v1 019/256] cl8k: add bus/pci/irq.c viktor.barna
2021-06-17 15:58 ` [RFC v1 020/256] cl8k: add bus/pci/irq.h viktor.barna
2021-06-17 15:58 ` [RFC v1 021/256] cl8k: add bus/pci/msg_pci.c viktor.barna
2021-06-17 15:58 ` [RFC v1 022/256] cl8k: add bus/pci/msg_pci.h viktor.barna
2021-06-17 15:58 ` [RFC v1 023/256] cl8k: add bus/pci/pci.c viktor.barna
2021-06-17 15:58 ` [RFC v1 024/256] cl8k: add bus/pci/rx_pci.c viktor.barna
2021-06-17 15:58 ` [RFC v1 025/256] cl8k: add bus/pci/rx_pci.h viktor.barna
2021-06-17 15:58 ` [RFC v1 026/256] cl8k: add bus/pci/tx_pci.c viktor.barna
2021-06-17 15:58 ` [RFC v1 027/256] cl8k: add bus/pci/tx_pci.h viktor.barna
2021-06-17 15:58 ` [RFC v1 028/256] cl8k: add calib.c viktor.barna
2021-06-17 15:58 ` [RFC v1 029/256] cl8k: add calib.h viktor.barna
2021-06-17 15:58 ` [RFC v1 030/256] cl8k: add cap.c viktor.barna
2021-06-17 15:58 ` [RFC v1 031/256] cl8k: add cap.h viktor.barna
2021-06-17 15:58 ` [RFC v1 032/256] cl8k: add cca.c viktor.barna
2021-06-17 15:58 ` [RFC v1 033/256] cl8k: add cca.h viktor.barna
2021-06-17 15:58 ` [RFC v1 034/256] cl8k: add cecli.c viktor.barna
2021-06-17 15:58 ` [RFC v1 035/256] cl8k: add cecli.h viktor.barna
2021-06-17 15:58 ` [RFC v1 036/256] cl8k: add chandef.c viktor.barna
2021-06-17 15:58 ` [RFC v1 037/256] cl8k: add chandef.h viktor.barna
2021-06-17 15:58 ` [RFC v1 038/256] cl8k: add channel.c viktor.barna
2021-06-17 15:58 ` [RFC v1 039/256] cl8k: add channel.h viktor.barna
2021-06-17 15:58 ` [RFC v1 040/256] cl8k: add chan_info.c viktor.barna
2021-06-17 15:58 ` [RFC v1 041/256] cl8k: add chan_info.h viktor.barna
2021-06-17 15:58 ` [RFC v1 042/256] cl8k: add chip.c viktor.barna
2021-06-17 15:58 ` [RFC v1 043/256] cl8k: add chip.h viktor.barna
2021-06-17 15:58 ` [RFC v1 044/256] cl8k: add chip_config.c viktor.barna
2021-06-17 15:58 ` [RFC v1 045/256] cl8k: add chip_config.h viktor.barna
2021-06-17 15:58 ` [RFC v1 046/256] cl8k: add config.c viktor.barna
2021-06-17 15:58 ` [RFC v1 047/256] cl8k: add config.h viktor.barna
2021-06-17 15:58 ` [RFC v1 048/256] cl8k: add coredump.c viktor.barna
2021-06-17 15:58 ` [RFC v1 049/256] cl8k: add coredump.h viktor.barna
2021-06-17 15:58 ` [RFC v1 050/256] cl8k: add data_rates.c viktor.barna
2021-06-17 15:58 ` [RFC v1 051/256] cl8k: add data_rates.h viktor.barna
2021-06-17 15:58 ` [RFC v1 052/256] cl8k: add dbgfile.c viktor.barna
2021-06-17 15:59 ` [RFC v1 053/256] cl8k: add dbgfile.h viktor.barna
2021-06-17 15:59 ` [RFC v1 054/256] cl8k: add debug.h viktor.barna
2021-06-17 15:59 ` [RFC v1 055/256] cl8k: add debugfs.c viktor.barna
2021-06-17 15:59 ` [RFC v1 056/256] cl8k: add debugfs.h viktor.barna
2021-06-17 15:59 ` [RFC v1 057/256] cl8k: add debugfs_defs.h viktor.barna
2021-06-17 15:59 ` [RFC v1 058/256] cl8k: add def.h viktor.barna
2021-06-17 15:59 ` [RFC v1 059/256] cl8k: add dfs/dfs.c viktor.barna
2021-06-17 15:59 ` [RFC v1 060/256] cl8k: add dfs/dfs.h viktor.barna
2021-06-17 15:59 ` [RFC v1 061/256] cl8k: add dfs/dfs_db.h viktor.barna
2021-06-17 15:59 ` [RFC v1 062/256] cl8k: add dfs/radar.c viktor.barna
2021-06-17 15:59 ` [RFC v1 063/256] cl8k: add dfs/radar.h viktor.barna
2021-06-17 15:59 ` [RFC v1 064/256] cl8k: add drv_ops.h viktor.barna
2021-06-17 15:59 ` [RFC v1 065/256] cl8k: add dsp.c viktor.barna
2021-06-17 15:59 ` [RFC v1 066/256] cl8k: add dsp.h viktor.barna
2021-06-17 15:59 ` [RFC v1 067/256] cl8k: add e2p.c viktor.barna
2021-06-17 15:59 ` [RFC v1 068/256] cl8k: add e2p.h viktor.barna
2021-06-17 15:59 ` [RFC v1 069/256] cl8k: add edca.c viktor.barna
2021-06-17 15:59 ` [RFC v1 070/256] cl8k: add edca.h viktor.barna
2021-06-17 15:59 ` [RFC v1 071/256] cl8k: add ela.c viktor.barna
2021-06-17 15:59 ` [RFC v1 072/256] cl8k: add ela.h viktor.barna
2021-06-17 15:59 ` [RFC v1 073/256] cl8k: add enhanced_tim.c viktor.barna
2021-06-17 15:59 ` [RFC v1 074/256] cl8k: add enhanced_tim.h viktor.barna
2021-06-17 15:59 ` [RFC v1 075/256] cl8k: add env_det.c viktor.barna
2021-06-17 15:59 ` [RFC v1 076/256] cl8k: add env_det.h viktor.barna
2021-06-17 15:59 ` [RFC v1 077/256] cl8k: add ext/dyn_bcast_rate.c viktor.barna
2021-06-17 15:59 ` [RFC v1 078/256] cl8k: add ext/dyn_bcast_rate.h viktor.barna
2021-06-17 15:59 ` [RFC v1 079/256] cl8k: add ext/dyn_mcast_rate.c viktor.barna
2021-06-17 15:59 ` [RFC v1 080/256] cl8k: add ext/dyn_mcast_rate.h viktor.barna
2021-06-17 15:59 ` [RFC v1 081/256] cl8k: add ext/vlan_dscp.c viktor.barna
2021-06-17 15:59 ` [RFC v1 082/256] cl8k: add ext/vlan_dscp.h viktor.barna
2021-06-17 15:59 ` [RFC v1 083/256] cl8k: add fem.c viktor.barna
2021-06-17 15:59 ` [RFC v1 084/256] cl8k: add fem.h viktor.barna
2021-06-17 15:59 ` [RFC v1 085/256] cl8k: add fem_common.h viktor.barna
2021-06-17 15:59 ` [RFC v1 086/256] cl8k: add fw/fw_dbg.c viktor.barna
2021-06-17 15:59 ` [RFC v1 087/256] cl8k: add fw/fw_dbg.h viktor.barna
2021-06-17 15:59 ` [RFC v1 088/256] cl8k: add fw/fw_file.c viktor.barna
2021-06-17 15:59 ` [RFC v1 089/256] cl8k: add fw/fw_file.h viktor.barna
2021-06-17 15:59 ` [RFC v1 090/256] cl8k: add fw/fw_msg.c viktor.barna
2021-06-17 15:59 ` [RFC v1 091/256] cl8k: add fw/fw_msg.h viktor.barna
2021-06-17 15:59 ` [RFC v1 092/256] cl8k: add fw/msg_cfm.c viktor.barna
2021-06-17 15:59 ` [RFC v1 093/256] cl8k: add fw/msg_cfm.h viktor.barna
2021-06-17 15:59 ` [RFC v1 094/256] cl8k: add fw/msg_rx.c viktor.barna
2021-06-17 15:59 ` [RFC v1 095/256] cl8k: add fw/msg_rx.h viktor.barna
2021-06-17 15:59 ` [RFC v1 096/256] cl8k: add fw/msg_tx.c viktor.barna
2021-06-17 15:59 ` [RFC v1 097/256] cl8k: add fw/msg_tx.h viktor.barna
2021-06-17 15:59 ` [RFC v1 098/256] cl8k: add hw.c viktor.barna
2021-06-17 15:59 ` [RFC v1 099/256] cl8k: add hw.h viktor.barna
2021-06-17 15:59 ` [RFC v1 100/256] cl8k: add hw_assert.c viktor.barna
2021-06-17 15:59 ` [RFC v1 101/256] cl8k: add hw_assert.h viktor.barna
2021-06-17 15:59 ` [RFC v1 102/256] cl8k: add ipc_shared.h viktor.barna
2021-06-17 15:59 ` [RFC v1 103/256] cl8k: add key.c viktor.barna
2021-06-17 15:59 ` [RFC v1 104/256] cl8k: add key.h viktor.barna
2021-06-17 15:59 ` [RFC v1 105/256] cl8k: add mac80211.c viktor.barna
2021-06-17 15:59 ` [RFC v1 106/256] cl8k: add mac80211.h viktor.barna
2021-06-17 15:59 ` [RFC v1 107/256] cl8k: add mac_addr.c viktor.barna
2021-06-17 15:59 ` [RFC v1 108/256] cl8k: add mac_addr.h viktor.barna
2021-06-17 15:59 ` [RFC v1 109/256] cl8k: add main.c viktor.barna
2021-06-17 15:59 ` [RFC v1 110/256] cl8k: add main.h viktor.barna
2021-06-17 15:59 ` [RFC v1 111/256] cl8k: add maintenance.c viktor.barna
2021-06-17 15:59 ` [RFC v1 112/256] cl8k: add maintenance.h viktor.barna
2021-06-17 16:00 ` [RFC v1 113/256] cl8k: add mib.c viktor.barna
2021-06-17 16:00 ` [RFC v1 114/256] cl8k: add mib.h viktor.barna
2021-06-17 16:00 ` [RFC v1 115/256] cl8k: add motion_sense.c viktor.barna
2021-06-17 16:00 ` [RFC v1 116/256] cl8k: add motion_sense.h viktor.barna
2021-06-17 16:00 ` [RFC v1 117/256] cl8k: add netlink.c viktor.barna
2021-06-17 16:00 ` [RFC v1 118/256] cl8k: add netlink.h viktor.barna
2021-06-17 16:00 ` [RFC v1 119/256] cl8k: add noise.c viktor.barna
2021-06-17 16:00 ` [RFC v1 120/256] cl8k: add noise.h viktor.barna
2021-06-17 16:00 ` [RFC v1 121/256] cl8k: add omi.c viktor.barna
2021-06-17 16:00 ` [RFC v1 122/256] cl8k: add omi.h viktor.barna
2021-06-17 16:00 ` [RFC v1 123/256] cl8k: add ops.c viktor.barna
2021-06-17 16:00 ` [RFC v1 124/256] cl8k: add ops.h viktor.barna
2021-06-17 16:00 ` [RFC v1 125/256] cl8k: add phy/phy.c viktor.barna
2021-06-17 16:00 ` [RFC v1 126/256] cl8k: add phy/phy.h viktor.barna
2021-06-17 16:00 ` [RFC v1 127/256] cl8k: add phy/phy_athos_lut.c viktor.barna
2021-06-17 16:00 ` [RFC v1 128/256] cl8k: add phy/phy_athos_lut.h viktor.barna
2021-06-17 16:00 ` [RFC v1 129/256] cl8k: add phy/phy_common_lut.c viktor.barna
2021-06-17 16:00 ` [RFC v1 130/256] cl8k: add phy/phy_common_lut.h viktor.barna
2021-06-17 16:00 ` [RFC v1 131/256] cl8k: add phy/phy_olympus_lut.c viktor.barna
2021-06-17 16:00 ` [RFC v1 132/256] cl8k: add phy/phy_olympus_lut.h viktor.barna
2021-06-17 16:00 ` [RFC v1 133/256] cl8k: add power.c viktor.barna
2021-06-17 16:00 ` [RFC v1 134/256] cl8k: add power.h viktor.barna
2021-06-17 16:00 ` [RFC v1 135/256] cl8k: add power_cli.c viktor.barna
2021-06-17 16:00 ` [RFC v1 136/256] cl8k: add power_cli.h viktor.barna
2021-06-17 16:00 ` [RFC v1 137/256] cl8k: add power_table.c viktor.barna
2021-06-17 16:00 ` [RFC v1 138/256] cl8k: add power_table.h viktor.barna
2021-06-17 16:00 ` [RFC v1 139/256] cl8k: add prot_mode.c viktor.barna
2021-06-17 16:00 ` [RFC v1 140/256] cl8k: add prot_mode.h viktor.barna
2021-06-17 16:00 ` [RFC v1 141/256] cl8k: add radio.c viktor.barna
2021-06-17 16:00 ` [RFC v1 142/256] cl8k: add radio.h viktor.barna
2021-06-17 16:00 ` [RFC v1 143/256] cl8k: add rate_ctrl.c viktor.barna
2021-06-17 16:00 ` [RFC v1 144/256] cl8k: add rate_ctrl.h viktor.barna
2021-06-17 16:00 ` [RFC v1 145/256] cl8k: add recovery.c viktor.barna
2021-06-17 16:00 ` [RFC v1 146/256] cl8k: add recovery.h viktor.barna
2021-06-17 16:00 ` [RFC v1 147/256] cl8k: add reg/ceva.h viktor.barna
2021-06-17 16:00 ` [RFC v1 148/256] cl8k: add reg/reg_access.h viktor.barna
2021-06-17 16:00 ` [RFC v1 149/256] cl8k: add reg/reg_cli.c viktor.barna
2021-06-17 16:00 ` [RFC v1 150/256] cl8k: add reg/reg_cli.h viktor.barna
2021-06-17 16:00 ` [RFC v1 151/256] cl8k: add reg/reg_cmu.h viktor.barna
2021-06-17 16:00 ` [RFC v1 152/256] cl8k: add reg/reg_fem.h viktor.barna
2021-06-17 16:00 ` [RFC v1 153/256] cl8k: add reg/reg_io_ctrl.h viktor.barna
2021-06-17 16:00 ` [RFC v1 154/256] cl8k: add reg/reg_ipc.h viktor.barna
2021-06-17 16:00 ` [RFC v1 155/256] cl8k: add reg/reg_lcu_common.h viktor.barna
2021-06-17 16:00 ` [RFC v1 156/256] cl8k: add reg/reg_lcu_phy.h viktor.barna
2021-06-17 16:00 ` [RFC v1 157/256] cl8k: add reg/reg_macdsp_api.h viktor.barna
2021-06-17 16:00 ` [RFC v1 158/256] cl8k: add reg/reg_macsys_gcu.h viktor.barna
2021-06-17 16:00 ` [RFC v1 159/256] cl8k: add reg/reg_mac_hw.h viktor.barna
2021-06-17 16:00 ` [RFC v1 160/256] cl8k: add reg/reg_mac_hw_mu.h viktor.barna
2021-06-17 16:00 ` [RFC v1 161/256] cl8k: add reg/reg_modem_gcu.h viktor.barna
2021-06-17 16:00 ` [RFC v1 162/256] cl8k: add reg/reg_otp_pvt.h viktor.barna
2021-06-17 16:00 ` [RFC v1 163/256] cl8k: add reg/reg_ricu.h viktor.barna
2021-06-17 16:00 ` [RFC v1 164/256] cl8k: add reg/reg_riu.h viktor.barna
2021-06-17 16:00 ` [RFC v1 165/256] cl8k: add reg/reg_riu_rc.h viktor.barna
2021-06-17 16:00 ` [RFC v1 166/256] cl8k: add rf_boot.c viktor.barna
2021-06-17 16:00 ` [RFC v1 167/256] cl8k: add rf_boot.h viktor.barna
2021-06-17 16:00 ` [RFC v1 168/256] cl8k: add rsrc_mgmt.c viktor.barna
2021-06-17 16:00 ` [RFC v1 169/256] cl8k: add rsrc_mgmt.h viktor.barna
2021-06-17 16:00 ` [RFC v1 170/256] cl8k: add rssi.c viktor.barna
2021-06-17 16:00 ` [RFC v1 171/256] cl8k: add rssi.h viktor.barna
2021-06-17 16:00 ` [RFC v1 172/256] cl8k: add rx/rx.c viktor.barna
2021-06-17 16:01 ` [RFC v1 173/256] cl8k: add rx/rx.h viktor.barna
2021-06-17 16:01 ` [RFC v1 174/256] cl8k: add rx/rx_amsdu.c viktor.barna
2021-06-17 16:01 ` [RFC v1 175/256] cl8k: add rx/rx_amsdu.h viktor.barna
2021-06-17 16:01 ` [RFC v1 176/256] cl8k: add rx/rx_filter.c viktor.barna
2021-06-17 16:01 ` [RFC v1 177/256] cl8k: add rx/rx_filter.h viktor.barna
2021-06-17 16:01 ` [RFC v1 178/256] cl8k: add rx/rx_reorder.c viktor.barna
2021-06-17 16:01 ` [RFC v1 179/256] cl8k: add rx/rx_reorder.h viktor.barna
2021-06-17 16:01 ` [RFC v1 180/256] cl8k: add sounding.c viktor.barna
2021-06-17 16:01 ` [RFC v1 181/256] cl8k: add sounding.h viktor.barna
2021-06-17 16:01 ` [RFC v1 182/256] cl8k: add sta.c viktor.barna
2021-06-17 16:01 ` [RFC v1 183/256] cl8k: add sta.h viktor.barna
2021-06-17 16:01 ` [RFC v1 184/256] cl8k: add stats.c viktor.barna
2021-06-17 16:01 ` [RFC v1 185/256] cl8k: add stats.h viktor.barna
2021-06-17 16:01 ` [RFC v1 186/256] cl8k: add tcv_config.c viktor.barna
2021-06-17 16:01 ` [RFC v1 187/256] cl8k: add tcv_config.h viktor.barna
2021-06-17 16:01 ` [RFC v1 188/256] cl8k: add temperature.c viktor.barna
2021-06-17 16:01 ` [RFC v1 189/256] cl8k: add temperature.h viktor.barna
2021-06-17 16:01 ` [RFC v1 190/256] cl8k: add trace.c viktor.barna
2021-06-17 16:01 ` [RFC v1 191/256] cl8k: add trace.h viktor.barna
2021-06-17 16:01 ` [RFC v1 192/256] cl8k: add traffic.c viktor.barna
2021-06-17 16:01 ` [RFC v1 193/256] cl8k: add traffic.h viktor.barna
2021-06-17 16:01 ` [RFC v1 194/256] cl8k: add twt.c viktor.barna
2021-06-17 16:01 ` [RFC v1 195/256] cl8k: add twt.h viktor.barna
2021-06-17 16:01 ` [RFC v1 196/256] cl8k: add twt_cli.c viktor.barna
2021-06-17 16:01 ` [RFC v1 197/256] cl8k: add twt_cli.h viktor.barna
2021-06-17 16:01 ` [RFC v1 198/256] cl8k: add twt_frame.c viktor.barna
2021-06-17 16:01 ` [RFC v1 199/256] cl8k: add twt_frame.h viktor.barna
2021-06-17 16:01 ` [RFC v1 200/256] cl8k: add tx/agg_cfm.c viktor.barna
2021-06-17 16:01 ` [RFC v1 201/256] cl8k: add tx/agg_cfm.h viktor.barna
2021-06-17 16:01 ` [RFC v1 202/256] cl8k: add tx/agg_tx_report.c viktor.barna
2021-06-17 16:01 ` [RFC v1 203/256] cl8k: add tx/agg_tx_report.h viktor.barna
2021-06-17 16:01 ` [RFC v1 204/256] cl8k: add tx/baw.c viktor.barna
2021-06-17 16:01 ` [RFC v1 205/256] cl8k: add tx/baw.h viktor.barna
2021-06-17 16:01 ` [RFC v1 206/256] cl8k: add tx/bcmc_cfm.c viktor.barna
2021-06-17 16:01 ` [RFC v1 207/256] cl8k: add tx/bcmc_cfm.h viktor.barna
2021-06-17 16:01 ` [RFC v1 208/256] cl8k: add tx/single_cfm.c viktor.barna
2021-06-17 16:01 ` [RFC v1 209/256] cl8k: add tx/single_cfm.h viktor.barna
2021-06-17 16:01 ` [RFC v1 210/256] cl8k: add tx/sw_txhdr.c viktor.barna
2021-06-17 16:01 ` [RFC v1 211/256] cl8k: add tx/sw_txhdr.h viktor.barna
2021-06-17 16:01 ` [RFC v1 212/256] cl8k: add tx/tx.c viktor.barna
2021-06-17 16:01 ` [RFC v1 213/256] cl8k: add tx/tx.h viktor.barna
2021-06-17 16:01 ` [RFC v1 214/256] cl8k: add tx/tx_amsdu.c viktor.barna
2021-06-17 16:01 ` [RFC v1 215/256] cl8k: add tx/tx_amsdu.h viktor.barna
2021-06-17 16:01 ` [RFC v1 216/256] cl8k: add tx/tx_inject.c viktor.barna
2021-06-17 16:01 ` [RFC v1 217/256] cl8k: add tx/tx_inject.h viktor.barna
2021-06-17 16:01 ` [RFC v1 218/256] cl8k: add tx/tx_queue.c viktor.barna
2021-06-17 16:01 ` [RFC v1 219/256] cl8k: add tx/tx_queue.h viktor.barna
2021-06-17 16:01 ` [RFC v1 220/256] cl8k: add utils/file.c viktor.barna
2021-06-17 16:01 ` [RFC v1 221/256] cl8k: add utils/file.h viktor.barna
2021-06-17 16:01 ` [RFC v1 222/256] cl8k: add utils/ip.c viktor.barna
2021-06-17 16:01 ` [RFC v1 223/256] cl8k: add utils/ip.h viktor.barna
2021-06-17 16:01 ` [RFC v1 224/256] cl8k: add utils/math.h viktor.barna
2021-06-17 16:01 ` [RFC v1 225/256] cl8k: add utils/string.c viktor.barna
2021-06-17 16:01 ` [RFC v1 226/256] cl8k: add utils/string.h viktor.barna
2021-06-17 16:01 ` [RFC v1 227/256] cl8k: add utils/timer.c viktor.barna
2021-06-17 16:01 ` [RFC v1 228/256] cl8k: add utils/timer.h viktor.barna
2021-06-17 16:01 ` [RFC v1 229/256] cl8k: add utils/utils.c viktor.barna
2021-06-17 16:01 ` [RFC v1 230/256] cl8k: add utils/utils.h viktor.barna
2021-06-17 16:01 ` [RFC v1 231/256] cl8k: add vendor_cmd.c viktor.barna
2021-06-17 16:01 ` [RFC v1 232/256] cl8k: add vendor_cmd.h viktor.barna
2021-06-17 16:02 ` [RFC v1 233/256] cl8k: add version.c viktor.barna
2021-06-17 16:02 ` [RFC v1 234/256] cl8k: add version.h viktor.barna
2021-06-17 16:02 ` [RFC v1 235/256] cl8k: add vif.c viktor.barna
2021-06-17 16:02 ` [RFC v1 236/256] cl8k: add vif.h viktor.barna
2021-06-17 16:02 ` [RFC v1 237/256] cl8k: add vns.c viktor.barna
2021-06-17 16:02 ` [RFC v1 238/256] cl8k: add vns.h viktor.barna
2021-06-17 16:02 ` [RFC v1 239/256] cl8k: add wrs/wrs.c viktor.barna
2021-06-17 16:02 ` [RFC v1 240/256] cl8k: add wrs/wrs.h viktor.barna
2021-06-17 16:02 ` [RFC v1 241/256] cl8k: add wrs/wrs_ap.c viktor.barna
2021-06-17 16:02 ` [RFC v1 242/256] cl8k: add wrs/wrs_ap.h viktor.barna
2021-06-17 16:02 ` [RFC v1 243/256] cl8k: add wrs/wrs_api.c viktor.barna
2021-06-17 16:02 ` [RFC v1 244/256] cl8k: add wrs/wrs_api.h viktor.barna
2021-06-17 16:02 ` [RFC v1 245/256] cl8k: add wrs/wrs_cli.c viktor.barna
2021-06-17 16:02 ` [RFC v1 246/256] cl8k: add wrs/wrs_cli.h viktor.barna
2021-06-17 16:02 ` [RFC v1 247/256] cl8k: add wrs/wrs_db.h viktor.barna
2021-06-17 16:02 ` [RFC v1 248/256] cl8k: add wrs/wrs_rssi.c viktor.barna
2021-06-17 16:02 ` [RFC v1 249/256] cl8k: add wrs/wrs_rssi.h viktor.barna
2021-06-17 16:02 ` [RFC v1 250/256] cl8k: add wrs/wrs_sta.c viktor.barna
2021-06-17 16:02 ` [RFC v1 251/256] cl8k: add wrs/wrs_sta.h viktor.barna
2021-06-17 16:02 ` [RFC v1 252/256] cl8k: add wrs/wrs_stats.c viktor.barna
2021-06-17 16:02 ` [RFC v1 253/256] cl8k: add wrs/wrs_stats.h viktor.barna
2021-06-17 16:02 ` [RFC v1 254/256] cl8k: add wrs/wrs_tables.c viktor.barna
2021-06-17 16:02 ` [RFC v1 255/256] cl8k: add wrs/wrs_tables.h viktor.barna
2021-06-17 16:02 ` [RFC v1 256/256] wireless: add Celeno vendor viktor.barna
2021-06-17 17:23 ` [RFC v1 000/256] wireless: cl8k driver for Celeno IEEE 802.11ax devices Johannes Berg
2022-05-22 17:51   ` viktor.barna
2021-06-19  6:39 ` Kalle Valo
2022-05-13 21:11   ` viktor.barna
2022-05-14  4:25     ` Kalle Valo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210617160223.160998-18-viktor.barna@celeno.com \
    --to=viktor.barna@celeno.com \
    --cc=aviad.brikman@celeno.com \
    --cc=davem@davemloft.net \
    --cc=eliav.farber@gmail.com \
    --cc=kuba@kernel.org \
    --cc=kvalo@codeaurora.org \
    --cc=linux-wireless@vger.kernel.org \
    --cc=oleksandr.savchenko@celeno.com \
    --cc=shay.bar@celeno.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.