From: viktor.barna@celeno.com
To: linux-wireless@vger.kernel.org
Cc: Kalle Valo <kvalo@codeaurora.org>,
	"David S . Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>,
	Aviad Brikman <aviad.brikman@celeno.com>,
	Eliav Farber <eliav.farber@gmail.com>,
	Oleksandr Savchenko <oleksandr.savchenko@celeno.com>,
	Shay Bar <shay.bar@celeno.com>,
	Viktor Barna <viktor.barna@celeno.com>
Subject: [RFC v1 218/256] cl8k: add tx/tx_queue.c
Date: Thu, 17 Jun 2021 16:01:45 +0000	[thread overview]
Message-ID: <20210617160223.160998-219-viktor.barna@celeno.com> (raw)
In-Reply-To: <20210617160223.160998-1-viktor.barna@celeno.com>

From: Viktor Barna <viktor.barna@celeno.com>

(Part of the split. Please take a look at the cover letter for more
details.)

Signed-off-by: Viktor Barna <viktor.barna@celeno.com>
---
 .../net/wireless/celeno/cl8k/tx/tx_queue.c    | 1620 +++++++++++++++++
 1 file changed, 1620 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/tx/tx_queue.c

diff --git a/drivers/net/wireless/celeno/cl8k/tx/tx_queue.c b/drivers/net/wireless/celeno/cl8k/tx/tx_queue.c
new file mode 100644
index 000000000000..18c5bd2b81f7
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/tx/tx_queue.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include <linux/ieee80211.h>
+#include <linux/types.h>
+
+#include "tx/tx_queue.h"
+#include "tx/tx.h"
+#include "tx/sw_txhdr.h"
+#include "tx/tx_amsdu.h"
+#include "tx/baw.h"
+#ifdef CONFIG_CL_PCIE
+#include "bus/pci/ipc.h"
+#endif
+#include "tx/agg_cfm.h"
+
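+/*
+ * TID to HW queue mapping, following the standard IEEE 802.11 UP-to-AC
+ * mapping (TIDs 1-2 -> BK, 0 and 3 -> BE, 4-5 -> VI, 6-7 -> VO).
+ */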
+const u8 cl_tid2hwq[IEEE80211_NUM_TIDS] = {
+       CL_HWQ_BE,
+       CL_HWQ_BK,
+       CL_HWQ_BK,
+       CL_HWQ_BE,
+       CL_HWQ_VI,
+       CL_HWQ_VI,
+       CL_HWQ_VO,
+       CL_HWQ_VO,
+       /* At the moment, all other TIDs are mapped to BE */
+       CL_HWQ_BE,
+       CL_HWQ_BE,
+       CL_HWQ_BE,
+       CL_HWQ_BE,
+       CL_HWQ_BE,
+       CL_HWQ_BE,
+       CL_HWQ_BE,
+       CL_HWQ_BE,
+};
+
+static u32 cl_txq_total_dump_drv(struct cl_tx_queue *tx_queue)
+{
+       return tx_queue->dump_queue_full + tx_queue->dump_dma_map_fail;
+}
+
+static void cl_txq_sched_list_add(struct cl_tx_queue *tx_queue, struct cl_hw *cl_hw)
+{
+       /* Add to schedule queue */
+       if (tx_queue->sched)
+               return;
+
+       tx_queue->sched = true;
+       if (tx_queue->type == QUEUE_TYPE_AGG)
+               list_add_tail(&tx_queue->sched_list, &cl_hw->list_sched_q_agg);
+       else
+               list_add_tail(&tx_queue->sched_list, &cl_hw->list_sched_q_single);
+}
+
+static void cl_txq_sched_list_remove(struct cl_tx_queue *tx_queue)
+{
+       /* Remove from schedule queue */
+       if (tx_queue->sched) {
+               tx_queue->sched = false;
+               list_del(&tx_queue->sched_list);
+       }
+}
+
+static void cl_txq_sched_list_remove_if_empty(struct cl_tx_queue *tx_queue)
+{
+       /* If queue is empty remove it from schedule list */
+       if (list_empty(&tx_queue->hdrs))
+               cl_txq_sched_list_remove(tx_queue);
+}
+
+static void cl_txq_transfer_single_to_agg(struct cl_hw *cl_hw,
+                                         struct cl_tx_queue *single_queue,
+                                         struct cl_tx_queue *agg_queue, u8 tid)
+{
+       struct cl_sw_txhdr *sw_txhdr, *sw_txhdr_tmp;
+       struct ieee80211_tx_info *tx_info;
+       struct sk_buff *skb;
+       u8 hdr_pads;
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       if (single_queue->num_packets == 0)
+               goto out;
+
+       list_for_each_entry_safe(sw_txhdr, sw_txhdr_tmp, &single_queue->hdrs, tx_queue_list) {
+               if (sw_txhdr->tid != tid)
+                       continue;
+
+               if (!ieee80211_is_data_qos(sw_txhdr->fc))
+                       continue;
+
+               cl_hw->tx_packet_cntr.transfer.single_to_agg++;
+
+               /* Remove from single queue */
+               list_del(&sw_txhdr->tx_queue_list);
+
+               /* Update single queue counters */
+               single_queue->num_packets--;
+               single_queue->total_packets--;
+
+               /* Turn on AMPDU flag */
+               skb = sw_txhdr->skb;
+               tx_info = IEEE80211_SKB_CB(skb);
+               tx_info->flags |= IEEE80211_TX_CTL_AMPDU;
+
+               /* Push skb to agg queue */
+               hdr_pads = CL_SKB_DATA_ALIGN_PADS(skb->data);
+               cl_tx_agg_prep(cl_hw, sw_txhdr, skb->len, hdr_pads, false);
+               agg_queue->total_packets++;
+               sw_txhdr->tx_queue = agg_queue;
+               cl_txq_push(cl_hw, sw_txhdr);
+       }
+
+       /* If single queue is empty remove it from schedule list */
+       cl_txq_sched_list_remove_if_empty(single_queue);
+
+out:
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+static void cl_txq_delete_packets(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue, u8 sta_idx)
+{
+       struct cl_sw_txhdr *sw_txhdr, *sw_txhdr_tmp;
+
+       list_for_each_entry_safe(sw_txhdr, sw_txhdr_tmp, &tx_queue->hdrs, tx_queue_list) {
+               /*
+                * Broadcast frames do not have cl_sta and should not be
+                * deleted during the station remove sequence.
+                */
+               if (!sw_txhdr->cl_sta)
+                       continue;
+
+               if (sw_txhdr->sta_idx != sta_idx)
+                       continue;
+
+               list_del(&sw_txhdr->tx_queue_list);
+               tx_queue->num_packets--;
+
+               cl_tx_single_free_skb(cl_hw, sw_txhdr->skb);
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+       }
+
+       /* If queue is empty remove it from schedule list */
+       cl_txq_sched_list_remove_if_empty(tx_queue);
+}
+
+static void cl_txq_reset_counters(struct cl_tx_queue *tx_queue)
+{
+       tx_queue->total_fw_push_desc = 0;
+       tx_queue->total_fw_push_skb = 0;
+       tx_queue->total_fw_cfm = 0;
+       tx_queue->total_packets = 0;
+       tx_queue->dump_queue_full = 0;
+       tx_queue->dump_dma_map_fail = 0;
+
+       memset(tx_queue->stats_hw_amsdu_cnt, 0,
+              sizeof(tx_queue->stats_hw_amsdu_cnt));
+
+       memset(tx_queue->stats_sw_amsdu_cnt, 0,
+              sizeof(tx_queue->stats_sw_amsdu_cnt));
+}
+
+static u16 cl_txq_desc_in_fw(struct cl_tx_queue *tx_queue)
+{
+       return (tx_queue->fw_max_size - tx_queue->fw_free_space);
+}
+
+static void cl_txq_reset_counters_during_traffic(struct cl_tx_queue *tx_queue)
+{
+       /*
+        * This function can be called during traffic, while descriptors
+        * are still waiting in firmware. We set total_fw_cfm to minus the
+        * number of descriptors in firmware so that, once their confirmations
+        * arrive, total_fw_cfm equals total_fw_push_desc.
+        */
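+       /*
+        * Illustrative example (numbers assumed): if 10 descriptors are still
+        * in firmware when the reset happens, total_fw_cfm starts at -10 and
+        * returns to 0 as their confirmations arrive, matching the (reset)
+        * total_fw_push_desc.
+        */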
+       u32 desc_in_fw = cl_txq_desc_in_fw(tx_queue);
+
+       cl_txq_reset_counters(tx_queue);
+       tx_queue->total_fw_cfm = -desc_in_fw;
+}
+
+static void cl_txq_agg_size_set(struct cl_hw *cl_hw)
+{
+       struct cl_tx_queue *tx_queue = NULL;
+       u16 new_size = 0;
+       u16 drv_max_size = 0;
+       int i = 0;
+       int j = 0;
+
+       if (!cl_hw->used_agg_queues || !cl_hw->conf->ci_tx_packet_limit)
+               return;
+
+       new_size = cl_hw->conf->ci_tx_packet_limit / cl_hw->used_agg_queues;
+       drv_max_size = max(new_size, cl_hw->conf->ci_tx_queue_size_agg);
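+       /*
+        * Example (values assumed): with ci_tx_packet_limit = 1024 and 4 used
+        * agg queues, new_size is 256, so every active agg queue gets
+        * max_packets = max(256, ci_tx_queue_size_agg).
+        */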
+
+       for (i = 0; i < IPC_MAX_BA_SESSIONS; i++) {
+               tx_queue = &cl_hw->tx_queues.agg[i];
+
+               if (!tx_queue->cl_sta)
+                       continue;
+
+               tx_queue->max_packets = drv_max_size;
+
+               j++;
+               if (j == cl_hw->used_agg_queues)
+                       break;
+       }
+
+       cl_dbg_trace(cl_hw, "drv_max_size = %u\n", drv_max_size);
+}
+
+static int cl_txq_request_find(struct cl_hw *cl_hw, u8 sta_idx, u8 tid)
+{
+       int i = 0;
+       struct cl_req_agg_db *req_agg_db = NULL;
+       u8 req_agg_queues = 0;
+
+       for (i = 0; (i < IPC_MAX_BA_SESSIONS) && (req_agg_queues < cl_hw->req_agg_queues); i++) {
+               req_agg_db = &cl_hw->req_agg_db[i];
+
+               if (!req_agg_db->is_used)
+                       continue;
+
+               req_agg_queues++;
+
+               if (sta_idx == req_agg_db->sta_idx && tid == req_agg_db->tid)
+                       return i;
+       }
+
+       return -1;
+}
+
+static void cl_txq_traffic_counters_print_bcmc(struct cl_hw *cl_hw,
+                                              char **buf, int *len, ssize_t *buf_size)
+{
+       struct cl_tx_queue *tx_queue = &cl_hw->tx_queues.bcmc;
+       unsigned long flags;
+       u8 hw_index;
+       u32 total_packets;
+       u16 fw_curr;
+       u32 total_push;
+       u32 total_cfm;
+       u32 dump;
+
+       spin_lock_irqsave(&cl_hw->tx_lock_bcmc, flags);
+
+       hw_index = tx_queue->hw_index;
+       total_packets = tx_queue->total_packets;
+       fw_curr = cl_txq_desc_in_fw(tx_queue);
+       total_push = tx_queue->total_fw_push_skb;
+       total_cfm = tx_queue->total_fw_cfm;
+       dump = cl_txq_total_dump_drv(tx_queue);
+
+       spin_unlock_irqrestore(&cl_hw->tx_lock_bcmc, flags);
+
+       if (total_packets == 0)
+               return;
+
+       cl_snprintf(buf, len, buf_size,
+                   "\nTX MULTICAST AND BROADCAST QUEUE (MAX 1):\n"
+                   "|-----------------------------------------------------------|\n"
+                   "| hw  | driver   | fw      | fw total | fw total | dump     |\n"
+                   "| idx | total    | current | push     | cfm      |          |\n"
+                   "|-----+----------+---------+----------+----------+----------|\n"
+                   "| %3u |%10u|%9u|%10u|%10u|%10u|\n",
+                   hw_index, total_packets, fw_curr, total_push, total_cfm, dump);
+       cl_snprintf(buf, len, buf_size,
+                   "|-----------------------------------------------------------|\n");
+}
+
+static void cl_txq_traffic_counters_print_single(struct cl_hw *cl_hw,
+                                                char **buf, int *len, ssize_t *buf_size)
+{
+       u16 queue_idx = 0;
+       u32 sta_idx = 0, ac = 0;
+       struct cl_tx_queue *tx_queue;
+
+       cl_snprintf(buf, len, buf_size,
+                   "\nTX SINGLE QUEUES (MAX %d):\n", MAX_SINGLE_QUEUES);
+       cl_snprintf(buf, len, buf_size,
+                   "|----------------------------------------------------------------------"
+                   "----------|\n"
+                   "| idx | sta | ac | driver   | driver  | fw      | fw total | fw total |"
+                   " dump     |\n"
+                   "|     |     |    | total    | current | current | push     | cfm      |"
+                   "          |\n"
+                   "|-----+-----+----+----------+---------+---------+----------+----------+"
+                   "----------|\n");
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       for (sta_idx = 0; sta_idx < FW_MAX_NUM_STA; sta_idx++) {
+               for (ac = 0; ac < AC_MAX; ac++) {
+                       queue_idx = QUEUE_IDX(sta_idx, ac);
+                       tx_queue = &cl_hw->tx_queues.single[queue_idx];
+
+                       if (tx_queue->total_packets == 0)
+                               continue;
+
+                       if (tx_queue->index == HIGH_PRIORITY_QUEUE)
+                               cl_snprintf(buf, len, buf_size,
+                                           "|-----+-----+----+----------+---------+---------+"
+                                           "----------+----------+----------|\n");
+
+                       cl_snprintf(buf, len, buf_size,
+                                   "| %3u | %3u | %2u |%10u|%9u|%9u|%10u|%10u|%10u|\n",
+                                   tx_queue->index,
+                                   sta_idx,
+                                   tx_queue->hw_index,
+                                   tx_queue->total_packets,
+                                   tx_queue->num_packets,
+                                   cl_txq_desc_in_fw(tx_queue),
+                                   tx_queue->total_fw_push_skb,
+                                   tx_queue->total_fw_cfm,
+                                   cl_txq_total_dump_drv(tx_queue));
+               }
+       }
+
+       cl_snprintf(buf, len, buf_size,
+                   "|------------------------------------------------------------------------"
+                   "--------|\n");
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+static void cl_txq_traffic_counters_print_agg(struct cl_hw *cl_hw,
+                                             char **buf, int *len, ssize_t *buf_size)
+{
+       u32 ba_idx = 0;
+       struct cl_tx_queue *tx_queue;
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       if (cl_hw->used_agg_queues == 0)
+               goto out;
+
+       cl_snprintf(buf, len, buf_size,
+                   "\nTX AGGREGATION QUEUES (MAX %d):\n", IPC_MAX_BA_SESSIONS);
+       cl_snprintf(buf, len, buf_size,
+                   "|-----------------------------------------------------------------------"
+                   "---------------------|\n"
+                   "| idx | sta | tid | driver   | driver  | fw      | fw total | fw total |"
+                   " fw total | dump     |\n"
+                   "|     | idx |     | total    | current | current | push skb | push desc|"
+                   " cfm      |          |\n"
+                   "|-----+-----+-----+----------+---------+---------+----------+----------+"
+                   "----------+----------|\n");
+
+       for (ba_idx = 0; ba_idx < IPC_MAX_BA_SESSIONS; ba_idx++) {
+               tx_queue = &cl_hw->tx_queues.agg[ba_idx];
+
+               if (!cl_hw->tx_queues.agg[ba_idx].cl_sta)
+                       continue;
+
+               if (tx_queue->total_packets == 0)
+                       continue;
+
+               cl_snprintf(buf, len, buf_size,
+                           "| %3u | %3u | %3u |%10u|%9u|%9u|%10u|%10u|%10u|%10u|\n",
+                           tx_queue->index,
+                           tx_queue->cl_sta->sta_idx,
+                           tx_queue->tid,
+                           tx_queue->total_packets,
+                           tx_queue->num_packets,
+                           cl_txq_desc_in_fw(tx_queue),
+                           tx_queue->total_fw_push_skb,
+                           tx_queue->total_fw_push_desc,
+                           tx_queue->total_fw_cfm,
+                           cl_txq_total_dump_drv(tx_queue));
+       }
+
+       cl_snprintf(buf, len, buf_size,
+                   "|------------------------------------------------------------------------"
+                   "--------------------|\n");
+
+out:
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+}
+
+static void cl_txq_traffic_counters_print_mac(struct cl_hw *cl_hw,
+                                             char **buf, int *len, ssize_t *buf_size)
+{
+       struct ieee80211_local *local = hw_to_local(cl_hw->hw);
+       u32 i = 0, total_len = 0, q_len[IEEE80211_MAX_QUEUES] = {0};
+       unsigned long flags;
+
+       spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+       for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
+               q_len[i] = skb_queue_len(&local->pending[i]);
+               total_len += q_len[i];
+       }
+
+       spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+       if (total_len == 0)
+               return;
+
+       cl_snprintf(buf, len, buf_size,
+                   "\nMAC80211 PENDING QUEUES (MAX %d):\n", IEEE80211_MAX_QUEUES);
+       cl_snprintf(buf, len, buf_size,
+                   "|--------------------|\n"
+                   "| queue |  current   |\n"
+                   "|-------+------------|\n");
+
+       for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+               if (q_len[i] > 0)
+                       cl_snprintf(buf, len, buf_size, "| %5u | %10u |\n", i, q_len[i]);
+
+       cl_snprintf(buf, len, buf_size, "|--------------------|\n");
+}
+
+static int cl_txq_traffic_counters_print(struct cl_hw *cl_hw)
+{
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       cl_txq_traffic_counters_print_bcmc(cl_hw, &buf, &len, &buf_size);
+       cl_txq_traffic_counters_print_single(cl_hw, &buf, &len, &buf_size);
+       cl_txq_traffic_counters_print_agg(cl_hw, &buf, &len, &buf_size);
+       cl_txq_traffic_counters_print_mac(cl_hw, &buf, &len, &buf_size);
+
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static void cl_txq_drop_reasons_print_bcmc(struct cl_hw *cl_hw,
+                                          char **buf, int *len, ssize_t *buf_size)
+{
+       u32 total = 0;
+       u32 dump_queue_full = 0;
+       u32 dump_dma_map_fail = 0;
+       struct cl_tx_queue *tx_queue = &cl_hw->tx_queues.bcmc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cl_hw->tx_lock_bcmc, flags);
+
+       total = cl_txq_total_dump_drv(tx_queue);
+       dump_queue_full = tx_queue->dump_queue_full;
+       dump_dma_map_fail = tx_queue->dump_dma_map_fail;
+
+       spin_unlock_irqrestore(&cl_hw->tx_lock_bcmc, flags);
+
+       if (total > 0)
+               cl_snprintf(buf, len, buf_size,
+                           "|bcmc  |     |%10u|%10u|%10u|\n",
+                           dump_queue_full, dump_dma_map_fail, total);
+}
+
+static void cl_txq_drop_reasons_print_single(struct cl_hw *cl_hw,
+                                            char **buf, int *len, ssize_t *buf_size)
+{
+       u32 i = 0, total = 0;
+       struct cl_tx_queue *tx_queue;
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++) {
+               tx_queue = &cl_hw->tx_queues.single[i];
+               total = cl_txq_total_dump_drv(tx_queue);
+
+               if (total == 0)
+                       continue;
+
+               cl_snprintf(buf, len, buf_size,
+                           "|single|%5u|%10u|%10u|%10u|\n",
+                           tx_queue->index,
+                           tx_queue->dump_queue_full,
+                           tx_queue->dump_dma_map_fail,
+                           total);
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+static void cl_txq_drop_reasons_print_agg(struct cl_hw *cl_hw,
+                                         char **buf, int *len, ssize_t *buf_size)
+{
+       u32 i = 0, total = 0;
+       struct cl_tx_queue *tx_queue;
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       for (i = 0; i < IPC_MAX_BA_SESSIONS; i++) {
+               tx_queue = &cl_hw->tx_queues.agg[i];
+               total = cl_txq_total_dump_drv(tx_queue);
+
+               if (total == 0)
+                       continue;
+
+               cl_snprintf(buf, len, buf_size,
+                           "|agg   |%5u|%10u|%10u|%10u|\n",
+                           tx_queue->index,
+                           tx_queue->dump_queue_full,
+                           tx_queue->dump_dma_map_fail,
+                           total);
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+}
+
+static int cl_txq_drop_reasons_print(struct cl_hw *cl_hw)
+{
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "-----------------------------------------------\n"
+                   "| type | idx | queue    | dma map  | total    |\n"
+                   "|      |     | full     | fail     | dump     |\n"
+                   "|------+-----+----------+----------+----------|\n");
+
+       cl_txq_drop_reasons_print_bcmc(cl_hw, &buf, &len, &buf_size);
+       cl_txq_drop_reasons_print_single(cl_hw, &buf, &len, &buf_size);
+       cl_txq_drop_reasons_print_agg(cl_hw, &buf, &len, &buf_size);
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "-----------------------------------------------\n");
+
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static int cl_txq_global_counters_print(struct cl_hw *cl_hw)
+{
+       struct cl_tx_forward_cntr *forward = &cl_hw->tx_packet_cntr.forward;
+       struct cl_tx_drop_cntr *drop = &cl_hw->tx_packet_cntr.drop;
+       struct cl_tx_transfer_cntr *transfer = &cl_hw->tx_packet_cntr.transfer;
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "FORWARD\n"
+                   "----------------------------\n"
+                   "tx_start          = %u\n", forward->tx_start);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "drv_fast_agg      = %u\n", forward->drv_fast_agg);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "drv_fast_single   = %u\n", forward->drv_fast_single);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "to_mac            = %u\n", forward->to_mac);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "from_mac_single   = %u\n", forward->from_mac_single);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "from_mac_agg      = %u\n", forward->from_mac_agg);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "DROP\n"
+                   "----------------------------\n"
+                   "wd_restart        = %u\n", drop->wd_restart);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "radio_off         = %u\n", drop->radio_off);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "in_recovery       = %u\n", drop->in_recovery);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "short_length      = %u\n", drop->short_length);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pending_full      = %u\n", drop->pending_full);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "packet_limit      = %u\n", drop->packet_limit);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "dev_flags         = %u\n", drop->dev_flags);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "length_limit      = %u\n", drop->length_limit);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "txhdr_alloc_fail  = %u\n", drop->txhdr_alloc_fail);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "queue_null        = %u\n", drop->queue_null);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "amsdu_alloc_fail  = %u\n", drop->amsdu_alloc_fail);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "amsdu_dma_map_err = %u\n", drop->amsdu_dma_map_err);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "build_hdr_fail    = %u\n", drop->build_hdr_fail);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "key_disable       = %u\n", drop->key_disable);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "queue_flush       = %u\n", drop->queue_flush);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "sta_null_in_agg   = %u\n", drop->sta_null_in_agg);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "TRANSFER\n"
+                   "----------------------------\n"
+                   "single_to_agg     = %u\n", transfer->single_to_agg);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "agg_to_single     = %u\n", transfer->agg_to_single);
+
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static int cl_txq_stop_reasons_print(struct cl_hw *cl_hw)
+{
+       struct ieee80211_local *local = hw_to_local(cl_hw->hw);
+       unsigned long queue_stop_reasons;
+       unsigned long flags;
+       u8 i = 0;
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "|------------------------|\n"
+                   "|queue|queue_stop_reasons|\n"
+                   "|-----+------------------|\n");
+
+       for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
+               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+               queue_stop_reasons = local->queue_stop_reasons[i];
+               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+               if (queue_stop_reasons)
+                       cl_snprintf(&buf, &len, &buf_size, "|%5u|0x%-16lx|\n",
+                                   i, queue_stop_reasons);
+       }
+
+       cl_snprintf(&buf, &len, &buf_size, "|------------------------|\n");
+
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static int cl_txq_requested_agg_print(struct cl_hw *cl_hw)
+{
+       u8 i = 0;
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "## used_agg_queues = %u\n", cl_hw->used_agg_queues);
+
+       if (cl_hw->used_agg_queues) {
+               for (i = 0; i < IPC_MAX_BA_SESSIONS; i++) {
+                       if (!cl_hw->tx_queues.agg[i].cl_sta)
+                               continue;
+
+                       cl_snprintf(&buf, &len, &buf_size,
+                                   "%u) sta_idx = %u, tid = %u\n", i + 1,
+                                   cl_hw->tx_queues.agg[i].cl_sta->sta_idx,
+                                   cl_hw->tx_queues.agg[i].tid);
+               }
+       }
+
+       cl_snprintf(&buf, &len, &buf_size, "## req_agg_queues = %u\n", cl_hw->req_agg_queues);
+
+       if (cl_hw->req_agg_queues) {
+               for (i = 0; i < IPC_MAX_BA_SESSIONS; i++) {
+                       if (!cl_hw->req_agg_db[i].is_used)
+                               continue;
+
+                       cl_snprintf(&buf, &len, &buf_size, "%u) sta_idx = %u, tid = %u\n",
+                                   i + 1, cl_hw->req_agg_db[i].sta_idx,
+                                   cl_hw->req_agg_db[i].tid);
+               }
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static void cl_txq_hw_amsdu_stats_print(struct cl_hw *cl_hw,
+                                       char **buf, int *len, ssize_t *buf_size)
+{
+       u32 i, j;
+       struct cl_tx_queue *tx_queue;
+
+       cl_snprintf(buf, len, buf_size, "HW TX-AMSDU STATS:\n");
+
+       cl_snprintf(buf, len, buf_size, "|-----------");
+       for (i = 0; i < CL_AMSDU_TX_PAYLOAD_MAX; i++)
+               cl_snprintf(buf, len, buf_size, "-----------");
+
+       cl_snprintf(buf, len, buf_size, "|\n|agg|sta|tid");
+       for (i = 0; i < CL_AMSDU_TX_PAYLOAD_MAX; i++)
+               cl_snprintf(buf, len, buf_size, "| amsdu #%u ", i + 1);
+
+       cl_snprintf(buf, len, buf_size, "|\n|---+---+---");
+       for (i = 0; i < CL_AMSDU_TX_PAYLOAD_MAX; i++)
+               cl_snprintf(buf, len, buf_size, "+----------");
+
+       cl_snprintf(buf, len, buf_size, "|\n");
+
+       for (j = 0; j < IPC_MAX_BA_SESSIONS; j++) {
+               tx_queue = &cl_hw->tx_queues.agg[j];
+
+               if (!tx_queue->cl_sta)
+                       continue;
+
+               if (tx_queue->total_packets == 0)
+                       continue;
+
+               cl_snprintf(buf, len, buf_size, "|%3u|%3u|%3u",
+                           tx_queue->index,
+                           tx_queue->cl_sta->sta_idx,
+                           tx_queue->tid);
+
+               for (i = 0; i < CL_AMSDU_TX_PAYLOAD_MAX; i++)
+                       cl_snprintf(buf, len, buf_size, "|%10u", tx_queue->stats_hw_amsdu_cnt[i]);
+
+               cl_snprintf(buf, len, buf_size, "|\n");
+       }
+
+       cl_snprintf(buf, len, buf_size,  "|-----------");
+       for (i = 0; i < CL_AMSDU_TX_PAYLOAD_MAX; i++)
+               cl_snprintf(buf, len, buf_size, "-----------");
+
+       cl_snprintf(buf, len, buf_size, "|\n");
+}
+
+static void cl_txq_sw_amsdu_stats_print(struct cl_hw *cl_hw,
+                                       char **buf, int *len, ssize_t *buf_size)
+{
+       u32 i, j;
+       struct cl_tx_queue *tx_queue;
+
+       if (cl_hw->conf->ci_tx_sw_amsdu_max_packets < 2)
+               return;
+
+       cl_snprintf(buf, len, buf_size, "SW TX-AMSDU STATS:\n");
+
+       for (j = 0; j < IPC_MAX_BA_SESSIONS; j++) {
+               tx_queue = &cl_hw->tx_queues.agg[j];
+
+               if (!tx_queue->cl_sta)
+                       continue;
+
+               if (tx_queue->total_packets == 0)
+                       continue;
+
+               cl_snprintf(buf, len, buf_size,
+                           "\nagg idx %u, sta %u, tid %u :\n",
+                           tx_queue->index,
+                           tx_queue->cl_sta->sta_idx,
+                           tx_queue->tid);
+               cl_snprintf(buf, len, buf_size, "----------------------------\n");
+
+               for (i = 0; i < cl_hw->conf->ci_tx_sw_amsdu_max_packets; i++)
+                       if (tx_queue->stats_sw_amsdu_cnt[i] > 0)
+                               cl_snprintf(buf, len, buf_size,
+                                           "amsdu #%u = %u\n", i + 1,
+                                           tx_queue->stats_sw_amsdu_cnt[i]);
+       }
+}
+
+static int cl_txq_amsdu_stats_print(struct cl_hw *cl_hw)
+{
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       if (cl_hw->used_agg_queues == 0)
+               goto out;
+
+       cl_txq_hw_amsdu_stats_print(cl_hw, &buf, &len, &buf_size);
+       cl_txq_sw_amsdu_stats_print(cl_hw, &buf, &len, &buf_size);
+out:
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static void cl_txq_max_size_print_single(struct cl_hw *cl_hw,
+                                        char **buf, int *len, ssize_t *buf_size)
+{
+       u8 ac = 0;
+       u8 sta_idx = 0;
+       u16 queue_idx = 0;
+       struct cl_tx_queue *tx_queue;
+
+       cl_snprintf(buf, len, buf_size,
+                   "MAX SIZE SINGLE QUEUES:\n"
+                   "|----------------------|\n"
+                   "|idx|sta|ac| drv | fw  |\n"
+                   "|---+---+--+-----+-----|\n");
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       for (sta_idx = 0; sta_idx < FW_MAX_NUM_STA; sta_idx++) {
+               for (ac = 0; ac < AC_MAX; ac++) {
+                       queue_idx = QUEUE_IDX(sta_idx, ac);
+                       tx_queue = &cl_hw->tx_queues.single[queue_idx];
+
+                       if (tx_queue->total_fw_push_skb == 0)
+                               continue;
+
+                       cl_snprintf(buf, len, buf_size,
+                                   "|%3u|%3u|%2u|%5u|%5u|\n",
+                                   tx_queue->index,
+                                   sta_idx,
+                                   tx_queue->hw_index,
+                                   tx_queue->max_packets,
+                                   tx_queue->fw_max_size);
+               }
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+
+       cl_snprintf(buf, len, buf_size, "|----------------------|\n");
+}
+
+static void cl_txq_max_size_print_agg(struct cl_hw *cl_hw,
+                                     char **buf, int *len, ssize_t *buf_size)
+{
+       u8 agg_idx = 0;
+       struct cl_tx_queue *tx_queue;
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       if (cl_hw->used_agg_queues == 0)
+               goto out;
+
+       cl_snprintf(buf, len, buf_size,
+                   "MAX SIZE AGGREGATION QUEUES:\n"
+                   "|-----------------------|\n"
+                   "|idx|sta|tid| drv | fw  |\n"
+                   "|---+---+---+-----+-----|\n");
+
+       for (agg_idx = 0; agg_idx < IPC_MAX_BA_SESSIONS; agg_idx++) {
+               tx_queue = &cl_hw->tx_queues.agg[agg_idx];
+
+               if (!tx_queue->cl_sta)
+                       continue;
+
+               cl_snprintf(buf, len, buf_size,
+                           "|%3u|%3u|%3u|%5u|%5u|\n",
+                           tx_queue->index,
+                           tx_queue->cl_sta->sta_idx,
+                           tx_queue->tid,
+                           tx_queue->max_packets,
+                           tx_queue->fw_max_size);
+       }
+
+       cl_snprintf(buf, len, buf_size, "|-----------------------|\n");
+
+out:
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+}
+
+static int cl_txq_max_size_print(struct cl_hw *cl_hw)
+{
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       cl_txq_max_size_print_single(cl_hw, &buf, &len, &buf_size);
+       cl_txq_max_size_print_agg(cl_hw, &buf, &len, &buf_size);
+
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static void cl_txq_stats_reset_bcmc(struct cl_hw *cl_hw)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cl_hw->tx_lock_bcmc, flags);
+       cl_txq_reset_counters_during_traffic(&cl_hw->tx_queues.bcmc);
+       spin_unlock_irqrestore(&cl_hw->tx_lock_bcmc, flags);
+}
+
+static void cl_txq_stats_reset_single(struct cl_hw *cl_hw)
+{
+       u16 i = 0;
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++)
+               cl_txq_reset_counters_during_traffic(&cl_hw->tx_queues.single[i]);
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+static void cl_txq_stats_reset_agg(struct cl_hw *cl_hw)
+{
+       u16 i = 0;
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       for (i = 0; i < IPC_MAX_BA_SESSIONS; i++)
+               cl_txq_reset_counters_during_traffic(&cl_hw->tx_queues.agg[i]);
+
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+}
+
+static void cl_txq_stats_reset(struct cl_hw *cl_hw)
+{
+       cl_txq_stats_reset_bcmc(cl_hw);
+       cl_txq_stats_reset_single(cl_hw);
+       cl_txq_stats_reset_agg(cl_hw);
+
+       memset(&cl_hw->tx_packet_cntr, 0, sizeof(struct cl_tx_packet_cntr));
+
+       pr_debug("Reset queue stats\n");
+}
+
+static void cl_txq_sched_list_print_single(struct cl_hw *cl_hw,
+                                          char **buf, int *len, ssize_t *buf_size)
+{
+       struct cl_tx_queue *tx_queue;
+       u32 num_queues = 0;
+
+       cl_snprintf(buf, len, buf_size, "TX single sched list:\n");
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       list_for_each_entry(tx_queue, &cl_hw->list_sched_q_single, sched_list) {
+               num_queues++;
+               cl_snprintf(buf, len, buf_size, "%u) Index = %u\n", num_queues, tx_queue->index);
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+
+       if (num_queues == 0)
+               cl_snprintf(buf, len, buf_size, "empty\n");
+}
+
+static void cl_txq_sched_list_print_agg(struct cl_hw *cl_hw,
+                                       char **buf, int *len, ssize_t *buf_size)
+{
+       struct cl_tx_queue *tx_queue;
+       u32 num_queues = 0;
+
+       cl_snprintf(buf, len, buf_size, "\nTX agg sched list:\n");
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       list_for_each_entry(tx_queue, &cl_hw->list_sched_q_agg, sched_list) {
+               num_queues++;
+               cl_snprintf(buf, len, buf_size, "%u) Index = %u\n", num_queues, tx_queue->index);
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+
+       if (num_queues == 0)
+               cl_snprintf(buf, len, buf_size, "empty\n");
+}
+
+static int cl_txq_sched_list_print(struct cl_hw *cl_hw)
+{
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       cl_txq_sched_list_print_single(cl_hw, &buf, &len, &buf_size);
+       cl_txq_sched_list_print_agg(cl_hw, &buf, &len, &buf_size);
+
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+static int cl_txq_cli_help(struct cl_hw *cl_hw)
+{
+       char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       int err = 0;
+
+       if (!buf)
+               return -ENOMEM;
+
+       snprintf(buf, PAGE_SIZE,
+                "txq usage:\n"
+                "-a : Print traffic counters\n"
+                "-b : Print drop counters\n"
+                "-c : Print global counters\n"
+                "-d : Print stop reasons\n"
+                "-e : Print requested aggregations\n"
+                "-f : Print AMSDU statistics\n"
+                "-m : Print maximum queues size\n"
+                "-r : Reset queue stats\n"
+                "-s : Print schedule list\n");
+
+       err = cl_vendor_reply(cl_hw, buf, strlen(buf));
+       kfree(buf);
+
+       return err;
+}
+
+static void cl_txq_task_single(struct cl_hw *cl_hw)
+{
+       /* Schedule single queues */
+       struct cl_tx_queue *tx_queue, *tx_queue_tmp;
+
+       spin_lock(&cl_hw->tx_lock_single);
+
+       list_for_each_entry_safe(tx_queue, tx_queue_tmp, &cl_hw->list_sched_q_single, sched_list)
+               cl_txq_sched(cl_hw, tx_queue);
+
+       /* Rotate the queue so next schedule will start with a different queue */
+       list_rotate_left(&cl_hw->list_sched_q_single);
+
+       spin_unlock(&cl_hw->tx_lock_single);
+}
+
+static void cl_txq_task_agg(struct cl_hw *cl_hw)
+{
+       /* Schedule agg queues */
+       struct cl_tx_queue *tx_queue, *tx_queue_tmp;
+
+       spin_lock(&cl_hw->tx_lock_agg);
+
+       list_for_each_entry_safe(tx_queue, tx_queue_tmp, &cl_hw->list_sched_q_agg, sched_list)
+               cl_txq_sched(cl_hw, tx_queue);
+
+       /* Rotate the queue so next schedule will start with a different queue */
+       list_rotate_left(&cl_hw->list_sched_q_agg);
+
+       spin_unlock(&cl_hw->tx_lock_agg);
+}
+
+static void cl_txq_task(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+
+       cl_txq_task_single(cl_hw);
+       cl_txq_task_agg(cl_hw);
+}
+
+static void cl_txq_agg_inc_usage_cntr(struct cl_hw *cl_hw)
+{
+       /* Should be called in cl_hw->tx_lock_agg context */
+       cl_hw->used_agg_queues++;
+       cl_txq_agg_size_set(cl_hw);
+}
+
+static void cl_txq_agg_dec_usage_cntr(struct cl_hw *cl_hw)
+{
+       /* Should be called in cl_hw->tx_lock_agg context */
+       WARN_ON_ONCE(cl_hw->used_agg_queues == 0);
+
+       cl_hw->used_agg_queues--;
+       cl_txq_agg_size_set(cl_hw);
+}
+
+static void cl_txq_init_single(struct cl_hw *cl_hw)
+{
+       struct cl_tx_queue *tx_queue;
+       u32 i;
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       INIT_LIST_HEAD(&cl_hw->list_sched_q_single);
+
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++) {
+               tx_queue = &cl_hw->tx_queues.single[i];
+               memset(tx_queue, 0, sizeof(struct cl_tx_queue));
+               INIT_LIST_HEAD(&tx_queue->hdrs);
+               tx_queue->hw_index = i / FW_MAX_NUM_STA;
+               tx_queue->fw_max_size = IPC_TXDESC_CNT_SINGLE;
+               tx_queue->fw_free_space = IPC_TXDESC_CNT_SINGLE;
+               tx_queue->index = i;
+               tx_queue->max_packets = cl_hw->conf->ci_tx_queue_size_single;
+               tx_queue->type = QUEUE_TYPE_SINGLE;
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+static void cl_txq_init_bcmc(struct cl_hw *cl_hw)
+{
+       unsigned long flags;
+       struct cl_tx_queue *tx_queue;
+
+       spin_lock_irqsave(&cl_hw->tx_lock_bcmc, flags);
+
+       tx_queue = &cl_hw->tx_queues.bcmc;
+       memset(tx_queue, 0, sizeof(struct cl_tx_queue));
+       INIT_LIST_HEAD(&tx_queue->hdrs);
+       tx_queue->hw_index = CL_HWQ_BCN;
+       tx_queue->fw_max_size = IPC_TXDESC_CNT_BCMC;
+       tx_queue->fw_free_space = IPC_TXDESC_CNT_BCMC;
+       tx_queue->index = 0;
+       tx_queue->max_packets = 0;
+       tx_queue->type = QUEUE_TYPE_BCMC;
+
+       spin_unlock_irqrestore(&cl_hw->tx_lock_bcmc, flags);
+}
+
+static void cl_txq_init_agg(struct cl_hw *cl_hw)
+{
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+       INIT_LIST_HEAD(&cl_hw->list_sched_q_agg);
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+}
+
+static void cl_txq_agg_request_reset(struct cl_hw *cl_hw)
+{
+       cl_hw->req_agg_queues = 0;
+       memset(cl_hw->req_agg_db, 0, sizeof(cl_hw->req_agg_db));
+}
+
+void cl_txq_init(struct cl_hw *cl_hw)
+{
+       tasklet_init(&cl_hw->tx_task, cl_txq_task, (unsigned long)cl_hw);
+
+       cl_txq_agg_request_reset(cl_hw);
+       cl_txq_init_single(cl_hw);
+       cl_txq_init_bcmc(cl_hw);
+       cl_txq_init_agg(cl_hw);
+}
+
+void cl_txq_stop(struct cl_hw *cl_hw)
+{
+       tasklet_kill(&cl_hw->tx_task);
+}
+
+struct cl_tx_queue *cl_txq_get(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr)
+{
+       struct cl_sta *cl_sta = sw_txhdr->cl_sta;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(sw_txhdr->skb);
+       u8 hw_queue = sw_txhdr->hw_queue;
+
+       if (!cl_sta &&
+           hw_queue == CL_HWQ_VO &&
+           is_multicast_ether_addr(sw_txhdr->hdr80211->addr1)) {
+               /*
+                * If HW queue is VO and packet is multicast, it was not buffered
+                * by mac80211, and it should be pushed to the high-priority queue
+                * and not to the bcmc queue.
+                */
+               return &cl_hw->tx_queues.single[HIGH_PRIORITY_QUEUE];
+       } else if (!cl_sta &&
+                  (hw_queue != CL_HWQ_BCN) &&
+                  !(tx_info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
+               /*
+                * If station is NULL, but HW queue is not BCN,
+                * it must go to the high-priority queue.
+                */
+               tx_info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+               sw_txhdr->hw_queue = CL_HWQ_VO;
+               return &cl_hw->tx_queues.single[HIGH_PRIORITY_QUEUE];
+       } else if (cl_sta && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+               /* Agg packet */
+               return cl_sta->agg_tx_queues[sw_txhdr->tid];
+       } else if (hw_queue == CL_HWQ_BCN) {
+               return &cl_hw->tx_queues.bcmc;
+       } else if (tx_info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+               /*
+                * Only frames that are a power-save response or a non-bufferable
+                * MMPDU have this flag set; our driver pushes those frames to the
+                * highest priority queue.
+                */
+               return &cl_hw->tx_queues.single[HIGH_PRIORITY_QUEUE];
+       }
+
+       return &cl_hw->tx_queues.single[QUEUE_IDX(sw_txhdr->sta_idx, hw_queue)];
+}
+
+void cl_txq_push(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr)
+{
+       struct cl_tx_queue *tx_queue = sw_txhdr->tx_queue;
+
+       if (tx_queue->num_packets < tx_queue->max_packets) {
+               tx_queue->num_packets++;
+
+               /*
+                * This prioritization of action frames helps the Samsung Galaxy Note 8
+                * open a BA session more easily when the phy dev is PHY_DEV_OLYMPUS.
+                */
+               if (ieee80211_is_action(sw_txhdr->fc))
+                       list_add(&sw_txhdr->tx_queue_list, &tx_queue->hdrs);
+               else
+                       list_add_tail(&sw_txhdr->tx_queue_list, &tx_queue->hdrs);
+
+               /* If it is the first packet in the queue, add the queue to the sched list */
+               cl_txq_sched_list_add(tx_queue, cl_hw);
+       } else {
+               struct cl_sta *cl_sta = sw_txhdr->cl_sta;
+               u8 tid = sw_txhdr->tid;
+
+               /* If the SW queue is full, release the packet */
+               tx_queue->dump_queue_full++;
+
+               if (cl_sta && cl_sta->amsdu_anchor[tid].sw_txhdr) {
+                       if (cl_sta->amsdu_anchor[tid].sw_txhdr == sw_txhdr) {
+                               cl_sta->amsdu_anchor[tid].sw_txhdr = NULL;
+                               cl_sta->amsdu_anchor[tid].packet_cnt = 0;
+                       }
+               }
+
+               dev_kfree_skb_any(sw_txhdr->skb);
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+
+               /* Schedule tasklet to try and empty the queue */
+               tasklet_schedule(&cl_hw->tx_task);
+       }
+}
+
+void cl_txq_sched(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue)
+{
+       struct cl_sw_txhdr *sw_txhdr, *sw_txhdr_tmp;
+
+       if (!test_bit(CL_DEV_STARTED, &cl_hw->drv_flags) ||
+           cl_hw->tx_disable_flags ||
+           cl_txq_is_fw_full(tx_queue))
+               return;
+
+       /* Go over all descriptors in queue */
+       list_for_each_entry_safe(sw_txhdr, sw_txhdr_tmp, &tx_queue->hdrs, tx_queue_list) {
+               list_del(&sw_txhdr->tx_queue_list);
+               tx_queue->num_packets--;
+
+               cl_tx_push(cl_hw, sw_txhdr, tx_queue);
+
+               if (cl_txq_is_fw_full(tx_queue))
+                       break;
+       }
+
+       /* If queue is empty remove it from schedule list */
+       cl_txq_sched_list_remove_if_empty(tx_queue);
+}
+
+void cl_txq_agg_alloc(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+                     struct mm_ba_add_cfm *ba_add_cfm, u16 buf_size)
+{
+       u8 tid = ba_add_cfm->tid;
+       u8 fw_agg_idx = ba_add_cfm->agg_idx;
+       u8 sta_idx = cl_sta->sta_idx;
+       u8 ac = cl_tid2hwq[tid];
+       u16 single_queue_idx = QUEUE_IDX(sta_idx, ac);
+       struct cl_tx_queue *tx_queue = &cl_hw->tx_queues.agg[fw_agg_idx];
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       /* Init aggregated queue struct */
+       memset(tx_queue, 0, sizeof(struct cl_tx_queue));
+       INIT_LIST_HEAD(&tx_queue->hdrs);
+
+       /*
+        * Firmware agg queue size is static and set to 512, so that for the
+        * worst case of HE stations, which support an AMPDU of 256, it has
+        * room for two full aggregations.
+        * To keep this logic of room for two aggregations for non-HE stations,
+        * or for HE stations that do not support an AMPDU of 256, we initialize
+        * fw_max_size to twice the buffer size supported by the station.
+        */
+       tx_queue->fw_max_size = min_t(u16, cl_hw->max_agg_tx_q_size, buf_size * 2);
+       tx_queue->fw_free_space = tx_queue->fw_max_size;
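+       /*
+        * For example (assuming max_agg_tx_q_size is 512): a station that
+        * advertises buf_size = 64 gets fw_max_size = min(512, 128) = 128,
+        * while an HE station with buf_size = 256 gets the full 512.
+        */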
+
+       tx_queue->max_packets = cl_hw->conf->ci_tx_queue_size_agg;
+       tx_queue->hw_index = ac;
+       tx_queue->cl_sta = cl_sta;
+       tx_queue->type = QUEUE_TYPE_AGG;
+       tx_queue->tid = tid;
+       tx_queue->index = fw_agg_idx;
+
+#ifdef CONFIG_CL_PCIE
+       /* Reset the synchronization counters between the fw and the IPC layer */
+       cl_hw->ipc_env->ring_indices_elem->indices->txdesc_write_idx.agg[fw_agg_idx] = 0;
+#endif
+
+       /* Attach the cl_hw chosen queue to the station and agg queues DB */
+       cl_sta->agg_tx_queues[tid] = tx_queue;
+       cl_agg_cfm_set_tx_queue(cl_hw, tx_queue, fw_agg_idx);
+
+       /* Notify upper mac80211 layer of queues resources status */
+       cl_txq_agg_inc_usage_cntr(cl_hw);
+       cl_txq_agg_request_del(cl_hw, sta_idx, tid);
+
+       /*
+        * Move the QoS descriptors to the newly allocated aggregation queue,
+        * otherwise we might reorder packets.
+        */
+       cl_txq_transfer_single_to_agg(cl_hw, &cl_hw->tx_queues.single[single_queue_idx],
+                                     tx_queue, tid);
+       /* Move the BA window pending packets to agg path */
+       cl_baw_pending_to_agg(cl_hw, cl_sta, tid);
+
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+
+       cl_dbg_trace(cl_hw, "Allocate queue [%u] to station [%u] tid [%u]\n",
+                    fw_agg_idx, sta_idx, tid);
+}
+
+void cl_txq_agg_free(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue,
+                    struct cl_sta *cl_sta, u8 tid)
+{
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       cl_dbg_trace(cl_hw, "Free queue [%u] of station [%u] tid [%u]\n",
+                    tx_queue->index, cl_sta->sta_idx, tid);
+
+       memset(tx_queue, 0, sizeof(struct cl_tx_queue));
+
+       cl_txq_agg_dec_usage_cntr(cl_hw);
+
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+}
+
+void cl_txq_agg_stop(struct cl_sta *cl_sta, u8 tid)
+{
+       cl_sta->agg_tx_queues[tid] = NULL;
+}
+
+void cl_txq_sta_add(struct cl_hw *cl_hw, struct cl_sta *cl_sta)
+{
+       /* Set cl_sta field for all single queues of this station */
+       u8 ac;
+       u16 queue_idx;
+
+       for (ac = 0; ac < AC_MAX; ac++) {
+               queue_idx = QUEUE_IDX(cl_sta->sta_idx, ac);
+               cl_hw->tx_queues.single[queue_idx].cl_sta = cl_sta;
+       }
+
+       /* Reset pointers to TX agg queues */
+       memset(cl_sta->agg_tx_queues, 0, sizeof(cl_sta->agg_tx_queues));
+}
+
+void cl_txq_sta_remove(struct cl_hw *cl_hw, u8 sta_idx)
+{
+       /* Clear cl_sta field for all single queues of this station */
+       u8 ac;
+       u16 queue_idx;
+
+       for (ac = 0; ac < AC_MAX; ac++) {
+               queue_idx = QUEUE_IDX(sta_idx, ac);
+               cl_hw->tx_queues.single[queue_idx].cl_sta = NULL;
+       }
+}
+
+void cl_txq_transfer_agg_to_single(struct cl_hw *cl_hw, struct cl_tx_queue *agg_queue)
+{
+       /*
+        * 1) Remove from aggregation queue
+        * 2) Free sw_txhdr
+        * 3) Push to single queue
+        */
+       struct cl_sw_txhdr *sw_txhdr, *sw_txhdr_tmp;
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *tx_info;
+       struct cl_tx_queue *single_queue;
+       struct cl_sta *cl_sta = agg_queue->cl_sta;
+       u16 single_queue_idx = 0;
+
+       if (agg_queue->num_packets == 0)
+               return;
+
+       single_queue_idx = QUEUE_IDX(cl_sta->sta_idx, agg_queue->hw_index);
+       single_queue = &cl_hw->tx_queues.single[single_queue_idx];
+
+       list_for_each_entry_safe(sw_txhdr, sw_txhdr_tmp, &agg_queue->hdrs, tx_queue_list) {
+               list_del(&sw_txhdr->tx_queue_list);
+               agg_queue->num_packets--;
+
+               skb = sw_txhdr->skb;
+               tx_info = IEEE80211_SKB_CB(skb);
+
+               if (cl_tx_ctrl_is_amsdu(tx_info)) {
+                       cl_tx_amsdu_transfer_single(cl_hw, sw_txhdr);
+               } else {
+                       tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+                       if (cl_tx_8023_to_wlan(cl_hw, skb, cl_sta, sw_txhdr->tid) == 0) {
+                               cl_hw->tx_packet_cntr.transfer.agg_to_single++;
+                               cl_tx_single(cl_hw, cl_sta, skb, false, false);
+                       }
+               }
+
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+       }
+
+       /* If queue is empty remove it from schedule list */
+       cl_txq_sched_list_remove_if_empty(agg_queue);
+}
+
+void cl_txq_flush(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue)
+{
+       struct cl_sw_txhdr *sw_txhdr, *sw_txhdr_tmp;
+       struct ieee80211_tx_info *tx_info;
+
+       if (tx_queue->num_packets == 0)
+               return;
+
+       list_for_each_entry_safe(sw_txhdr, sw_txhdr_tmp, &tx_queue->hdrs, tx_queue_list) {
+               list_del(&sw_txhdr->tx_queue_list);
+               tx_queue->num_packets--;
+
+               /* Cannot send AMSDU frames as singles */
+               tx_info = IEEE80211_SKB_CB(sw_txhdr->skb);
+
+               /* Free mid & last AMSDU sub frames */
+               if (cl_tx_ctrl_is_amsdu(tx_info)) {
+                       cl_tx_amsdu_flush_sub_frames(cl_hw, sw_txhdr);
+               } else {
+                       if (tx_queue->type == QUEUE_TYPE_SINGLE)
+                               cl_tx_single_free_skb(cl_hw, sw_txhdr->skb);
+                       else
+                               kfree_skb(sw_txhdr->skb);
+
+                       cl_sw_txhdr_free(cl_hw, sw_txhdr);
+                       cl_hw->tx_packet_cntr.drop.queue_flush++;
+               }
+       }
+
+       /* Remove from schedule queue */
+       cl_txq_sched_list_remove(tx_queue);
+
+       /* Sanity check that queue is empty */
+       WARN_ON(tx_queue->num_packets > 0);
+}
+
+void cl_txq_flush_single(struct cl_hw *cl_hw, u16 idx)
+{
+       spin_lock_bh(&cl_hw->tx_lock_single);
+       cl_txq_flush(cl_hw, &cl_hw->tx_queues.single[idx]);
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+void cl_txq_flush_all_agg(struct cl_hw *cl_hw)
+{
+       int i = 0;
+
+       for (i = 0; i < IPC_MAX_BA_SESSIONS; i++)
+               cl_txq_flush(cl_hw, &cl_hw->tx_queues.agg[i]);
+}
+
+void cl_txq_flush_all_single(struct cl_hw *cl_hw)
+{
+       int i = 0;
+
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++)
+               cl_txq_flush(cl_hw, &cl_hw->tx_queues.single[i]);
+}
+
+void cl_txq_flush_sta(struct cl_hw *cl_hw, struct cl_sta *cl_sta)
+{
+       int i = 0;
+       u8 sta_idx = cl_sta->sta_idx;
+       u32 queue_idx = 0;
+       struct cl_tx_queue *tx_queue = NULL;
+
+       spin_lock_bh(&cl_hw->tx_lock_agg);
+
+       /* Flush all aggregation queues for this station */
+       for (i = 0; i < IEEE80211_NUM_TIDS; i++)
+               if (cl_sta->agg_tx_queues[i])
+                       cl_txq_flush(cl_hw, cl_sta->agg_tx_queues[i]);
+
+       spin_unlock_bh(&cl_hw->tx_lock_agg);
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       /* Flush all single queues for this station */
+       for (i = 0; i < AC_MAX; i++) {
+               queue_idx = QUEUE_IDX(sta_idx, i);
+               tx_queue = &cl_hw->tx_queues.single[queue_idx];
+               cl_txq_flush(cl_hw, tx_queue);
+               cl_txq_reset_counters(tx_queue);
+       }
+
+       /* Go over the high priority queue and delete packets belonging to this station */
+       cl_txq_delete_packets(cl_hw, &cl_hw->tx_queues.single[HIGH_PRIORITY_QUEUE], sta_idx);
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+void cl_txq_agg_request_add(struct cl_hw *cl_hw, u8 sta_idx, u8 tid)
+{
+       int i = cl_txq_request_find(cl_hw, sta_idx, tid);
+       struct cl_req_agg_db *req_agg_db = NULL;
+
+       if (i != -1) {
+               cl_dbg_trace(cl_hw, "ALREADY_ADDED - entry = %d, sta_idx = %u, tid = %u\n",
+                            i, sta_idx, tid);
+               return;
+       }
+
+       for (i = 0; i < IPC_MAX_BA_SESSIONS; i++) {
+               req_agg_db = &cl_hw->req_agg_db[i];
+
+               if (!req_agg_db->is_used) {
+                       cl_dbg_trace(cl_hw, "ADD - entry = %d, sta_idx = %u, tid = %u\n",
+                                    i, sta_idx, tid);
+                       req_agg_db->is_used = true;
+                       req_agg_db->sta_idx = sta_idx;
+                       req_agg_db->tid = tid;
+                       cl_hw->req_agg_queues++;
+                       return;
+               }
+       }
+}
+
+void cl_txq_agg_request_del(struct cl_hw *cl_hw, u8 sta_idx, u8 tid)
+{
+       int i = cl_txq_request_find(cl_hw, sta_idx, tid);
+
+       if (i != -1) {
+               cl_dbg_trace(cl_hw, "DEL - entry = %d, sta_idx = %u, tid = %u\n",
+                            i, sta_idx, tid);
+               cl_hw->req_agg_db[i].is_used = false;
+               cl_hw->req_agg_queues--;
+       }
+}
+
+bool cl_txq_is_agg_available(struct cl_hw *cl_hw)
+{
+       u8 total_agg_queues = cl_hw->used_agg_queues + cl_hw->req_agg_queues;
+
+       return (total_agg_queues < IPC_MAX_BA_SESSIONS);
+}
+
+bool cl_txq_single_is_full(struct cl_hw *cl_hw, u16 idx)
+{
+       struct cl_tx_queue *tx_queue = &cl_hw->tx_queues.single[idx];
+       bool is_full = false;
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+       is_full = (tx_queue->max_packets == tx_queue->num_packets);
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+
+       return is_full;
+}
+
+void cl_txq_single_sched(struct cl_hw *cl_hw, u16 idx)
+{
+       /*
+        * Don't take lock because it is already taken by
+        * all functions that call cl_txq_single_sched().
+        */
+       struct cl_tx_queue *tx_queue = &cl_hw->tx_queues.single[idx];
+
+       if (tx_queue->num_packets)
+               cl_txq_sched(cl_hw, tx_queue);
+}
+
+bool cl_txq_is_fw_empty(struct cl_tx_queue *tx_queue)
+{
+       return (tx_queue->fw_free_space == tx_queue->fw_max_size);
+}
+
+bool cl_txq_is_fw_full(struct cl_tx_queue *tx_queue)
+{
+       return (tx_queue->fw_free_space == 0);
+}
+
+bool cl_txq_frames_pending(struct cl_hw *cl_hw)
+{
+       int i = 0;
+
+       /* Check if we have multicast/broadcast frames in FW queues */
+       if (!cl_txq_is_fw_empty(&cl_hw->tx_queues.bcmc))
+               return true;
+
+       /* Check if we have single frames in FW queues */
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++)
+               if (!cl_txq_is_fw_empty(&cl_hw->tx_queues.single[i]))
+                       return true;
+
+       /* Check if we have aggregation frames in FW queues */
+       for (i = 0; i < IPC_MAX_BA_SESSIONS; i++)
+               if (!cl_txq_is_fw_empty(&cl_hw->tx_queues.agg[i]))
+                       return true;
+
+       return false;
+}
+
+int cl_txq_cli(struct cl_hw *cl_hw, struct cli_params *cli_params)
+{
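+       /* Dispatch the debug CLI option to the matching print/reset handler */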
+       switch (cli_params->option) {
+       case 'a':
+               return cl_txq_traffic_counters_print(cl_hw);
+       case 'b':
+               return cl_txq_drop_reasons_print(cl_hw);
+       case 'c':
+               return cl_txq_global_counters_print(cl_hw);
+       case 'd':
+               return cl_txq_stop_reasons_print(cl_hw);
+       case 'e':
+               return cl_txq_requested_agg_print(cl_hw);
+       case 'f':
+               return cl_txq_amsdu_stats_print(cl_hw);
+       case 'm':
+               return cl_txq_max_size_print(cl_hw);
+       case 'r':
+               cl_txq_stats_reset(cl_hw);
+               break;
+       case 's':
+               return cl_txq_sched_list_print(cl_hw);
+       case '?':
+               return cl_txq_cli_help(cl_hw);
+       default:
+               cl_dbg_err(cl_hw, "Illegal option (%c) - try '?' for help\n", cli_params->option);
+               break;
+       }
+
+       return 0;
+}
--
2.30.0
