linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/2] Bluetooth: hci_qca: struct qca_data refactoring
@ 2020-03-16 23:02 Matthias Kaehlcke
  2020-03-16 23:02 ` [PATCH 1/2] WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct Matthias Kaehlcke
  2020-03-16 23:02 ` [PATCH 2/2] Bluetooth: hci_qca: consolidate memdump " Matthias Kaehlcke
  0 siblings, 2 replies; 4+ messages in thread
From: Matthias Kaehlcke @ 2020-03-16 23:02 UTC (permalink / raw)
  To: Marcel Holtmann, Johan Hedberg
  Cc: Harish Bandi, Balakrishna Godavarthi, Abhishek Pandit-Subedi,
	Venkata Lakshmi Narayana Gubba, Rocky Liao, linux-bluetooth,
	linux-kernel, Matthias Kaehlcke

This series declutters 'struct qca_data' by grouping IBS- and
memdump-related fields in dedicated structs.


Matthias Kaehlcke (2):
  WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct
  Bluetooth: hci_qca: consolidate memdump fields in a struct

 drivers/bluetooth/hci_qca.c | 467 +++++++++++++++++++-----------------
 1 file changed, 246 insertions(+), 221 deletions(-)

-- 
2.25.1.481.gfbce0eb801-goog


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 1/2] WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct
  2020-03-16 23:02 [PATCH 0/2] Bluetooth: hci_qca: struct qca_data refactoring Matthias Kaehlcke
@ 2020-03-16 23:02 ` Matthias Kaehlcke
  2020-03-16 23:05   ` Matthias Kaehlcke
  2020-03-16 23:02 ` [PATCH 2/2] Bluetooth: hci_qca: consolidate memdump " Matthias Kaehlcke
  1 sibling, 1 reply; 4+ messages in thread
From: Matthias Kaehlcke @ 2020-03-16 23:02 UTC (permalink / raw)
  To: Marcel Holtmann, Johan Hedberg
  Cc: Harish Bandi, Balakrishna Godavarthi, Abhishek Pandit-Subedi,
	Venkata Lakshmi Narayana Gubba, Rocky Liao, linux-bluetooth,
	linux-kernel, Matthias Kaehlcke

About two thirds of the fields in 'struct qca_data' are related to
in-band sleep (IBS). Move these fields to a dedicated IBS struct that
is part of 'struct qca_data'. Also group the IBS stats fields for
debugfs in a separate sub-struct.

Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
---

 drivers/bluetooth/hci_qca.c | 362 +++++++++++++++++++-----------------
 1 file changed, 191 insertions(+), 171 deletions(-)

diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 3abf4e68de7a8c..c578c7c92680a6 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -125,51 +125,60 @@ struct qca_dump_size {
 	u32 dump_size;
 } __packed;
 
-struct qca_data {
-	struct hci_uart *hu;
-	struct sk_buff *rx_skb;
-	struct sk_buff_head txq;
+struct qca_ibs_stats {
+	/* For debugging purpose */
+	u64 sent_wacks;
+	u64 sent_slps;
+	u64 sent_wakes;
+	u64 recv_wacks;
+	u64 recv_slps;
+	u64 recv_wakes;
+	u64 vote_last_jif;
+	u32 vote_on_ms;
+	u32 vote_off_ms;
+	u64 tx_votes_on;
+	u64 rx_votes_on;
+	u64 tx_votes_off;
+	u64 rx_votes_off;
+	u64 votes_on;
+	u64 votes_off;
+};
+
+struct qca_ibs {
 	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
-	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue	*/
-	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
-	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
-	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
+	spinlock_t lock;	/* HCI_IBS state lock	*/
+	u8 tx_state;		/* HCI_IBS transmit side power state*/
+	u8 rx_state;		/* HCI_IBS receive side power state */
 	bool tx_vote;		/* Clock must be on for TX */
 	bool rx_vote;		/* Clock must be on for RX */
 	struct timer_list tx_idle_timer;
 	u32 tx_idle_delay;
 	struct timer_list wake_retrans_timer;
 	u32 wake_retrans;
-	struct workqueue_struct *workqueue;
+
 	struct work_struct ws_awake_rx;
 	struct work_struct ws_awake_device;
 	struct work_struct ws_rx_vote_off;
 	struct work_struct ws_tx_vote_off;
+
+	struct qca_ibs_stats stats;
+};
+
+struct qca_data {
+	struct hci_uart *hu;
+	struct sk_buff *rx_skb;
+	struct sk_buff_head txq;
+	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue	*/
+	struct workqueue_struct *workqueue;
 	struct work_struct ctrl_memdump_evt;
 	struct delayed_work ctrl_memdump_timeout;
 	struct qca_memdump_data *qca_memdump;
+	struct qca_ibs ibs;
 	unsigned long flags;
 	struct completion drop_ev_comp;
 	wait_queue_head_t suspend_wait_q;
 	enum qca_memdump_states memdump_state;
 	struct mutex hci_memdump_lock;
-
-	/* For debugging purpose */
-	u64 ibs_sent_wacks;
-	u64 ibs_sent_slps;
-	u64 ibs_sent_wakes;
-	u64 ibs_recv_wacks;
-	u64 ibs_recv_slps;
-	u64 ibs_recv_wakes;
-	u64 vote_last_jif;
-	u32 vote_on_ms;
-	u32 vote_off_ms;
-	u64 tx_votes_on;
-	u64 rx_votes_on;
-	u64 tx_votes_off;
-	u64 rx_votes_off;
-	u64 votes_on;
-	u64 votes_off;
 };
 
 enum qca_speed_type {
@@ -267,41 +276,41 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
 	struct qca_data *qca = hu->priv;
 	unsigned int diff;
 
-	bool old_vote = (qca->tx_vote | qca->rx_vote);
+	bool old_vote = (qca->ibs.tx_vote | qca->ibs.rx_vote);
 	bool new_vote;
 
 	switch (vote) {
 	case HCI_IBS_VOTE_STATS_UPDATE:
-		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+		diff = jiffies_to_msecs(jiffies - qca->ibs.stats.vote_last_jif);
 
 		if (old_vote)
-			qca->vote_off_ms += diff;
+			qca->ibs.stats.vote_off_ms += diff;
 		else
-			qca->vote_on_ms += diff;
+			qca->ibs.stats.vote_on_ms += diff;
 		return;
 
 	case HCI_IBS_TX_VOTE_CLOCK_ON:
-		qca->tx_vote = true;
-		qca->tx_votes_on++;
+		qca->ibs.tx_vote = true;
+		qca->ibs.stats.tx_votes_on++;
 		new_vote = true;
 		break;
 
 	case HCI_IBS_RX_VOTE_CLOCK_ON:
-		qca->rx_vote = true;
-		qca->rx_votes_on++;
+		qca->ibs.rx_vote = true;
+		qca->ibs.stats.rx_votes_on++;
 		new_vote = true;
 		break;
 
 	case HCI_IBS_TX_VOTE_CLOCK_OFF:
-		qca->tx_vote = false;
-		qca->tx_votes_off++;
-		new_vote = qca->rx_vote | qca->tx_vote;
+		qca->ibs.tx_vote = false;
+		qca->ibs.stats.tx_votes_off++;
+		new_vote = qca->ibs.rx_vote | qca->ibs.tx_vote;
 		break;
 
 	case HCI_IBS_RX_VOTE_CLOCK_OFF:
-		qca->rx_vote = false;
-		qca->rx_votes_off++;
-		new_vote = qca->rx_vote | qca->tx_vote;
+		qca->ibs.rx_vote = false;
+		qca->ibs.stats.rx_votes_off++;
+		new_vote = qca->ibs.rx_vote | qca->ibs.tx_vote;
 		break;
 
 	default:
@@ -318,16 +327,16 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
 		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
 		       vote ? "true" : "false");
 
-		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+		diff = jiffies_to_msecs(jiffies - qca->ibs.stats.vote_last_jif);
 
 		if (new_vote) {
-			qca->votes_on++;
-			qca->vote_off_ms += diff;
+			qca->ibs.stats.votes_on++;
+			qca->ibs.stats.vote_off_ms += diff;
 		} else {
-			qca->votes_off++;
-			qca->vote_on_ms += diff;
+			qca->ibs.stats.votes_off++;
+			qca->ibs.stats.vote_on_ms += diff;
 		}
-		qca->vote_last_jif = jiffies;
+		qca->ibs.stats.vote_last_jif = jiffies;
 	}
 }
 
@@ -359,7 +368,7 @@ static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
 static void qca_wq_awake_device(struct work_struct *work)
 {
 	struct qca_data *qca = container_of(work, struct qca_data,
-					    ws_awake_device);
+					    ibs.ws_awake_device);
 	struct hci_uart *hu = qca->hu;
 	unsigned long retrans_delay;
 	unsigned long flags;
@@ -369,19 +378,19 @@ static void qca_wq_awake_device(struct work_struct *work)
 	/* Vote for serial clock */
 	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
 
-	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+	spin_lock_irqsave(&qca->ibs.lock, flags);
 
 	/* Send wake indication to device */
 	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
 		BT_ERR("Failed to send WAKE to device");
 
-	qca->ibs_sent_wakes++;
+	qca->ibs.stats.sent_wakes++;
 
 	/* Start retransmit timer */
-	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
-	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+	retrans_delay = msecs_to_jiffies(qca->ibs.wake_retrans);
+	mod_timer(&qca->ibs.wake_retrans_timer, jiffies + retrans_delay);
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	/* Actually send the packets */
 	hci_uart_tx_wakeup(hu);
@@ -390,7 +399,7 @@ static void qca_wq_awake_device(struct work_struct *work)
 static void qca_wq_awake_rx(struct work_struct *work)
 {
 	struct qca_data *qca = container_of(work, struct qca_data,
-					    ws_awake_rx);
+					    ibs.ws_awake_rx);
 	struct hci_uart *hu = qca->hu;
 	unsigned long flags;
 
@@ -398,8 +407,8 @@ static void qca_wq_awake_rx(struct work_struct *work)
 
 	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
 
-	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
-	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
+	spin_lock_irqsave(&qca->ibs.lock, flags);
+	qca->ibs.rx_state = HCI_IBS_RX_AWAKE;
 
 	/* Always acknowledge device wake up,
 	 * sending IBS message doesn't count as TX ON.
@@ -407,9 +416,9 @@ static void qca_wq_awake_rx(struct work_struct *work)
 	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
 		BT_ERR("Failed to acknowledge device wake up");
 
-	qca->ibs_sent_wacks++;
+	qca->ibs.stats.sent_wacks++;
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	/* Actually send the packets */
 	hci_uart_tx_wakeup(hu);
@@ -418,7 +427,7 @@ static void qca_wq_awake_rx(struct work_struct *work)
 static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
 {
 	struct qca_data *qca = container_of(work, struct qca_data,
-					    ws_rx_vote_off);
+					    ibs.ws_rx_vote_off);
 	struct hci_uart *hu = qca->hu;
 
 	BT_DBG("hu %p rx clock vote off", hu);
@@ -429,7 +438,7 @@ static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
 static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
 {
 	struct qca_data *qca = container_of(work, struct qca_data,
-					    ws_tx_vote_off);
+					    ibs.ws_tx_vote_off);
 	struct hci_uart *hu = qca->hu;
 
 	BT_DBG("hu %p tx clock vote off", hu);
@@ -445,25 +454,25 @@ static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
 
 static void hci_ibs_tx_idle_timeout(struct timer_list *t)
 {
-	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
+	struct qca_data *qca = from_timer(qca, t, ibs.tx_idle_timer);
 	struct hci_uart *hu = qca->hu;
 	unsigned long flags;
 
-	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
+	BT_DBG("hu %p idle timeout in %d state", hu, qca->ibs.tx_state);
 
-	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+	spin_lock_irqsave_nested(&qca->ibs.lock,
 				 flags, SINGLE_DEPTH_NESTING);
 
-	switch (qca->tx_ibs_state) {
+	switch (qca->ibs.tx_state) {
 	case HCI_IBS_TX_AWAKE:
 		/* TX_IDLE, go to SLEEP */
 		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
 			BT_ERR("Failed to send SLEEP to device");
 			break;
 		}
-		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
-		qca->ibs_sent_slps++;
-		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
+		qca->ibs.tx_state = HCI_IBS_TX_ASLEEP;
+		qca->ibs.stats.sent_slps++;
+		queue_work(qca->workqueue, &qca->ibs.ws_tx_vote_off);
 		break;
 
 	case HCI_IBS_TX_ASLEEP:
@@ -471,33 +480,33 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
 		/* Fall through */
 
 	default:
-		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
+		BT_ERR("Spurious timeout tx state %d", qca->ibs.tx_state);
 		break;
 	}
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 }
 
 static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
 {
-	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
+	struct qca_data *qca = from_timer(qca, t, ibs.wake_retrans_timer);
 	struct hci_uart *hu = qca->hu;
 	unsigned long flags, retrans_delay;
 	bool retransmit = false;
 
 	BT_DBG("hu %p wake retransmit timeout in %d state",
-		hu, qca->tx_ibs_state);
+		hu, qca->ibs.tx_state);
 
-	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+	spin_lock_irqsave_nested(&qca->ibs.lock,
 				 flags, SINGLE_DEPTH_NESTING);
 
 	/* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
 	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
-		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+		spin_unlock_irqrestore(&qca->ibs.lock, flags);
 		return;
 	}
 
-	switch (qca->tx_ibs_state) {
+	switch (qca->ibs.tx_state) {
 	case HCI_IBS_TX_WAKING:
 		/* No WAKE_ACK, retransmit WAKE */
 		retransmit = true;
@@ -505,9 +514,10 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
 			BT_ERR("Failed to acknowledge device wake up");
 			break;
 		}
-		qca->ibs_sent_wakes++;
-		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
-		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+		qca->ibs.stats.sent_wakes++;
+		retrans_delay = msecs_to_jiffies(qca->ibs.wake_retrans);
+		mod_timer(&qca->ibs.wake_retrans_timer,
+			  jiffies + retrans_delay);
 		break;
 
 	case HCI_IBS_TX_ASLEEP:
@@ -515,11 +525,11 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
 		/* Fall through */
 
 	default:
-		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
+		BT_ERR("Spurious timeout tx state %d", qca->ibs.tx_state);
 		break;
 	}
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	if (retransmit)
 		hci_uart_tx_wakeup(hu);
@@ -563,9 +573,9 @@ static int qca_open(struct hci_uart *hu)
 		return -ENOMEM;
 
 	skb_queue_head_init(&qca->txq);
-	skb_queue_head_init(&qca->tx_wait_q);
+	skb_queue_head_init(&qca->ibs.tx_wait_q);
 	skb_queue_head_init(&qca->rx_memdump_q);
-	spin_lock_init(&qca->hci_ibs_lock);
+	spin_lock_init(&qca->ibs.lock);
 	mutex_init(&qca->hci_memdump_lock);
 	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
 	if (!qca->workqueue) {
@@ -574,10 +584,10 @@ static int qca_open(struct hci_uart *hu)
 		return -ENOMEM;
 	}
 
-	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
-	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
-	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
-	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+	INIT_WORK(&qca->ibs.ws_awake_rx, qca_wq_awake_rx);
+	INIT_WORK(&qca->ibs.ws_awake_device, qca_wq_awake_device);
+	INIT_WORK(&qca->ibs.ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
+	INIT_WORK(&qca->ibs.ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
 	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
 	INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
 			  qca_controller_memdump_timeout);
@@ -587,10 +597,10 @@ static int qca_open(struct hci_uart *hu)
 	init_completion(&qca->drop_ev_comp);
 
 	/* Assume we start with both sides asleep -- extra wakes OK */
-	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
-	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+	qca->ibs.tx_state = HCI_IBS_TX_ASLEEP;
+	qca->ibs.rx_state = HCI_IBS_RX_ASLEEP;
 
-	qca->vote_last_jif = jiffies;
+	qca->ibs.stats.vote_last_jif = jiffies;
 
 	hu->priv = qca;
 
@@ -602,14 +612,15 @@ static int qca_open(struct hci_uart *hu)
 		}
 	}
 
-	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
-	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+	timer_setup(&qca->ibs.wake_retrans_timer, hci_ibs_wake_retrans_timeout,
+		    0);
+	qca->ibs.wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
 
-	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
-	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
+	timer_setup(&qca->ibs.tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
+	qca->ibs.tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
 
 	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
-	       qca->tx_idle_delay, qca->wake_retrans);
+	       qca->ibs.tx_idle_delay, qca->ibs.wake_retrans);
 
 	return 0;
 }
@@ -628,36 +639,45 @@ static void qca_debugfs_init(struct hci_dev *hdev)
 
 	/* read only */
 	mode = S_IRUGO;
-	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
-	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
+	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->ibs.tx_state);
+	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->ibs.rx_state);
 	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
-			   &qca->ibs_sent_slps);
+			   &qca->ibs.stats.sent_slps);
 	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
-			   &qca->ibs_sent_wakes);
+			   &qca->ibs.stats.sent_wakes);
 	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
-			   &qca->ibs_sent_wacks);
+			   &qca->ibs.stats.sent_wacks);
 	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
-			   &qca->ibs_recv_slps);
+			   &qca->ibs.stats.recv_slps);
 	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
-			   &qca->ibs_recv_wakes);
+			   &qca->ibs.stats.recv_wakes);
 	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
-			   &qca->ibs_recv_wacks);
-	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
-	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
-	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
-	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
-	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
-	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
-	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
-	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
-	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
-	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
+			   &qca->ibs.stats.recv_wacks);
+	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->ibs.tx_vote);
+	debugfs_create_u64("tx_votes_on", mode, ibs_dir,
+			   &qca->ibs.stats.tx_votes_on);
+	debugfs_create_u64("tx_votes_off", mode, ibs_dir,
+			   &qca->ibs.stats.tx_votes_off);
+	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->ibs.rx_vote);
+	debugfs_create_u64("rx_votes_on", mode, ibs_dir,
+			   &qca->ibs.stats.rx_votes_on);
+	debugfs_create_u64("rx_votes_off", mode, ibs_dir,
+			   &qca->ibs.stats.rx_votes_off);
+	debugfs_create_u64("votes_on", mode, ibs_dir,
+			   &qca->ibs.stats.votes_on);
+	debugfs_create_u64("votes_off", mode, ibs_dir,
+			   &qca->ibs.stats.votes_off);
+	debugfs_create_u32("vote_on_ms", mode, ibs_dir,
+			   &qca->ibs.stats.vote_on_ms);
+	debugfs_create_u32("vote_off_ms", mode, ibs_dir,
+			   &qca->ibs.stats.vote_off_ms);
 
 	/* read/write */
 	mode = S_IRUGO | S_IWUSR;
-	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
+	debugfs_create_u32("wake_retrans", mode, ibs_dir,
+			   &qca->ibs.wake_retrans);
 	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
-			   &qca->tx_idle_delay);
+			   &qca->ibs.tx_idle_delay);
 }
 
 /* Flush protocol data */
@@ -667,7 +687,7 @@ static int qca_flush(struct hci_uart *hu)
 
 	BT_DBG("hu %p qca flush", hu);
 
-	skb_queue_purge(&qca->tx_wait_q);
+	skb_queue_purge(&qca->ibs.tx_wait_q);
 	skb_queue_purge(&qca->txq);
 
 	return 0;
@@ -682,11 +702,11 @@ static int qca_close(struct hci_uart *hu)
 
 	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
 
-	skb_queue_purge(&qca->tx_wait_q);
+	skb_queue_purge(&qca->ibs.tx_wait_q);
 	skb_queue_purge(&qca->txq);
 	skb_queue_purge(&qca->rx_memdump_q);
-	del_timer(&qca->tx_idle_timer);
-	del_timer(&qca->wake_retrans_timer);
+	del_timer(&qca->ibs.tx_idle_timer);
+	del_timer(&qca->ibs.wake_retrans_timer);
 	destroy_workqueue(qca->workqueue);
 	qca->hu = NULL;
 
@@ -710,23 +730,23 @@ static void device_want_to_wakeup(struct hci_uart *hu)
 
 	BT_DBG("hu %p want to wake up", hu);
 
-	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+	spin_lock_irqsave(&qca->ibs.lock, flags);
 
-	qca->ibs_recv_wakes++;
+	qca->ibs.stats.recv_wakes++;
 
 	/* Don't wake the rx up when suspending. */
 	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
-		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+		spin_unlock_irqrestore(&qca->ibs.lock, flags);
 		return;
 	}
 
-	switch (qca->rx_ibs_state) {
+	switch (qca->ibs.rx_state) {
 	case HCI_IBS_RX_ASLEEP:
 		/* Make sure clock is on - we may have turned clock off since
 		 * receiving the wake up indicator awake rx clock.
 		 */
-		queue_work(qca->workqueue, &qca->ws_awake_rx);
-		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+		queue_work(qca->workqueue, &qca->ibs.ws_awake_rx);
+		spin_unlock_irqrestore(&qca->ibs.lock, flags);
 		return;
 
 	case HCI_IBS_RX_AWAKE:
@@ -737,17 +757,17 @@ static void device_want_to_wakeup(struct hci_uart *hu)
 			BT_ERR("Failed to acknowledge device wake up");
 			break;
 		}
-		qca->ibs_sent_wacks++;
+		qca->ibs.stats.sent_wacks++;
 		break;
 
 	default:
 		/* Any other state is illegal */
 		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
-		       qca->rx_ibs_state);
+		       qca->ibs.rx_state);
 		break;
 	}
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	/* Actually send the packets */
 	hci_uart_tx_wakeup(hu);
@@ -760,18 +780,18 @@ static void device_want_to_sleep(struct hci_uart *hu)
 	unsigned long flags;
 	struct qca_data *qca = hu->priv;
 
-	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
+	BT_DBG("hu %p want to sleep in %d state", hu, qca->ibs.rx_state);
 
-	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+	spin_lock_irqsave(&qca->ibs.lock, flags);
 
-	qca->ibs_recv_slps++;
+	qca->ibs.stats.recv_slps++;
 
-	switch (qca->rx_ibs_state) {
+	switch (qca->ibs.rx_state) {
 	case HCI_IBS_RX_AWAKE:
 		/* Update state */
-		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+		qca->ibs.rx_state = HCI_IBS_RX_ASLEEP;
 		/* Vote off rx clock under workqueue */
-		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
+		queue_work(qca->workqueue, &qca->ibs.ws_rx_vote_off);
 		break;
 
 	case HCI_IBS_RX_ASLEEP:
@@ -780,13 +800,13 @@ static void device_want_to_sleep(struct hci_uart *hu)
 	default:
 		/* Any other state is illegal */
 		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
-		       qca->rx_ibs_state);
+		       qca->ibs.rx_state);
 		break;
 	}
 
 	wake_up_interruptible(&qca->suspend_wait_q);
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 }
 
 /* Called upon wake-up-acknowledgement from the device
@@ -799,33 +819,33 @@ static void device_woke_up(struct hci_uart *hu)
 
 	BT_DBG("hu %p woke up", hu);
 
-	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+	spin_lock_irqsave(&qca->ibs.lock, flags);
 
-	qca->ibs_recv_wacks++;
+	qca->ibs.stats.recv_wacks++;
 
 	/* Don't react to the wake-up-acknowledgment when suspending. */
 	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
-		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+		spin_unlock_irqrestore(&qca->ibs.lock, flags);
 		return;
 	}
 
-	switch (qca->tx_ibs_state) {
+	switch (qca->ibs.tx_state) {
 	case HCI_IBS_TX_AWAKE:
 		/* Expect one if we send 2 WAKEs */
 		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
-		       qca->tx_ibs_state);
+		       qca->ibs.tx_state);
 		break;
 
 	case HCI_IBS_TX_WAKING:
 		/* Send pending packets */
-		while ((skb = skb_dequeue(&qca->tx_wait_q)))
+		while ((skb = skb_dequeue(&qca->ibs.tx_wait_q)))
 			skb_queue_tail(&qca->txq, skb);
 
 		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
-		del_timer(&qca->wake_retrans_timer);
-		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
-		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
-		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+		del_timer(&qca->ibs.wake_retrans_timer);
+		idle_delay = msecs_to_jiffies(qca->ibs.tx_idle_delay);
+		mod_timer(&qca->ibs.tx_idle_timer, jiffies + idle_delay);
+		qca->ibs.tx_state = HCI_IBS_TX_AWAKE;
 		break;
 
 	case HCI_IBS_TX_ASLEEP:
@@ -833,11 +853,11 @@ static void device_woke_up(struct hci_uart *hu)
 
 	default:
 		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
-		       qca->tx_ibs_state);
+		       qca->ibs.tx_state);
 		break;
 	}
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	/* Actually send the packets */
 	hci_uart_tx_wakeup(hu);
@@ -852,12 +872,12 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 	struct qca_data *qca = hu->priv;
 
 	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
-	       qca->tx_ibs_state);
+	       qca->ibs.tx_state);
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
 
-	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+	spin_lock_irqsave(&qca->ibs.lock, flags);
 
 	/* Don't go to sleep in middle of patch download or
 	 * Out-Of-Band(GPIOs control) sleep is selected.
@@ -866,43 +886,43 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 	if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
 	    test_bit(QCA_SUSPENDING, &qca->flags)) {
 		skb_queue_tail(&qca->txq, skb);
-		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+		spin_unlock_irqrestore(&qca->ibs.lock, flags);
 		return 0;
 	}
 
 	/* Act according to current state */
-	switch (qca->tx_ibs_state) {
+	switch (qca->ibs.tx_state) {
 	case HCI_IBS_TX_AWAKE:
 		BT_DBG("Device awake, sending normally");
 		skb_queue_tail(&qca->txq, skb);
-		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
-		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+		idle_delay = msecs_to_jiffies(qca->ibs.tx_idle_delay);
+		mod_timer(&qca->ibs.tx_idle_timer, jiffies + idle_delay);
 		break;
 
 	case HCI_IBS_TX_ASLEEP:
 		BT_DBG("Device asleep, waking up and queueing packet");
 		/* Save packet for later */
-		skb_queue_tail(&qca->tx_wait_q, skb);
+		skb_queue_tail(&qca->ibs.tx_wait_q, skb);
 
-		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
+		qca->ibs.tx_state = HCI_IBS_TX_WAKING;
 		/* Schedule a work queue to wake up device */
-		queue_work(qca->workqueue, &qca->ws_awake_device);
+		queue_work(qca->workqueue, &qca->ibs.ws_awake_device);
 		break;
 
 	case HCI_IBS_TX_WAKING:
 		BT_DBG("Device waking up, queueing packet");
 		/* Transient state; just keep packet for later */
-		skb_queue_tail(&qca->tx_wait_q, skb);
+		skb_queue_tail(&qca->ibs.tx_wait_q, skb);
 		break;
 
 	default:
 		BT_ERR("Illegal tx state: %d (losing packet)",
-		       qca->tx_ibs_state);
+		       qca->ibs.tx_state);
 		kfree_skb(skb);
 		break;
 	}
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	return 0;
 }
@@ -1732,10 +1752,10 @@ static void qca_power_shutdown(struct hci_uart *hu)
 	 * still open, stop queueing the IBS data and flush all the buffered
 	 * data in skb's.
 	 */
-	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+	spin_lock_irqsave(&qca->ibs.lock, flags);
 	clear_bit(QCA_IBS_ENABLED, &qca->flags);
 	qca_flush(hu);
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	hu->hdev->hw_error = NULL;
 	hu->hdev->cmd_timeout = NULL;
@@ -1959,18 +1979,18 @@ static int __maybe_unused qca_suspend(struct device *dev)
 	if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
 		return 0;
 
-	cancel_work_sync(&qca->ws_awake_device);
-	cancel_work_sync(&qca->ws_awake_rx);
+	cancel_work_sync(&qca->ibs.ws_awake_device);
+	cancel_work_sync(&qca->ibs.ws_awake_rx);
 
-	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+	spin_lock_irqsave_nested(&qca->ibs.lock,
 				 flags, SINGLE_DEPTH_NESTING);
 
-	switch (qca->tx_ibs_state) {
+	switch (qca->ibs.tx_state) {
 	case HCI_IBS_TX_WAKING:
-		del_timer(&qca->wake_retrans_timer);
+		del_timer(&qca->ibs.wake_retrans_timer);
 		/* Fall through */
 	case HCI_IBS_TX_AWAKE:
-		del_timer(&qca->tx_idle_timer);
+		del_timer(&qca->ibs.tx_idle_timer);
 
 		serdev_device_write_flush(hu->serdev);
 		cmd = HCI_IBS_SLEEP_IND;
@@ -1981,22 +2001,22 @@ static int __maybe_unused qca_suspend(struct device *dev)
 			break;
 		}
 
-		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
-		qca->ibs_sent_slps++;
+		qca->ibs.tx_state = HCI_IBS_TX_ASLEEP;
+		qca->ibs.stats.sent_slps++;
 
-		qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
+		qca_wq_serial_tx_clock_vote_off(&qca->ibs.ws_tx_vote_off);
 		break;
 
 	case HCI_IBS_TX_ASLEEP:
 		break;
 
 	default:
-		BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
+		BT_ERR("Spurious tx state %d", qca->ibs.tx_state);
 		ret = -EINVAL;
 		break;
 	}
 
-	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+	spin_unlock_irqrestore(&qca->ibs.lock, flags);
 
 	if (ret < 0)
 		goto error;
@@ -2009,7 +2029,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
 	 */
 
 	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
-			qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
+			qca->ibs.rx_state == HCI_IBS_RX_ASLEEP,
 			msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
 
 	if (ret > 0)
-- 
2.25.1.481.gfbce0eb801-goog


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 2/2] Bluetooth: hci_qca: consolidate memdump fields in a struct
  2020-03-16 23:02 [PATCH 0/2] Bluetooth: hci_qca: struct qca_data refactoring Matthias Kaehlcke
  2020-03-16 23:02 ` [PATCH 1/2] WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct Matthias Kaehlcke
@ 2020-03-16 23:02 ` Matthias Kaehlcke
  1 sibling, 0 replies; 4+ messages in thread
From: Matthias Kaehlcke @ 2020-03-16 23:02 UTC (permalink / raw)
  To: Marcel Holtmann, Johan Hedberg
  Cc: Harish Bandi, Balakrishna Godavarthi, Abhishek Pandit-Subedi,
	Venkata Lakshmi Narayana Gubba, Rocky Liao, linux-bluetooth,
	linux-kernel, Matthias Kaehlcke

Move memdump-related fields from 'struct qca_data' to a dedicated
struct that is part of 'struct qca_data'.

Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
---

 drivers/bluetooth/hci_qca.c | 107 +++++++++++++++++++-----------------
 1 file changed, 56 insertions(+), 51 deletions(-)

diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index c578c7c92680a6..662fa9b5554d4d 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -164,21 +164,26 @@ struct qca_ibs {
 	struct qca_ibs_stats stats;
 };
 
+struct qca_memdump {
+	struct sk_buff_head rx_q;	/* Memdump wait queue	*/
+	struct work_struct ctrl_evt;
+	struct delayed_work ctrl_timeout;
+	struct qca_memdump_data *data;
+	enum qca_memdump_states state;
+	struct mutex lock;
+};
+
 struct qca_data {
 	struct hci_uart *hu;
 	struct sk_buff *rx_skb;
 	struct sk_buff_head txq;
-	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue	*/
+
 	struct workqueue_struct *workqueue;
-	struct work_struct ctrl_memdump_evt;
-	struct delayed_work ctrl_memdump_timeout;
-	struct qca_memdump_data *qca_memdump;
 	struct qca_ibs ibs;
+	struct qca_memdump memdump;
 	unsigned long flags;
 	struct completion drop_ev_comp;
 	wait_queue_head_t suspend_wait_q;
-	enum qca_memdump_states memdump_state;
-	struct mutex hci_memdump_lock;
 };
 
 enum qca_speed_type {
@@ -539,12 +544,12 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
 static void qca_controller_memdump_timeout(struct work_struct *work)
 {
 	struct qca_data *qca = container_of(work, struct qca_data,
-					ctrl_memdump_timeout.work);
+					memdump.ctrl_timeout.work);
 	struct hci_uart *hu = qca->hu;
 
-	mutex_lock(&qca->hci_memdump_lock);
+	mutex_lock(&qca->memdump.lock);
 	if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
-		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+		qca->memdump.state = QCA_MEMDUMP_TIMEOUT;
 		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
 			/* Inject hw error event to reset the device
 			 * and driver.
@@ -553,7 +558,7 @@ static void qca_controller_memdump_timeout(struct work_struct *work)
 		}
 	}
 
-	mutex_unlock(&qca->hci_memdump_lock);
+	mutex_unlock(&qca->memdump.lock);
 }
 
 
@@ -574,9 +579,9 @@ static int qca_open(struct hci_uart *hu)
 
 	skb_queue_head_init(&qca->txq);
 	skb_queue_head_init(&qca->ibs.tx_wait_q);
-	skb_queue_head_init(&qca->rx_memdump_q);
+	skb_queue_head_init(&qca->memdump.rx_q);
 	spin_lock_init(&qca->ibs.lock);
-	mutex_init(&qca->hci_memdump_lock);
+	mutex_init(&qca->memdump.lock);
 	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
 	if (!qca->workqueue) {
 		BT_ERR("QCA Workqueue not initialized properly");
@@ -588,8 +593,8 @@ static int qca_open(struct hci_uart *hu)
 	INIT_WORK(&qca->ibs.ws_awake_device, qca_wq_awake_device);
 	INIT_WORK(&qca->ibs.ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
 	INIT_WORK(&qca->ibs.ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
-	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
-	INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
+	INIT_WORK(&qca->memdump.ctrl_evt, qca_controller_memdump);
+	INIT_DELAYED_WORK(&qca->memdump.ctrl_timeout,
 			  qca_controller_memdump_timeout);
 	init_waitqueue_head(&qca->suspend_wait_q);
 
@@ -704,7 +709,7 @@ static int qca_close(struct hci_uart *hu)
 
 	skb_queue_purge(&qca->ibs.tx_wait_q);
 	skb_queue_purge(&qca->txq);
-	skb_queue_purge(&qca->rx_memdump_q);
+	skb_queue_purge(&qca->memdump.rx_q);
 	del_timer(&qca->ibs.tx_idle_timer);
 	del_timer(&qca->ibs.wake_retrans_timer);
 	destroy_workqueue(qca->workqueue);
@@ -979,23 +984,23 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
 static void qca_controller_memdump(struct work_struct *work)
 {
 	struct qca_data *qca = container_of(work, struct qca_data,
-					    ctrl_memdump_evt);
+					    memdump.ctrl_evt);
 	struct hci_uart *hu = qca->hu;
 	struct sk_buff *skb;
 	struct qca_memdump_event_hdr *cmd_hdr;
-	struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+	struct qca_memdump_data *qca_memdump = qca->memdump.data;
 	struct qca_dump_size *dump;
 	char *memdump_buf;
 	char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
 	u16 seq_no;
 	u32 dump_size;
 
-	while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
+	while ((skb = skb_dequeue(&qca->memdump.rx_q))) {
 
-		mutex_lock(&qca->hci_memdump_lock);
+		mutex_lock(&qca->memdump.lock);
 		/* Skip processing the received packets if timeout detected. */
-		if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) {
-			mutex_unlock(&qca->hci_memdump_lock);
+		if (qca->memdump.state == QCA_MEMDUMP_TIMEOUT) {
+			mutex_unlock(&qca->memdump.lock);
 			return;
 		}
 
@@ -1003,14 +1008,14 @@ static void qca_controller_memdump(struct work_struct *work)
 			qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
 					      GFP_ATOMIC);
 			if (!qca_memdump) {
-				mutex_unlock(&qca->hci_memdump_lock);
+				mutex_unlock(&qca->memdump.lock);
 				return;
 			}
 
-			qca->qca_memdump = qca_memdump;
+			qca->memdump.data = qca_memdump;
 		}
 
-		qca->memdump_state = QCA_MEMDUMP_COLLECTING;
+		qca->memdump.state = QCA_MEMDUMP_COLLECTING;
 		cmd_hdr = (void *) skb->data;
 		seq_no = __le16_to_cpu(cmd_hdr->seq_no);
 		skb_pull(skb, sizeof(struct qca_memdump_event_hdr));
@@ -1030,14 +1035,14 @@ static void qca_controller_memdump(struct work_struct *work)
 			if (!(dump_size)) {
 				bt_dev_err(hu->hdev, "Rx invalid memdump size");
 				kfree_skb(skb);
-				mutex_unlock(&qca->hci_memdump_lock);
+				mutex_unlock(&qca->memdump.lock);
 				return;
 			}
 
 			bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
 				    dump_size);
 			queue_delayed_work(qca->workqueue,
-					   &qca->ctrl_memdump_timeout,
+					   &qca->memdump.ctrl_timeout,
 					msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
 
 			skb_pull(skb, sizeof(dump_size));
@@ -1055,8 +1060,8 @@ static void qca_controller_memdump(struct work_struct *work)
 			bt_dev_err(hu->hdev, "QCA: Discarding other packets");
 			kfree(qca_memdump);
 			kfree_skb(skb);
-			qca->qca_memdump = NULL;
-			mutex_unlock(&qca->hci_memdump_lock);
+			qca->memdump.data = NULL;
+			mutex_unlock(&qca->memdump.lock);
 			return;
 		}
 
@@ -1079,7 +1084,7 @@ static void qca_controller_memdump(struct work_struct *work)
 		qca_memdump->memdump_buf_tail = memdump_buf;
 		qca_memdump->current_seq_no = seq_no + 1;
 		qca_memdump->received_dump += skb->len;
-		qca->qca_memdump = qca_memdump;
+		qca->memdump.data = qca_memdump;
 		kfree_skb(skb);
 		if (seq_no == QCA_LAST_SEQUENCE_NUM) {
 			bt_dev_info(hu->hdev, "QCA writing crash dump of size %d bytes",
@@ -1087,14 +1092,14 @@ static void qca_controller_memdump(struct work_struct *work)
 			memdump_buf = qca_memdump->memdump_buf_head;
 			dev_coredumpv(&hu->serdev->dev, memdump_buf,
 				      qca_memdump->received_dump, GFP_KERNEL);
-			cancel_delayed_work(&qca->ctrl_memdump_timeout);
-			kfree(qca->qca_memdump);
-			qca->qca_memdump = NULL;
-			qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+			cancel_delayed_work(&qca->memdump.ctrl_timeout);
+			kfree(qca->memdump.data);
+			qca->memdump.data = NULL;
+			qca->memdump.state = QCA_MEMDUMP_COLLECTED;
 			clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
 		}
 
-		mutex_unlock(&qca->hci_memdump_lock);
+		mutex_unlock(&qca->memdump.lock);
 	}
 
 }
@@ -1105,8 +1110,8 @@ static int qca_controller_memdump_event(struct hci_dev *hdev,
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 	struct qca_data *qca = hu->priv;
 
-	skb_queue_tail(&qca->rx_memdump_q, skb);
-	queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
+	skb_queue_tail(&qca->memdump.rx_q, skb);
+	queue_work(qca->workqueue, &qca->memdump.ctrl_evt);
 
 	return 0;
 }
@@ -1462,13 +1467,13 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
 {
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 	struct qca_data *qca = hu->priv;
-	struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+	struct qca_memdump_data *qca_memdump = qca->memdump.data;
 	char *memdump_buf = NULL;
 
 	set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
-	bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
+	bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump.state);
 
-	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+	if (qca->memdump.state == QCA_MEMDUMP_IDLE) {
 		/* If hardware error event received for other than QCA
 		 * soc memory dump event, then we need to crash the SOC
 		 * and wait here for 8 seconds to get the dump packets.
@@ -1478,7 +1483,7 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
 		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
 		qca_send_crashbuffer(hu);
 		qca_wait_for_dump_collection(hdev);
-	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
+	} else if (qca->memdump.state == QCA_MEMDUMP_COLLECTING) {
 		/* Let us wait here until memory dump collected or
 		 * memory dump timer expired.
 		 */
@@ -1486,19 +1491,19 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
 		qca_wait_for_dump_collection(hdev);
 	}
 
-	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
+	if (qca->memdump.state != QCA_MEMDUMP_COLLECTED) {
 		bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
-		mutex_lock(&qca->hci_memdump_lock);
+		mutex_lock(&qca->memdump.lock);
 		if (qca_memdump)
 			memdump_buf = qca_memdump->memdump_buf_head;
 		vfree(memdump_buf);
 		kfree(qca_memdump);
-		qca->qca_memdump = NULL;
-		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
-		cancel_delayed_work(&qca->ctrl_memdump_timeout);
-		skb_queue_purge(&qca->rx_memdump_q);
-		mutex_unlock(&qca->hci_memdump_lock);
-		cancel_work_sync(&qca->ctrl_memdump_evt);
+		qca->memdump.data = NULL;
+		qca->memdump.state = QCA_MEMDUMP_TIMEOUT;
+		cancel_delayed_work(&qca->memdump.ctrl_timeout);
+		skb_queue_purge(&qca->memdump.rx_q);
+		mutex_unlock(&qca->memdump.lock);
+		cancel_work_sync(&qca->memdump.ctrl_evt);
 	}
 
 	clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
@@ -1509,7 +1514,7 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 	struct qca_data *qca = hu->priv;
 
-	if (qca->memdump_state == QCA_MEMDUMP_IDLE)
+	if (qca->memdump.state == QCA_MEMDUMP_IDLE)
 		qca_send_crashbuffer(hu);
 	else
 		bt_dev_info(hdev, "Dump collection is in process");
@@ -1781,12 +1786,12 @@ static int qca_power_off(struct hci_dev *hdev)
 	struct qca_data *qca = hu->priv;
 
 	/* Stop sending shutdown command if soc crashes. */
-	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+	if (qca->memdump.state == QCA_MEMDUMP_IDLE) {
 		qca_send_pre_shutdown_cmd(hdev);
 		usleep_range(8000, 10000);
 	}
 
-	qca->memdump_state = QCA_MEMDUMP_IDLE;
+	qca->memdump.state = QCA_MEMDUMP_IDLE;
 	qca_power_shutdown(hu);
 	return 0;
 }
-- 
2.25.1.481.gfbce0eb801-goog


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 1/2] WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct
  2020-03-16 23:02 ` [PATCH 1/2] WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct Matthias Kaehlcke
@ 2020-03-16 23:05   ` Matthias Kaehlcke
  0 siblings, 0 replies; 4+ messages in thread
From: Matthias Kaehlcke @ 2020-03-16 23:05 UTC (permalink / raw)
  To: Marcel Holtmann, Johan Hedberg
  Cc: Harish Bandi, Balakrishna Godavarthi, Abhishek Pandit-Subedi,
	Venkata Lakshmi Narayana Gubba, Rocky Liao, linux-bluetooth,
	linux-kernel

On Mon, Mar 16, 2020 at 04:02:32PM -0700, Matthias Kaehlcke wrote:

> Subject: WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct

The WIP prefix wasn't intended, I'll remove it in the next version.

> About two thirds of the fields in 'struct qca_data' are related with
> in band sleep (IBS). Move these fields to a dedicated IBS struct which
> is part of 'struct qca_data'. Also congregate the IBS stats fields for
> debugfs in a separate sub-struct.
> 
> Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
> ---
> 
>  drivers/bluetooth/hci_qca.c | 362 +++++++++++++++++++-----------------
>  1 file changed, 191 insertions(+), 171 deletions(-)
> 
> diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
> index 3abf4e68de7a8c..c578c7c92680a6 100644
> --- a/drivers/bluetooth/hci_qca.c
> +++ b/drivers/bluetooth/hci_qca.c
> @@ -125,51 +125,60 @@ struct qca_dump_size {
>  	u32 dump_size;
>  } __packed;
>  
> -struct qca_data {
> -	struct hci_uart *hu;
> -	struct sk_buff *rx_skb;
> -	struct sk_buff_head txq;
> +struct qca_ibs_stats {
> +	/* For debugging purpose */
> +	u64 sent_wacks;
> +	u64 sent_slps;
> +	u64 sent_wakes;
> +	u64 recv_wacks;
> +	u64 recv_slps;
> +	u64 recv_wakes;
> +	u64 vote_last_jif;
> +	u32 vote_on_ms;
> +	u32 vote_off_ms;
> +	u64 tx_votes_on;
> +	u64 rx_votes_on;
> +	u64 tx_votes_off;
> +	u64 rx_votes_off;
> +	u64 votes_on;
> +	u64 votes_off;
> +};
> +
> +struct qca_ibs {
>  	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
> -	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue	*/
> -	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
> -	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
> -	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
> +	spinlock_t lock;	/* HCI_IBS state lock	*/
> +	u8 tx_state;		/* HCI_IBS transmit side power state*/
> +	u8 rx_state;		/* HCI_IBS receive side power state */
>  	bool tx_vote;		/* Clock must be on for TX */
>  	bool rx_vote;		/* Clock must be on for RX */
>  	struct timer_list tx_idle_timer;
>  	u32 tx_idle_delay;
>  	struct timer_list wake_retrans_timer;
>  	u32 wake_retrans;
> -	struct workqueue_struct *workqueue;
> +
>  	struct work_struct ws_awake_rx;
>  	struct work_struct ws_awake_device;
>  	struct work_struct ws_rx_vote_off;
>  	struct work_struct ws_tx_vote_off;
> +
> +	struct qca_ibs_stats stats;
> +};
> +
> +struct qca_data {
> +	struct hci_uart *hu;
> +	struct sk_buff *rx_skb;
> +	struct sk_buff_head txq;
> +	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue	*/
> +	struct workqueue_struct *workqueue;
>  	struct work_struct ctrl_memdump_evt;
>  	struct delayed_work ctrl_memdump_timeout;
>  	struct qca_memdump_data *qca_memdump;
> +	struct qca_ibs ibs;
>  	unsigned long flags;
>  	struct completion drop_ev_comp;
>  	wait_queue_head_t suspend_wait_q;
>  	enum qca_memdump_states memdump_state;
>  	struct mutex hci_memdump_lock;
> -
> -	/* For debugging purpose */
> -	u64 ibs_sent_wacks;
> -	u64 ibs_sent_slps;
> -	u64 ibs_sent_wakes;
> -	u64 ibs_recv_wacks;
> -	u64 ibs_recv_slps;
> -	u64 ibs_recv_wakes;
> -	u64 vote_last_jif;
> -	u32 vote_on_ms;
> -	u32 vote_off_ms;
> -	u64 tx_votes_on;
> -	u64 rx_votes_on;
> -	u64 tx_votes_off;
> -	u64 rx_votes_off;
> -	u64 votes_on;
> -	u64 votes_off;
>  };
>  
>  enum qca_speed_type {
> @@ -267,41 +276,41 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
>  	struct qca_data *qca = hu->priv;
>  	unsigned int diff;
>  
> -	bool old_vote = (qca->tx_vote | qca->rx_vote);
> +	bool old_vote = (qca->ibs.tx_vote | qca->ibs.rx_vote);
>  	bool new_vote;
>  
>  	switch (vote) {
>  	case HCI_IBS_VOTE_STATS_UPDATE:
> -		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
> +		diff = jiffies_to_msecs(jiffies - qca->ibs.stats.vote_last_jif);
>  
>  		if (old_vote)
> -			qca->vote_off_ms += diff;
> +			qca->ibs.stats.vote_off_ms += diff;
>  		else
> -			qca->vote_on_ms += diff;
> +			qca->ibs.stats.vote_on_ms += diff;
>  		return;
>  
>  	case HCI_IBS_TX_VOTE_CLOCK_ON:
> -		qca->tx_vote = true;
> -		qca->tx_votes_on++;
> +		qca->ibs.tx_vote = true;
> +		qca->ibs.stats.tx_votes_on++;
>  		new_vote = true;
>  		break;
>  
>  	case HCI_IBS_RX_VOTE_CLOCK_ON:
> -		qca->rx_vote = true;
> -		qca->rx_votes_on++;
> +		qca->ibs.rx_vote = true;
> +		qca->ibs.stats.rx_votes_on++;
>  		new_vote = true;
>  		break;
>  
>  	case HCI_IBS_TX_VOTE_CLOCK_OFF:
> -		qca->tx_vote = false;
> -		qca->tx_votes_off++;
> -		new_vote = qca->rx_vote | qca->tx_vote;
> +		qca->ibs.tx_vote = false;
> +		qca->ibs.stats.tx_votes_off++;
> +		new_vote = qca->ibs.rx_vote | qca->ibs.tx_vote;
>  		break;
>  
>  	case HCI_IBS_RX_VOTE_CLOCK_OFF:
> -		qca->rx_vote = false;
> -		qca->rx_votes_off++;
> -		new_vote = qca->rx_vote | qca->tx_vote;
> +		qca->ibs.rx_vote = false;
> +		qca->ibs.stats.rx_votes_off++;
> +		new_vote = qca->ibs.rx_vote | qca->ibs.tx_vote;
>  		break;
>  
>  	default:
> @@ -318,16 +327,16 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
>  		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
>  		       vote ? "true" : "false");
>  
> -		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
> +		diff = jiffies_to_msecs(jiffies - qca->ibs.stats.vote_last_jif);
>  
>  		if (new_vote) {
> -			qca->votes_on++;
> -			qca->vote_off_ms += diff;
> +			qca->ibs.stats.votes_on++;
> +			qca->ibs.stats.vote_off_ms += diff;
>  		} else {
> -			qca->votes_off++;
> -			qca->vote_on_ms += diff;
> +			qca->ibs.stats.votes_off++;
> +			qca->ibs.stats.vote_on_ms += diff;
>  		}
> -		qca->vote_last_jif = jiffies;
> +		qca->ibs.stats.vote_last_jif = jiffies;
>  	}
>  }
>  
> @@ -359,7 +368,7 @@ static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
>  static void qca_wq_awake_device(struct work_struct *work)
>  {
>  	struct qca_data *qca = container_of(work, struct qca_data,
> -					    ws_awake_device);
> +					    ibs.ws_awake_device);
>  	struct hci_uart *hu = qca->hu;
>  	unsigned long retrans_delay;
>  	unsigned long flags;
> @@ -369,19 +378,19 @@ static void qca_wq_awake_device(struct work_struct *work)
>  	/* Vote for serial clock */
>  	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
>  
> -	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
> +	spin_lock_irqsave(&qca->ibs.lock, flags);
>  
>  	/* Send wake indication to device */
>  	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
>  		BT_ERR("Failed to send WAKE to device");
>  
> -	qca->ibs_sent_wakes++;
> +	qca->ibs.stats.sent_wakes++;
>  
>  	/* Start retransmit timer */
> -	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
> -	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
> +	retrans_delay = msecs_to_jiffies(qca->ibs.wake_retrans);
> +	mod_timer(&qca->ibs.wake_retrans_timer, jiffies + retrans_delay);
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	/* Actually send the packets */
>  	hci_uart_tx_wakeup(hu);
> @@ -390,7 +399,7 @@ static void qca_wq_awake_device(struct work_struct *work)
>  static void qca_wq_awake_rx(struct work_struct *work)
>  {
>  	struct qca_data *qca = container_of(work, struct qca_data,
> -					    ws_awake_rx);
> +					    ibs.ws_awake_rx);
>  	struct hci_uart *hu = qca->hu;
>  	unsigned long flags;
>  
> @@ -398,8 +407,8 @@ static void qca_wq_awake_rx(struct work_struct *work)
>  
>  	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
>  
> -	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
> -	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
> +	spin_lock_irqsave(&qca->ibs.lock, flags);
> +	qca->ibs.rx_state = HCI_IBS_RX_AWAKE;
>  
>  	/* Always acknowledge device wake up,
>  	 * sending IBS message doesn't count as TX ON.
> @@ -407,9 +416,9 @@ static void qca_wq_awake_rx(struct work_struct *work)
>  	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
>  		BT_ERR("Failed to acknowledge device wake up");
>  
> -	qca->ibs_sent_wacks++;
> +	qca->ibs.stats.sent_wacks++;
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	/* Actually send the packets */
>  	hci_uart_tx_wakeup(hu);
> @@ -418,7 +427,7 @@ static void qca_wq_awake_rx(struct work_struct *work)
>  static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
>  {
>  	struct qca_data *qca = container_of(work, struct qca_data,
> -					    ws_rx_vote_off);
> +					    ibs.ws_rx_vote_off);
>  	struct hci_uart *hu = qca->hu;
>  
>  	BT_DBG("hu %p rx clock vote off", hu);
> @@ -429,7 +438,7 @@ static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
>  static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
>  {
>  	struct qca_data *qca = container_of(work, struct qca_data,
> -					    ws_tx_vote_off);
> +					    ibs.ws_tx_vote_off);
>  	struct hci_uart *hu = qca->hu;
>  
>  	BT_DBG("hu %p tx clock vote off", hu);
> @@ -445,25 +454,25 @@ static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
>  
>  static void hci_ibs_tx_idle_timeout(struct timer_list *t)
>  {
> -	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
> +	struct qca_data *qca = from_timer(qca, t, ibs.tx_idle_timer);
>  	struct hci_uart *hu = qca->hu;
>  	unsigned long flags;
>  
> -	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
> +	BT_DBG("hu %p idle timeout in %d state", hu, qca->ibs.tx_state);
>  
> -	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
> +	spin_lock_irqsave_nested(&qca->ibs.lock,
>  				 flags, SINGLE_DEPTH_NESTING);
>  
> -	switch (qca->tx_ibs_state) {
> +	switch (qca->ibs.tx_state) {
>  	case HCI_IBS_TX_AWAKE:
>  		/* TX_IDLE, go to SLEEP */
>  		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
>  			BT_ERR("Failed to send SLEEP to device");
>  			break;
>  		}
> -		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
> -		qca->ibs_sent_slps++;
> -		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
> +		qca->ibs.tx_state = HCI_IBS_TX_ASLEEP;
> +		qca->ibs.stats.sent_slps++;
> +		queue_work(qca->workqueue, &qca->ibs.ws_tx_vote_off);
>  		break;
>  
>  	case HCI_IBS_TX_ASLEEP:
> @@ -471,33 +480,33 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
>  		/* Fall through */
>  
>  	default:
> -		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
> +		BT_ERR("Spurious timeout tx state %d", qca->ibs.tx_state);
>  		break;
>  	}
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  }
>  
>  static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
>  {
> -	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
> +	struct qca_data *qca = from_timer(qca, t, ibs.wake_retrans_timer);
>  	struct hci_uart *hu = qca->hu;
>  	unsigned long flags, retrans_delay;
>  	bool retransmit = false;
>  
>  	BT_DBG("hu %p wake retransmit timeout in %d state",
> -		hu, qca->tx_ibs_state);
> +		hu, qca->ibs.tx_state);
>  
> -	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
> +	spin_lock_irqsave_nested(&qca->ibs.lock,
>  				 flags, SINGLE_DEPTH_NESTING);
>  
>  	/* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
>  	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
> -		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +		spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  		return;
>  	}
>  
> -	switch (qca->tx_ibs_state) {
> +	switch (qca->ibs.tx_state) {
>  	case HCI_IBS_TX_WAKING:
>  		/* No WAKE_ACK, retransmit WAKE */
>  		retransmit = true;
> @@ -505,9 +514,10 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
>  			BT_ERR("Failed to acknowledge device wake up");
>  			break;
>  		}
> -		qca->ibs_sent_wakes++;
> -		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
> -		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
> +		qca->ibs.stats.sent_wakes++;
> +		retrans_delay = msecs_to_jiffies(qca->ibs.wake_retrans);
> +		mod_timer(&qca->ibs.wake_retrans_timer,
> +			  jiffies + retrans_delay);
>  		break;
>  
>  	case HCI_IBS_TX_ASLEEP:
> @@ -515,11 +525,11 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
>  		/* Fall through */
>  
>  	default:
> -		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
> +		BT_ERR("Spurious timeout tx state %d", qca->ibs.tx_state);
>  		break;
>  	}
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	if (retransmit)
>  		hci_uart_tx_wakeup(hu);
> @@ -563,9 +573,9 @@ static int qca_open(struct hci_uart *hu)
>  		return -ENOMEM;
>  
>  	skb_queue_head_init(&qca->txq);
> -	skb_queue_head_init(&qca->tx_wait_q);
> +	skb_queue_head_init(&qca->ibs.tx_wait_q);
>  	skb_queue_head_init(&qca->rx_memdump_q);
> -	spin_lock_init(&qca->hci_ibs_lock);
> +	spin_lock_init(&qca->ibs.lock);
>  	mutex_init(&qca->hci_memdump_lock);
>  	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
>  	if (!qca->workqueue) {
> @@ -574,10 +584,10 @@ static int qca_open(struct hci_uart *hu)
>  		return -ENOMEM;
>  	}
>  
> -	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
> -	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
> -	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
> -	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
> +	INIT_WORK(&qca->ibs.ws_awake_rx, qca_wq_awake_rx);
> +	INIT_WORK(&qca->ibs.ws_awake_device, qca_wq_awake_device);
> +	INIT_WORK(&qca->ibs.ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
> +	INIT_WORK(&qca->ibs.ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
>  	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
>  	INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
>  			  qca_controller_memdump_timeout);
> @@ -587,10 +597,10 @@ static int qca_open(struct hci_uart *hu)
>  	init_completion(&qca->drop_ev_comp);
>  
>  	/* Assume we start with both sides asleep -- extra wakes OK */
> -	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
> -	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
> +	qca->ibs.tx_state = HCI_IBS_TX_ASLEEP;
> +	qca->ibs.rx_state = HCI_IBS_RX_ASLEEP;
>  
> -	qca->vote_last_jif = jiffies;
> +	qca->ibs.stats.vote_last_jif = jiffies;
>  
>  	hu->priv = qca;
>  
> @@ -602,14 +612,15 @@ static int qca_open(struct hci_uart *hu)
>  		}
>  	}
>  
> -	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
> -	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
> +	timer_setup(&qca->ibs.wake_retrans_timer, hci_ibs_wake_retrans_timeout,
> +		    0);
> +	qca->ibs.wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
>  
> -	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
> -	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
> +	timer_setup(&qca->ibs.tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
> +	qca->ibs.tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
>  
>  	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
> -	       qca->tx_idle_delay, qca->wake_retrans);
> +	       qca->ibs.tx_idle_delay, qca->ibs.wake_retrans);
>  
>  	return 0;
>  }
> @@ -628,36 +639,45 @@ static void qca_debugfs_init(struct hci_dev *hdev)
>  
>  	/* read only */
>  	mode = S_IRUGO;
> -	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
> -	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
> +	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->ibs.tx_state);
> +	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->ibs.rx_state);
>  	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
> -			   &qca->ibs_sent_slps);
> +			   &qca->ibs.stats.sent_slps);
>  	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
> -			   &qca->ibs_sent_wakes);
> +			   &qca->ibs.stats.sent_wakes);
>  	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
> -			   &qca->ibs_sent_wacks);
> +			   &qca->ibs.stats.sent_wacks);
>  	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
> -			   &qca->ibs_recv_slps);
> +			   &qca->ibs.stats.recv_slps);
>  	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
> -			   &qca->ibs_recv_wakes);
> +			   &qca->ibs.stats.recv_wakes);
>  	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
> -			   &qca->ibs_recv_wacks);
> -	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
> -	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
> -	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
> -	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
> -	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
> -	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
> -	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
> -	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
> -	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
> -	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
> +			   &qca->ibs.stats.recv_wacks);
> +	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->ibs.tx_vote);
> +	debugfs_create_u64("tx_votes_on", mode, ibs_dir,
> +			   &qca->ibs.stats.tx_votes_on);
> +	debugfs_create_u64("tx_votes_off", mode, ibs_dir,
> +			   &qca->ibs.stats.tx_votes_off);
> +	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->ibs.rx_vote);
> +	debugfs_create_u64("rx_votes_on", mode, ibs_dir,
> +			   &qca->ibs.stats.rx_votes_on);
> +	debugfs_create_u64("rx_votes_off", mode, ibs_dir,
> +			   &qca->ibs.stats.rx_votes_off);
> +	debugfs_create_u64("votes_on", mode, ibs_dir,
> +			   &qca->ibs.stats.votes_on);
> +	debugfs_create_u64("votes_off", mode, ibs_dir,
> +			   &qca->ibs.stats.votes_off);
> +	debugfs_create_u32("vote_on_ms", mode, ibs_dir,
> +			   &qca->ibs.stats.vote_on_ms);
> +	debugfs_create_u32("vote_off_ms", mode, ibs_dir,
> +			   &qca->ibs.stats.vote_off_ms);
>  
>  	/* read/write */
>  	mode = S_IRUGO | S_IWUSR;
> -	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
> +	debugfs_create_u32("wake_retrans", mode, ibs_dir,
> +			   &qca->ibs.wake_retrans);
>  	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
> -			   &qca->tx_idle_delay);
> +			   &qca->ibs.tx_idle_delay);
>  }
>  
>  /* Flush protocol data */
> @@ -667,7 +687,7 @@ static int qca_flush(struct hci_uart *hu)
>  
>  	BT_DBG("hu %p qca flush", hu);
>  
> -	skb_queue_purge(&qca->tx_wait_q);
> +	skb_queue_purge(&qca->ibs.tx_wait_q);
>  	skb_queue_purge(&qca->txq);
>  
>  	return 0;
> @@ -682,11 +702,11 @@ static int qca_close(struct hci_uart *hu)
>  
>  	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
>  
> -	skb_queue_purge(&qca->tx_wait_q);
> +	skb_queue_purge(&qca->ibs.tx_wait_q);
>  	skb_queue_purge(&qca->txq);
>  	skb_queue_purge(&qca->rx_memdump_q);
> -	del_timer(&qca->tx_idle_timer);
> -	del_timer(&qca->wake_retrans_timer);
> +	del_timer(&qca->ibs.tx_idle_timer);
> +	del_timer(&qca->ibs.wake_retrans_timer);
>  	destroy_workqueue(qca->workqueue);
>  	qca->hu = NULL;
>  
> @@ -710,23 +730,23 @@ static void device_want_to_wakeup(struct hci_uart *hu)
>  
>  	BT_DBG("hu %p want to wake up", hu);
>  
> -	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
> +	spin_lock_irqsave(&qca->ibs.lock, flags);
>  
> -	qca->ibs_recv_wakes++;
> +	qca->ibs.stats.recv_wakes++;
>  
>  	/* Don't wake the rx up when suspending. */
>  	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
> -		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +		spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  		return;
>  	}
>  
> -	switch (qca->rx_ibs_state) {
> +	switch (qca->ibs.rx_state) {
>  	case HCI_IBS_RX_ASLEEP:
>  		/* Make sure clock is on - we may have turned clock off since
>  		 * receiving the wake up indicator awake rx clock.
>  		 */
> -		queue_work(qca->workqueue, &qca->ws_awake_rx);
> -		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +		queue_work(qca->workqueue, &qca->ibs.ws_awake_rx);
> +		spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  		return;
>  
>  	case HCI_IBS_RX_AWAKE:
> @@ -737,17 +757,17 @@ static void device_want_to_wakeup(struct hci_uart *hu)
>  			BT_ERR("Failed to acknowledge device wake up");
>  			break;
>  		}
> -		qca->ibs_sent_wacks++;
> +		qca->ibs.stats.sent_wacks++;
>  		break;
>  
>  	default:
>  		/* Any other state is illegal */
>  		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
> -		       qca->rx_ibs_state);
> +		       qca->ibs.rx_state);
>  		break;
>  	}
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	/* Actually send the packets */
>  	hci_uart_tx_wakeup(hu);
> @@ -760,18 +780,18 @@ static void device_want_to_sleep(struct hci_uart *hu)
>  	unsigned long flags;
>  	struct qca_data *qca = hu->priv;
>  
> -	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
> +	BT_DBG("hu %p want to sleep in %d state", hu, qca->ibs.rx_state);
>  
> -	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
> +	spin_lock_irqsave(&qca->ibs.lock, flags);
>  
> -	qca->ibs_recv_slps++;
> +	qca->ibs.stats.recv_slps++;
>  
> -	switch (qca->rx_ibs_state) {
> +	switch (qca->ibs.rx_state) {
>  	case HCI_IBS_RX_AWAKE:
>  		/* Update state */
> -		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
> +		qca->ibs.rx_state = HCI_IBS_RX_ASLEEP;
>  		/* Vote off rx clock under workqueue */
> -		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
> +		queue_work(qca->workqueue, &qca->ibs.ws_rx_vote_off);
>  		break;
>  
>  	case HCI_IBS_RX_ASLEEP:
> @@ -780,13 +800,13 @@ static void device_want_to_sleep(struct hci_uart *hu)
>  	default:
>  		/* Any other state is illegal */
>  		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
> -		       qca->rx_ibs_state);
> +		       qca->ibs.rx_state);
>  		break;
>  	}
>  
>  	wake_up_interruptible(&qca->suspend_wait_q);
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  }
>  
>  /* Called upon wake-up-acknowledgement from the device
> @@ -799,33 +819,33 @@ static void device_woke_up(struct hci_uart *hu)
>  
>  	BT_DBG("hu %p woke up", hu);
>  
> -	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
> +	spin_lock_irqsave(&qca->ibs.lock, flags);
>  
> -	qca->ibs_recv_wacks++;
> +	qca->ibs.stats.recv_wacks++;
>  
>  	/* Don't react to the wake-up-acknowledgment when suspending. */
>  	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
> -		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +		spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  		return;
>  	}
>  
> -	switch (qca->tx_ibs_state) {
> +	switch (qca->ibs.tx_state) {
>  	case HCI_IBS_TX_AWAKE:
>  		/* Expect one if we send 2 WAKEs */
>  		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
> -		       qca->tx_ibs_state);
> +		       qca->ibs.tx_state);
>  		break;
>  
>  	case HCI_IBS_TX_WAKING:
>  		/* Send pending packets */
> -		while ((skb = skb_dequeue(&qca->tx_wait_q)))
> +		while ((skb = skb_dequeue(&qca->ibs.tx_wait_q)))
>  			skb_queue_tail(&qca->txq, skb);
>  
>  		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
> -		del_timer(&qca->wake_retrans_timer);
> -		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
> -		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
> -		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
> +		del_timer(&qca->ibs.wake_retrans_timer);
> +		idle_delay = msecs_to_jiffies(qca->ibs.tx_idle_delay);
> +		mod_timer(&qca->ibs.tx_idle_timer, jiffies + idle_delay);
> +		qca->ibs.tx_state = HCI_IBS_TX_AWAKE;
>  		break;
>  
>  	case HCI_IBS_TX_ASLEEP:
> @@ -833,11 +853,11 @@ static void device_woke_up(struct hci_uart *hu)
>  
>  	default:
>  		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
> -		       qca->tx_ibs_state);
> +		       qca->ibs.tx_state);
>  		break;
>  	}
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	/* Actually send the packets */
>  	hci_uart_tx_wakeup(hu);
> @@ -852,12 +872,12 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
>  	struct qca_data *qca = hu->priv;
>  
>  	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
> -	       qca->tx_ibs_state);
> +	       qca->ibs.tx_state);
>  
>  	/* Prepend skb with frame type */
>  	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
>  
> -	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
> +	spin_lock_irqsave(&qca->ibs.lock, flags);
>  
>  	/* Don't go to sleep in middle of patch download or
>  	 * Out-Of-Band(GPIOs control) sleep is selected.
> @@ -866,43 +886,43 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
>  	if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
>  	    test_bit(QCA_SUSPENDING, &qca->flags)) {
>  		skb_queue_tail(&qca->txq, skb);
> -		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +		spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  		return 0;
>  	}
>  
>  	/* Act according to current state */
> -	switch (qca->tx_ibs_state) {
> +	switch (qca->ibs.tx_state) {
>  	case HCI_IBS_TX_AWAKE:
>  		BT_DBG("Device awake, sending normally");
>  		skb_queue_tail(&qca->txq, skb);
> -		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
> -		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
> +		idle_delay = msecs_to_jiffies(qca->ibs.tx_idle_delay);
> +		mod_timer(&qca->ibs.tx_idle_timer, jiffies + idle_delay);
>  		break;
>  
>  	case HCI_IBS_TX_ASLEEP:
>  		BT_DBG("Device asleep, waking up and queueing packet");
>  		/* Save packet for later */
> -		skb_queue_tail(&qca->tx_wait_q, skb);
> +		skb_queue_tail(&qca->ibs.tx_wait_q, skb);
>  
> -		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
> +		qca->ibs.tx_state = HCI_IBS_TX_WAKING;
>  		/* Schedule a work queue to wake up device */
> -		queue_work(qca->workqueue, &qca->ws_awake_device);
> +		queue_work(qca->workqueue, &qca->ibs.ws_awake_device);
>  		break;
>  
>  	case HCI_IBS_TX_WAKING:
>  		BT_DBG("Device waking up, queueing packet");
>  		/* Transient state; just keep packet for later */
> -		skb_queue_tail(&qca->tx_wait_q, skb);
> +		skb_queue_tail(&qca->ibs.tx_wait_q, skb);
>  		break;
>  
>  	default:
>  		BT_ERR("Illegal tx state: %d (losing packet)",
> -		       qca->tx_ibs_state);
> +		       qca->ibs.tx_state);
>  		kfree_skb(skb);
>  		break;
>  	}
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	return 0;
>  }
> @@ -1732,10 +1752,10 @@ static void qca_power_shutdown(struct hci_uart *hu)
>  	 * still open, stop queueing the IBS data and flush all the buffered
>  	 * data in skb's.
>  	 */
> -	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
> +	spin_lock_irqsave(&qca->ibs.lock, flags);
>  	clear_bit(QCA_IBS_ENABLED, &qca->flags);
>  	qca_flush(hu);
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	hu->hdev->hw_error = NULL;
>  	hu->hdev->cmd_timeout = NULL;
> @@ -1959,18 +1979,18 @@ static int __maybe_unused qca_suspend(struct device *dev)
>  	if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
>  		return 0;
>  
> -	cancel_work_sync(&qca->ws_awake_device);
> -	cancel_work_sync(&qca->ws_awake_rx);
> +	cancel_work_sync(&qca->ibs.ws_awake_device);
> +	cancel_work_sync(&qca->ibs.ws_awake_rx);
>  
> -	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
> +	spin_lock_irqsave_nested(&qca->ibs.lock,
>  				 flags, SINGLE_DEPTH_NESTING);
>  
> -	switch (qca->tx_ibs_state) {
> +	switch (qca->ibs.tx_state) {
>  	case HCI_IBS_TX_WAKING:
> -		del_timer(&qca->wake_retrans_timer);
> +		del_timer(&qca->ibs.wake_retrans_timer);
>  		/* Fall through */
>  	case HCI_IBS_TX_AWAKE:
> -		del_timer(&qca->tx_idle_timer);
> +		del_timer(&qca->ibs.tx_idle_timer);
>  
>  		serdev_device_write_flush(hu->serdev);
>  		cmd = HCI_IBS_SLEEP_IND;
> @@ -1981,22 +2001,22 @@ static int __maybe_unused qca_suspend(struct device *dev)
>  			break;
>  		}
>  
> -		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
> -		qca->ibs_sent_slps++;
> +		qca->ibs.tx_state = HCI_IBS_TX_ASLEEP;
> +		qca->ibs.stats.sent_slps++;
>  
> -		qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
> +		qca_wq_serial_tx_clock_vote_off(&qca->ibs.ws_tx_vote_off);
>  		break;
>  
>  	case HCI_IBS_TX_ASLEEP:
>  		break;
>  
>  	default:
> -		BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
> +		BT_ERR("Spurious tx state %d", qca->ibs.tx_state);
>  		ret = -EINVAL;
>  		break;
>  	}
>  
> -	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
> +	spin_unlock_irqrestore(&qca->ibs.lock, flags);
>  
>  	if (ret < 0)
>  		goto error;
> @@ -2009,7 +2029,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
>  	 */
>  
>  	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
> -			qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
> +			qca->ibs.rx_state == HCI_IBS_RX_ASLEEP,
>  			msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
>  
>  	if (ret > 0)
> -- 
> 2.25.1.481.gfbce0eb801-goog
> 

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2020-03-16 23:05 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-16 23:02 [PATCH 0/2] Bluetooth: hci_qca: struct qca_data refactoring Matthias Kaehlcke
2020-03-16 23:02 ` [PATCH 1/2] WIP: Bluetooth: hci_qca: consolidate IBS fields in a struct Matthias Kaehlcke
2020-03-16 23:05   ` Matthias Kaehlcke
2020-03-16 23:02 ` [PATCH 2/2] Bluetooth: hci_qca: consolidate memdump " Matthias Kaehlcke

This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used by this inbox,
as well as URLs for NNTP newsgroup(s).