From: Luca Coelho <luca@coelho.fi>
To: kvalo@codeaurora.org
Cc: linux-wireless@vger.kernel.org,
	Sara Sharon <sara.sharon@intel.com>,
	Luca Coelho <luciano.coelho@intel.com>
Subject: [PATCH 08/25] iwlwifi: pcie: assign and access a000 TFD & TBs
Date: Thu, 15 Sep 2016 21:11:14 +0300
Message-ID: <20160915181131.32213-8-luca@coelho.fi>
In-Reply-To: <1473962425.5664.41.camel@coelho.fi>

From: Sara Sharon <sara.sharon@intel.com>

The previous patch introduced the new a000 TFD and TB formats. This
patch allocates the new structures and adjusts the code accordingly:
the TFD ring is now accessed as an opaque buffer indexed by a
per-configuration TFD size, and the TB accessors dispatch on the
format in use.

Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
---
 drivers/net/wireless/intel/iwlwifi/pcie/internal.h |  19 ++-
 drivers/net/wireless/intel/iwlwifi/pcie/trans.c    |  16 ++-
 drivers/net/wireless/intel/iwlwifi/pcie/tx.c       | 141 ++++++++++++++-------
 3 files changed, 120 insertions(+), 56 deletions(-)
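
Note (not part of the patch to be applied): below is a minimal,
standalone sketch of the access pattern this change introduces, for
reviewers who want to see it in isolation. The TFD ring becomes an
opaque buffer indexed by a per-configuration descriptor size, and each
accessor casts to the legacy or a000 ("TFH") layout depending on a
format flag. All demo_* names, the simplified field layouts and the
entry counts used here are illustrative assumptions, not the driver's
definitions; endianness handling is omitted.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the two descriptor layouts. */
struct demo_legacy_tb { uint32_t lo; uint16_t hi_n_len; };
struct demo_legacy_tfd { uint8_t num_tbs; struct demo_legacy_tb tbs[20]; };

struct demo_tfh_tb { uint16_t tb_len; uint64_t addr; };
struct demo_tfh_tfd { uint16_t num_tbs; struct demo_tfh_tb tbs[25]; };

struct demo_txq {
        void *tfds;       /* opaque ring; element size depends on the format */
        size_t tfd_size;  /* per-configuration descriptor size */
        int use_tfh;      /* 1 = new a000 format, 0 = legacy format */
};

/* Same idea as iwl_pcie_get_tfd(): index the ring by bytes, not by type. */
static void *demo_get_tfd(struct demo_txq *q, int idx)
{
        return (char *)q->tfds + q->tfd_size * idx;
}

/* Same idea as iwl_pcie_tfd_tb_get_len(): dispatch on the format flag. */
static uint16_t demo_tb_get_len(struct demo_txq *q, void *tfd, int idx)
{
        if (q->use_tfh) {
                struct demo_tfh_tfd *t = tfd;

                return t->tbs[idx].tb_len;        /* plain length field */
        } else {
                struct demo_legacy_tfd *t = tfd;

                return t->tbs[idx].hi_n_len >> 4; /* length in the upper 12 bits */
        }
}

int main(void)
{
        /* A two-entry ring sized for the larger (a000) descriptor. */
        struct demo_tfh_tfd ring[2];
        struct demo_txq q = { ring, sizeof(ring[0]), /* use_tfh */ 1 };
        struct demo_tfh_tfd *t = demo_get_tfd(&q, 1);

        t->num_tbs = 1;
        t->tbs[0].tb_len = 128;
        printf("tb 0 len = %u\n", demo_tb_get_len(&q, demo_get_tfd(&q, 1), 0));
        return 0;
}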

diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index b9dc82b..d185692 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -280,7 +280,7 @@ struct iwl_pcie_first_tb_buf {
  */
 struct iwl_txq {
 	struct iwl_queue q;
-	struct iwl_tfd *tfds;
+	void *tfds;
 	struct iwl_pcie_first_tb_buf *first_tb_bufs;
 	dma_addr_t first_tb_dma;
 	struct iwl_pcie_txq_entry *entries;
@@ -393,6 +393,7 @@ struct iwl_trans_pcie {
 	u8 n_no_reclaim_cmds;
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
 	u8 max_tbs;
+	u16 tfd_size;
 
 	enum iwl_amsdu_size rx_buf_size;
 	bool bc_table_dword;
@@ -489,9 +490,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			    struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
 
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *tfd,
+					  u8 idx)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	struct iwl_tfd *tfd_fh;
+	struct iwl_tfd_tb *tb;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+
+		return le16_to_cpu(tb->tb_len);
+	}
+
+	tfd_fh = (void *)tfd;
+	tb = &tfd_fh->tbs[idx];
 
 	return le16_to_cpu(tb->hi_n_len) >> 4;
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 21b1be1..e908bb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2437,15 +2437,14 @@ err:
 }
 #endif /*CONFIG_IWLWIFI_DEBUGFS */
 
-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans,
-				     struct iwl_tfd *tfd)
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 cmdlen = 0;
 	int i;
 
 	for (i = 0; i < trans_pcie->max_tbs; i++)
-		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+		cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
 
 	return cmdlen;
 }
@@ -2733,7 +2732,8 @@ static struct iwl_trans_dump_data
 		u8 idx = get_cmd_index(&cmdq->q, ptr);
 		u32 caplen, cmdlen;
 
-		cmdlen = iwl_trans_pcie_get_cmdlen(trans, &cmdq->tfds[ptr]);
+		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
+						   trans_pcie->tfd_size * ptr);
 		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
 
 		if (cmdlen) {
@@ -2876,10 +2876,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	else
 		addr_size = 36;
 
-	if (cfg->use_tfh)
+	if (cfg->use_tfh) {
 		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
-	else
+		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tb);
+
+	} else {
 		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+	}
 	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
 
 	pci_set_master(pdev);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 57a657a..f893ee1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -312,11 +312,30 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	}
 }
 
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+				     struct iwl_txq *txq, int idx)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+						  void *tfd, u8 idx)
+{
+	struct iwl_tfd *tfd_fh;
+	struct iwl_tfd_tb *tb;
+	dma_addr_t addr;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+
+		return (dma_addr_t)(le64_to_cpu(tb->addr));
+	}
+
+	tfd_fh = (void *)tfd;
+	tb = &tfd_fh->tbs[idx];
+	addr = get_unaligned_le32(&tb->lo);
 
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
 	if (sizeof(dma_addr_t) > sizeof(u32))
 		addr |=
 		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
@@ -324,35 +343,57 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 	return addr;
 }
 
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				       dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+				       u8 idx, dma_addr_t addr, u16 len)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
 
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+		put_unaligned_le64(addr, &tb->addr);
+		tb->tb_len = cpu_to_le16(len);
+
+		tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
 
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
+		u16 hi_n_len = len << 4;
 
-	tfd->num_tbs = idx + 1;
+		put_unaligned_le32(addr, &tb->lo);
+		if (sizeof(dma_addr_t) > sizeof(u32))
+			hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+		tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+		tfd_fh->num_tbs = idx + 1;
+	}
 }
 
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *tfd)
 {
-	return tfd->num_tbs & 0x1f;
+	struct iwl_tfd *tfd_fh;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		return le16_to_cpu(tfd_fh->num_tbs) & 0x1f;
+	}
+
+	tfd_fh = (void *)tfd;
+	return tfd_fh->num_tbs & 0x1f;
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 			       struct iwl_cmd_meta *meta,
-			       struct iwl_tfd *tfd)
+			       struct iwl_txq *txq, int index)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i, num_tbs;
+	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
 
 	/* Sanity check on number of chunks */
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
 	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
@@ -365,16 +406,28 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 	for (i = 1; i < num_tbs; i++) {
 		if (meta->tbs & BIT(i))
 			dma_unmap_page(trans->dev,
-				       iwl_pcie_tfd_tb_get_addr(tfd, i),
-				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
 				       DMA_TO_DEVICE);
 		else
 			dma_unmap_single(trans->dev,
-					 iwl_pcie_tfd_tb_get_addr(tfd, i),
-					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
+								  i),
+					 iwl_pcie_tfd_tb_get_len(trans, tfd,
+								 i),
 					 DMA_TO_DEVICE);
 	}
-	tfd->num_tbs = 0;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	}
+
 }
 
 /*
@@ -388,8 +441,6 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  */
 static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-	struct iwl_tfd *tfd_tmp = txq->tfds;
-
 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 	 * idx is bounded by n_window
 	 */
@@ -401,7 +452,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 	/* We have only q->n_window txq->entries, but we use
 	 * TFD_QUEUE_SIZE_MAX tfds
 	 */
-	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -425,19 +476,18 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_queue *q;
-	struct iwl_tfd *tfd, *tfd_tmp;
+	void *tfd;
 	u32 num_tbs;
 
 	q = &txq->q;
-	tfd_tmp = txq->tfds;
-	tfd = &tfd_tmp[q->write_ptr];
+	tfd = txq->tfds + trans_pcie->tfd_size * q->write_ptr;
 
 	if (reset)
-		memset(tfd, 0, sizeof(*tfd));
+		memset(tfd, 0, trans_pcie->tfd_size);
 
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
-	/* Each TFD can point to a maximum 20 Tx buffers */
+	/* Each TFD can point to a maximum max_tbs Tx buffers */
 	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
 			trans_pcie->max_tbs);
@@ -448,7 +498,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 		 "Unaligned address = %llx\n", (unsigned long long)addr))
 		return -EINVAL;
 
-	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
 
 	return num_tbs;
 }
@@ -458,7 +508,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 			       u32 txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
 	size_t tb0_buf_sz;
 	int i;
 
@@ -672,7 +722,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	/* De-alloc circular buffer of TFDs */
 	if (txq->tfds) {
 		dma_free_coherent(dev,
-				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
 				  txq->tfds, txq->q.dma_addr);
 		txq->q.dma_addr = 0;
 		txq->tfds = NULL;
@@ -1616,8 +1666,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 					   copy_size - tb0_size,
 					   DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1640,8 +1689,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		phys_addr = dma_map_single(trans->dev, (void *)data,
 					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1721,7 +1769,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	meta = &txq->entries[cmd_index].meta;
 	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
 
-	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+	iwl_pcie_tfd_unmap(trans, meta, txq, index);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -1919,6 +1967,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 			     struct iwl_cmd_meta *out_meta,
 			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_queue *q = &txq->q;
 	u16 tb2_len;
 	int i;
@@ -1934,8 +1983,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 						     skb->data + hdr_len,
 						     tb2_len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			return -EINVAL;
 		}
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
@@ -1954,8 +2002,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 					   skb_frag_size(frag), DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			return -EINVAL;
 		}
 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@@ -1965,8 +2012,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, q->write_ptr),
+			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
@@ -2041,8 +2088,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		IEEE80211_CCMP_HDR_LEN : 0;
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, q->write_ptr),
+			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     NULL, 0);
 
@@ -2198,7 +2245,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	return 0;
 
 out_unmap:
-	iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+	iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 	return ret;
 }
 #else /* CONFIG_INET */
-- 
2.9.3
