* [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures
@ 2017-06-04  8:15 Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 074/111] r8152: avoid start_xmit to call napi_schedule during autosuspend Levin, Alexander (Sasha Levin)
                   ` (37 more replies)
  0 siblings, 38 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Parav Pandit, Levin, Alexander (Sasha Levin)

From: Parav Pandit <parav@mellanox.com>

[ Upstream commit 748ff8408f8e208f279ba221e5c12612fbb4dddb ]

This patch performs DMA sync operations on nvme_command
and nvme_completion.

nvme_command is synced
(a) on receipt of the recv queue completion, for CPU access.
(b) before posting the recv WQE back to the RDMA adapter, for device access.

nvme_completion is synced
(a) on receipt of the recv queue completion of the associated
nvme_command, for CPU access.
(b) before posting the send WQE to the RDMA adapter, for device access.
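
For reference, the pairing this amounts to is sketched below using the
identifiers from the hunks that follow (an annotation, not extra code in
the patch): sync for the CPU before software parses a buffer the HCA
wrote, and sync for the device before the buffer is handed back.

	/* recv completion: CPU is about to parse the nvme_command */
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);

	/* repost: the recv buffer is owned by the device again */
	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);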

This patch is generated for git://git.infradead.org/nvme-fabrics.git
Branch: nvmf-4.10

Signed-off-by: Parav Pandit <parav@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/nvme/target/rdma.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 005ef5d17a19..ca8ddc3fb19e 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
 	struct ib_recv_wr *bad_wr;
 
+	ib_dma_sync_single_for_device(ndev->device,
+		cmd->sge[0].addr, cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 		first_wr = &rsp->send_wr;
 
 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	ib_dma_sync_single_for_device(rsp->queue->dev->device,
+		rsp->send_sge.addr, rsp->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
+
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->send_sge.addr, cmd->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
-- 
2.11.0


* [PATCH for v4.9 LTS 074/111] r8152: avoid start_xmit to call napi_schedule during autosuspend
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 075/111] r8152: check rx after napi is enabled Levin, Alexander (Sasha Levin)
                   ` (36 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: hayeswang, David S . Miller, Levin, Alexander (Sasha Levin)

From: hayeswang <hayeswang@realtek.com>

[ Upstream commit 26afec39306926654e9cd320f19bbf3685bb0997 ]

Adjust where the SELECTIVE_SUSPEND flag is set to prevent start_xmit()
from calling napi_schedule() directly during runtime suspend.

After napi_disable() has been called or the WORK_ENABLE flag has been
cleared, scheduling the napi is useless.
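
A minimal sketch of the ordering this relies on (a generic pattern for
illustration, not the exact driver code):

	/* suspend path: publish the flag before doing the later checks */
	set_bit(SELECTIVE_SUSPEND, &tp->flags);
	smp_mb__after_atomic();

	/* xmit path: seeing the flag means "do not call napi_schedule()
	 * here, defer the work instead"
	 */
	if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
		;	/* defer, e.g. via delayed work */
	else
		napi_schedule(&tp->napi);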

Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/usb/r8152.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 90b426c5ffce..92c53d64fdc2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3583,10 +3583,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 	struct net_device *netdev = tp->netdev;
 	int ret = 0;
 
+	set_bit(SELECTIVE_SUSPEND, &tp->flags);
+	smp_mb__after_atomic();
+
 	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
 		u32 rcr = 0;
 
 		if (delay_autosuspend(tp)) {
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
 			ret = -EBUSY;
 			goto out1;
 		}
@@ -3603,6 +3608,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 			if (!(ocp_data & RXFIFO_EMPTY)) {
 				rxdy_gated_en(tp, false);
 				ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+				clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+				smp_mb__after_atomic();
 				ret = -EBUSY;
 				goto out1;
 			}
@@ -3622,8 +3629,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 		}
 	}
 
-	set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
 out1:
 	return ret;
 }
@@ -3679,12 +3684,13 @@ static int rtl8152_resume(struct usb_interface *intf)
 	if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
 		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
 			tp->rtl_ops.autosuspend_en(tp, false);
-			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 			napi_disable(&tp->napi);
 			set_bit(WORK_ENABLE, &tp->flags);
 			if (netif_carrier_ok(tp->netdev))
 				rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
 		} else {
 			tp->rtl_ops.up(tp);
 			netif_carrier_off(tp->netdev);
-- 
2.11.0


* [PATCH for v4.9 LTS 075/111] r8152: check rx after napi is enabled
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 074/111] r8152: avoid start_xmit to call napi_schedule during autosuspend Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 077/111] r8152: fix rtl8152_post_reset function Levin, Alexander (Sasha Levin)
                   ` (35 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: hayeswang, David S . Miller, Levin, Alexander (Sasha Levin)

From: hayeswang <hayeswang@realtek.com>

[ Upstream commit 7489bdadb7d17d3c81e39b85688500f700beb790 ]

Schedule the napi after napi_enable() for rx, if it is necessary.

If the rx is completed when napi is disabled, the scheduling of napi
would be lost. Then, no one handles the rx packet until the next napi
is scheduled.
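
An illustrative interleaving of the lost wakeup that the hunks below
close (identifiers from the diff; the completion side is paraphrased):

	/* resume / post_reset path         rx completion path
	 *
	 * napi_disable(&tp->napi);
	 *                                  buffer added to tp->rx_done
	 *                                  napi_schedule(&tp->napi);  <- lost, napi disabled
	 * napi_enable(&tp->napi);
	 * if (!list_empty(&tp->rx_done))
	 *         napi_schedule(&tp->napi); <- recovers the missed event
	 */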

Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/usb/r8152.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 92c53d64fdc2..32f1a4c46e71 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"7"
+#define NET_VERSION		"8"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -3552,6 +3552,9 @@ static int rtl8152_post_reset(struct usb_interface *intf)
 
 	napi_enable(&tp->napi);
 
+	if (!list_empty(&tp->rx_done))
+		napi_schedule(&tp->napi);
+
 	return 0;
 }
 
@@ -3691,6 +3694,8 @@ static int rtl8152_resume(struct usb_interface *intf)
 			napi_enable(&tp->napi);
 			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 			smp_mb__after_atomic();
+			if (!list_empty(&tp->rx_done))
+				napi_schedule(&tp->napi);
 		} else {
 			tp->rtl_ops.up(tp);
 			netif_carrier_off(tp->netdev);
-- 
2.11.0


* [PATCH for v4.9 LTS 076/111] r8152: re-schedule napi for tx
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (3 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 078/111] r8152: avoid start_xmit to schedule napi when napi is disabled Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 079/111] net-next: ethernet: mediatek: change the compatible string Levin, Alexander (Sasha Levin)
                   ` (32 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: hayeswang, David S . Miller, Levin, Alexander (Sasha Levin)

From: hayeswang <hayeswang@realtek.com>

[ Upstream commit 248b213ad908b88db15941202ef7cb7eb137c1a0 ]

Re-schedule napi after napi_complete() for tx, if it is necessary.

In r8152_poll(), if the tx is completed after tx_bottom() and before
napi_complete(), the scheduling of napi would be lost. Then, no
one handles the next tx until the next napi_schedule() is called.
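
The added re-check fires only when there is still tx work that nothing
else will pick up (the reading of the condition is mine, the identifiers
are from the hunk below):

	else if (!skb_queue_empty(&tp->tx_queue) &&	/* packets still queued for tx */
		 !list_empty(&tp->tx_free))		/* and a free tx buffer to carry them */
		napi_schedule(napi);			/* so poll again instead of waiting */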

Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/usb/r8152.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 32f1a4c46e71..8b8343b3fc39 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		if (!list_empty(&tp->rx_done))
 			napi_schedule(napi);
+		else if (!skb_queue_empty(&tp->tx_queue) &&
+			 !list_empty(&tp->tx_free))
+			napi_schedule(napi);
 	}
 
 	return work_done;
-- 
2.11.0


* [PATCH for v4.9 LTS 077/111] r8152: fix rtl8152_post_reset function
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 074/111] r8152: avoid start_xmit to call napi_schedule during autosuspend Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 075/111] r8152: check rx after napi is enabled Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 078/111] r8152: avoid start_xmit to schedule napi when napi is disabled Levin, Alexander (Sasha Levin)
                   ` (34 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: hayeswang, David S . Miller, Levin, Alexander (Sasha Levin)

From: hayeswang <hayeswang@realtek.com>

[ Upstream commit 2c561b2b728ca4013e76d6439bde2c137503745e ]

The rtl8152_post_reset() should submit the rx URBs and the interrupt
transfer, otherwise the rx wouldn't work and link changes couldn't be
detected.

Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/usb/r8152.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 8b8343b3fc39..039607dcfa8d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3548,12 +3548,14 @@ static int rtl8152_post_reset(struct usb_interface *intf)
 	if (netif_carrier_ok(netdev)) {
 		mutex_lock(&tp->control);
 		tp->rtl_ops.enable(tp);
+		rtl_start_rx(tp);
 		rtl8152_set_rx_mode(netdev);
 		mutex_unlock(&tp->control);
 		netif_wake_queue(netdev);
 	}
 
 	napi_enable(&tp->napi);
+	usb_submit_urb(tp->intr_urb, GFP_KERNEL);
 
 	if (!list_empty(&tp->rx_done))
 		napi_schedule(&tp->napi);
-- 
2.11.0


* [PATCH for v4.9 LTS 078/111] r8152: avoid start_xmit to schedule napi when napi is disabled
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (2 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 077/111] r8152: fix rtl8152_post_reset function Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 076/111] r8152: re-schedule napi for tx Levin, Alexander (Sasha Levin)
                   ` (33 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: hayeswang, David S . Miller, Levin, Alexander (Sasha Levin)

From: hayeswang <hayeswang@realtek.com>

[ Upstream commit de9bf29dd6e4a8a874cb92f8901aed50a9d0b1d3 ]

Stop the tx when the napi is disabled to prevent napi_schedule() from
being called.

Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/usb/r8152.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 039607dcfa8d..afb953a258cd 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3158,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
 		if (!netif_carrier_ok(netdev)) {
 			tp->rtl_ops.enable(tp);
 			set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+			netif_stop_queue(netdev);
 			napi_disable(&tp->napi);
 			netif_carrier_on(netdev);
 			rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			netif_wake_queue(netdev);
+			netif_info(tp, link, netdev, "carrier on\n");
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3169,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
 			napi_disable(&tp->napi);
 			tp->rtl_ops.disable(tp);
 			napi_enable(&tp->napi);
+			netif_info(tp, link, netdev, "carrier off\n");
 		}
 	}
 }
@@ -3518,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
 	if (!netif_running(netdev))
 		return 0;
 
+	netif_stop_queue(netdev);
 	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
 	if (netif_carrier_ok(netdev)) {
-		netif_stop_queue(netdev);
 		mutex_lock(&tp->control);
 		tp->rtl_ops.disable(tp);
 		mutex_unlock(&tp->control);
@@ -3551,10 +3555,10 @@ static int rtl8152_post_reset(struct usb_interface *intf)
 		rtl_start_rx(tp);
 		rtl8152_set_rx_mode(netdev);
 		mutex_unlock(&tp->control);
-		netif_wake_queue(netdev);
 	}
 
 	napi_enable(&tp->napi);
+	netif_wake_queue(netdev);
 	usb_submit_urb(tp->intr_urb, GFP_KERNEL);
 
 	if (!list_empty(&tp->rx_done))
-- 
2.11.0


* [PATCH for v4.9 LTS 079/111] net-next: ethernet: mediatek: change the compatible string
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (4 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 076/111] r8152: re-schedule napi for tx Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 081/111] bnxt_en: Enhance autoneg support Levin, Alexander (Sasha Levin)
                   ` (31 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: John Crispin, David S . Miller, Levin, Alexander (Sasha Levin)

From: John Crispin <john@phrozen.org>

[ Upstream commit 8b901f6bbcf12a20e43105d161bedde093431e61 ]

When the binding was defined, I was not aware that mt2701 was an earlier
version of the SoC. For the sake of consistency, the ethernet driver should
use mt2701 inside the compat string as this is the earliest SoC with the
ethernet core.

The ethernet driver is currently of no real use until we finish and
upstream the DSA driver. There are no users of this binding yet. It should
be safe to fix this now before it is too late and we need to provide
backward compatibility for the mt7623-eth compat string.

Reported-by: Sean Wang <sean.wang@mediatek.com>
Signed-off-by: John Crispin <john@phrozen.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 86a89cbd3ec9..4832223f1500 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2518,7 +2518,7 @@ static int mtk_remove(struct platform_device *pdev)
 }
 
 const struct of_device_id of_mtk_match[] = {
-	{ .compatible = "mediatek,mt7623-eth" },
+	{ .compatible = "mediatek,mt2701-eth" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, of_mtk_match);
-- 
2.11.0


* [PATCH for v4.9 LTS 080/111] bnxt_en: Fix bnxt_reset() in the slow path task.
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (6 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 081/111] bnxt_en: Enhance autoneg support Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 082/111] bnxt_en: Fix RTNL lock usage on bnxt_update_link() Levin, Alexander (Sasha Levin)
                   ` (29 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Michael Chan, David S . Miller, Levin, Alexander (Sasha Levin)

From: Michael Chan <michael.chan@broadcom.com>

[ Upstream commit a551ee94ea723b4af9b827c7460f108bc13425ee ]

In bnxt_sp_task(), we set the BNXT_STATE_IN_SP_TASK bit so that bnxt_close()
will synchronize and wait for bnxt_sp_task() to finish.  Some functions
in bnxt_sp_task() require us to clear BNXT_STATE_IN_SP_TASK and then
acquire rtnl_lock() to prevent race conditions.

There are some bugs related to this logic. This patch refactors the code
to have common bnxt_rtnl_lock_sp() and bnxt_rtnl_unlock_sp() to handle
the RTNL and the clearing/setting of the bit.  Multiple functions will
need the same logic.  We also need to move bnxt_reset() to the end of
bnxt_sp_task().  Functions that clear BNXT_STATE_IN_SP_TASK must be the
last functions to be called in bnxt_sp_task().  The common scheme will
handle the condition properly.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 38 ++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 48ee4110ef6e..b37108e077c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6080,23 +6080,32 @@ static void bnxt_timer(unsigned long data)
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
 {
-	/* bnxt_reset_task() calls bnxt_close_nic() which waits
-	 * for BNXT_STATE_IN_SP_TASK to clear.
-	 * If there is a parallel dev_close(), bnxt_close() may be holding
+	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+	 * set.  If the device is being closed, bnxt_close() may be holding
 	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
 	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
 	 */
 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	rtnl_lock();
-	if (test_bit(BNXT_STATE_OPEN, &bp->state))
-		bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	rtnl_unlock();
 }
 
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+	bnxt_rtnl_lock_sp(bp);
+	if (test_bit(BNXT_STATE_OPEN, &bp->state))
+		bnxt_reset_task(bp, silent);
+	bnxt_rtnl_unlock_sp(bp);
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
@@ -6142,18 +6151,21 @@ static void bnxt_sp_task(struct work_struct *work)
 		bnxt_hwrm_tunnel_dst_port_free(
 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
 	}
-	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
-		bnxt_reset(bp, false);
-
-	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
-		bnxt_reset(bp, true);
-
 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
 		bnxt_get_port_module_status(bp);
 
 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_port_qstats(bp);
 
+	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+	 * must be the last functions to be called before exiting.
+	 */
+	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+		bnxt_reset(bp, false);
+
+	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
+		bnxt_reset(bp, true);
+
 	smp_mb__before_atomic();
 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }
-- 
2.11.0


* [PATCH for v4.9 LTS 081/111] bnxt_en: Enhance autoneg support.
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (5 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 079/111] net-next: ethernet: mediatek: change the compatible string Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 080/111] bnxt_en: Fix bnxt_reset() in the slow path task Levin, Alexander (Sasha Levin)
                   ` (30 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Michael Chan, David S . Miller, Levin, Alexander (Sasha Levin)

From: Michael Chan <michael.chan@broadcom.com>

[ Upstream commit 286ef9d64ea7435a1e323d12b44a309e15cbff0e ]

On some dual port NICs, the speed setting on one port can affect the
available speed on the other port.  Add logic to detect these changes
and adjust the advertised speed settings when necessary.
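
The detection added below works on the speed bitmaps; a worked example
with invented values:

	/* support_auto_speeds = 0x0006   (two speeds still supported)
	 * advertising         = 0x0007   (still advertises a dropped speed)
	 *
	 * diff = 0x0006 ^ 0x0007 = 0x0001
	 * (0x0006 | 0x0001) = 0x0007 != 0x0006
	 *   -> an advertised bit is no longer supported, so advertising is
	 *      rewritten to support_auto_speeds and, when autoneg is on,
	 *      the link settings are re-applied.
	 */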

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 23 +++++++++++++++++++++++
 drivers/net/ethernet/broadcom/bnxt/bnxt.h |  1 +
 2 files changed, 24 insertions(+)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b37108e077c3..b30d447f8833 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1499,6 +1499,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
 				    speed);
 		}
+		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
 		/* fall thru */
 	}
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
@@ -5110,6 +5111,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 	struct hwrm_port_phy_qcfg_input req = {0};
 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	u8 link_up = link_info->link_up;
+	u16 diff;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
 
@@ -5197,6 +5199,23 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 		link_info->link_up = 0;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	diff = link_info->support_auto_speeds ^ link_info->advertising;
+	if ((link_info->support_auto_speeds | diff) !=
+	    link_info->support_auto_speeds) {
+		/* An advertised speed is no longer supported, so we need to
+		 * update the advertisement settings.  See bnxt_reset() for
+		 * comments about the rtnl_lock() sequence below.
+		 */
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_lock();
+		link_info->advertising = link_info->support_auto_speeds;
+		if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
+		    (link_info->autoneg & BNXT_AUTONEG_SPEED))
+			bnxt_hwrm_set_link_setting(bp, true, false);
+		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_unlock();
+	}
 	return 0;
 }
 
@@ -6126,6 +6145,10 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
 		bnxt_cfg_ntp_filters(bp);
 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+				       &bp->sp_event))
+			bnxt_hwrm_phy_qcaps(bp);
+
 		rc = bnxt_update_link(bp, true);
 		if (rc)
 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 51b164a0e844..666bc0608ed7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1089,6 +1089,7 @@ struct bnxt {
 #define BNXT_RESET_TASK_SILENT_SP_EVENT	11
 #define BNXT_GENEVE_ADD_PORT_SP_EVENT	12
 #define BNXT_GENEVE_DEL_PORT_SP_EVENT	13
+#define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
 
 	struct bnxt_pf_info	pf;
 #ifdef CONFIG_BNXT_SRIOV
-- 
2.11.0


* [PATCH for v4.9 LTS 082/111] bnxt_en: Fix RTNL lock usage on bnxt_update_link().
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (7 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 080/111] bnxt_en: Fix bnxt_reset() in the slow path task Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 083/111] bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status() Levin, Alexander (Sasha Levin)
                   ` (28 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Michael Chan, David S . Miller, Levin, Alexander (Sasha Levin)

From: Michael Chan <michael.chan@broadcom.com>

[ Upstream commit 0eaa24b971ae251ae9d3be23f77662a655532063 ]

bnxt_update_link() is called from multiple code paths.  Most callers,
such as open, ethtool, already hold RTNL.  Only the caller bnxt_sp_task()
does not.  So it is a bug to take RTNL inside bnxt_update_link().

Fix it by removing the RTNL inside bnxt_update_link().  The function
now expects the caller to always hold RTNL.

In bnxt_sp_task(), call bnxt_rtnl_lock_sp() before calling
bnxt_update_link().  We also need to move the call to the end of
bnxt_sp_task() since it will be clearing the BNXT_STATE_IN_SP_TASK bit.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 37 +++++++++++++++----------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b30d447f8833..9f42850a10cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5204,17 +5204,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 	if ((link_info->support_auto_speeds | diff) !=
 	    link_info->support_auto_speeds) {
 		/* An advertised speed is no longer supported, so we need to
-		 * update the advertisement settings.  See bnxt_reset() for
-		 * comments about the rtnl_lock() sequence below.
+		 * update the advertisement settings.  Caller holds RTNL
+		 * so we can modify link settings.
 		 */
-		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-		rtnl_lock();
 		link_info->advertising = link_info->support_auto_speeds;
-		if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
-		    (link_info->autoneg & BNXT_AUTONEG_SPEED))
+		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
 			bnxt_hwrm_set_link_setting(bp, true, false);
-		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-		rtnl_unlock();
 	}
 	return 0;
 }
@@ -6130,7 +6125,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *);
 static void bnxt_sp_task(struct work_struct *work)
 {
 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
-	int rc;
 
 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	smp_mb__after_atomic();
@@ -6144,16 +6138,6 @@ static void bnxt_sp_task(struct work_struct *work)
 
 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
 		bnxt_cfg_ntp_filters(bp);
-	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
-				       &bp->sp_event))
-			bnxt_hwrm_phy_qcaps(bp);
-
-		rc = bnxt_update_link(bp, true);
-		if (rc)
-			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
-				   rc);
-	}
 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_exec_fwd_req(bp);
 	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6183,6 +6167,21 @@ static void bnxt_sp_task(struct work_struct *work)
 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
 	 * must be the last functions to be called before exiting.
 	 */
+	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+		int rc = 0;
+
+		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+				       &bp->sp_event))
+			bnxt_hwrm_phy_qcaps(bp);
+
+		bnxt_rtnl_lock_sp(bp);
+		if (test_bit(BNXT_STATE_OPEN, &bp->state))
+			rc = bnxt_update_link(bp, true);
+		bnxt_rtnl_unlock_sp(bp);
+		if (rc)
+			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+				   rc);
+	}
 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, false);
 
-- 
2.11.0


* [PATCH for v4.9 LTS 083/111] bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status().
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (8 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 082/111] bnxt_en: Fix RTNL lock usage on bnxt_update_link() Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 084/111] sctp: sctp gso should set feature with NETIF_F_SG when calling skb_segment Levin, Alexander (Sasha Levin)
                   ` (27 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Michael Chan, David S . Miller, Levin, Alexander (Sasha Levin)

From: Michael Chan <michael.chan@broadcom.com>

[ Upstream commit 90c694bb71819fb5bd3501ac397307d7e41ddeca ]

bnxt_get_port_module_status() calls bnxt_update_link() which expects
RTNL to be held.  Since bnxt_sp_task() does not hold RTNL, we need to
call it with a prior call to bnxt_rtnl_lock_sp(), and the call needs to
be moved to the end of bnxt_sp_task().

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9f42850a10cf..5cc0f8cfec87 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6158,9 +6158,6 @@ static void bnxt_sp_task(struct work_struct *work)
 		bnxt_hwrm_tunnel_dst_port_free(
 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
 	}
-	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
-		bnxt_get_port_module_status(bp);
-
 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_port_qstats(bp);
 
@@ -6182,6 +6179,12 @@ static void bnxt_sp_task(struct work_struct *work)
 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
 				   rc);
 	}
+	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+		bnxt_rtnl_lock_sp(bp);
+		if (test_bit(BNXT_STATE_OPEN, &bp->state))
+			bnxt_get_port_module_status(bp);
+		bnxt_rtnl_unlock_sp(bp);
+	}
 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, false);
 
-- 
2.11.0


* [PATCH for v4.9 LTS 084/111] sctp: sctp gso should set feature with NETIF_F_SG when calling skb_segment
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (9 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 083/111] bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status() Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 085/111] sctp: sctp_addr_id2transport should verify the addr before looking up assoc Levin, Alexander (Sasha Levin)
                   ` (26 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Xin Long, David S . Miller, Levin, Alexander (Sasha Levin)

From: Xin Long <lucien.xin@gmail.com>

[ Upstream commit 5207f3996338e1db71363fe381c81aaf1e54e4e3 ]

Now sctp gso puts segments into skb's frag_list, then processes these
segments in skb_segment. But skb_segment handles them only when sg is
enabled, as it's in the same branch with skb's frags.

Although almost all NICs other than some old ones support sg, since
commit 1e16aa3ddf86 ("net: gso: use feature flag argument in all
protocol gso handlers"), features &= skb->dev->hw_enc_features, and
xfrm_output_gso calls skb_segment with features = 0, which means sctp
gso would call skb_segment with sg = 0, and skb_segment would not work
as expected.

This patch is to fix it by setting features param with NETIF_F_SG when
calling skb_segment so that it can go the right branch to process the
skb's frag_list.

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 net/sctp/offload.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0cca69..4f5a2b580aa5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
 		goto out;
 	}
 
-	segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+	segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
 	if (IS_ERR(segs))
 		goto out;
 
-- 
2.11.0


* [PATCH for v4.9 LTS 085/111] sctp: sctp_addr_id2transport should verify the addr before looking up assoc
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (10 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 084/111] sctp: sctp gso should set feature with NETIF_F_SG when calling skb_segment Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 087/111] mn10300: fix build error of missing fpu_save() Levin, Alexander (Sasha Levin)
                   ` (25 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Xin Long, David S . Miller, Levin, Alexander (Sasha Levin)

From: Xin Long <lucien.xin@gmail.com>

[ Upstream commit 6f29a130613191d3c6335169febe002cba00edf5 ]

sctp_addr_id2transport is a function for sockopt to look up an assoc by
address. As the address comes from userspace, it can be a v4-mapped v6
address. But the sctp protocol stack always handles a v4-mapped
v6 address as a v4 address. So it's necessary to convert it to a v4
address before looking up the assoc by address.

This patch is to fix it by calling sctp_verify_addr in which it can do
this conversion before calling sctp_endpoint_lookup_assoc, just like
what sctp_sendmsg and __sctp_connect do for the address from users.

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 net/sctp/socket.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 14346dccc4fe..e1719c695174 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
 					      sctp_assoc_t id)
 {
 	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
-	struct sctp_transport *transport;
+	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
 	union sctp_addr *laddr = (union sctp_addr *)addr;
+	struct sctp_transport *transport;
+
+	if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+		return NULL;
 
 	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
 					       laddr,
-- 
2.11.0


* [PATCH for v4.9 LTS 086/111] usb: musb: Fix external abort on non-linefetch for musb_irq_work()
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (12 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 087/111] mn10300: fix build error of missing fpu_save() Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 088/111] romfs: use different way to generate fsid for BLOCK or MTD Levin, Alexander (Sasha Levin)
                   ` (23 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Tony Lindgren, Bin Liu, Greg Kroah-Hartman, Levin,
	Alexander (Sasha Levin)

From: Tony Lindgren <tony@atomide.com>

[ Upstream commit 3ba7b7795b7e8889af1377904c55c7fae9e0c775 ]

While testing musb host mode cable plugging on a BeagleBone, I came across this
error:

Unhandled fault: external abort on non-linefetch (0x1008) at 0xd1dcfc60
...
[<bf668390>] (musb_default_readb [musb_hdrc]) from [<bf668578>] (musb_irq_work+0x1c/0x180 [musb_hdrc])
[<bf668578>] (musb_irq_work [musb_hdrc]) from [<c0156554>] (process_one_work+0x2b4/0x808)
[<c0156554>] (process_one_work) from [<c015767c>] (worker_thread+0x3c/0x550)
[<c015767c>] (worker_thread) from [<c015d568>] (kthread+0x104/0x148)
[<c015d568>] (kthread) from [<c01078d0>] (ret_from_fork+0x14/0x24)
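
My reading of the trace: musb_default_readb() touches controller
registers from the work handler while the device is runtime-suspended,
hence the bus abort.  The fix below wraps the handler in a runtime-PM
reference, along the lines of:

	error = pm_runtime_get_sync(musb->controller);	/* power up first */
	if (error < 0)
		return;
	/* ... register access ... */
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);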

Signed-off-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Bin Liu <b-liu@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/usb/musb/musb_core.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 358feca54945..261ed2ca28f9 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1909,6 +1909,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
 static void musb_irq_work(struct work_struct *data)
 {
 	struct musb *musb = container_of(data, struct musb, irq_work.work);
+	int error;
+
+	error = pm_runtime_get_sync(musb->controller);
+	if (error < 0) {
+		dev_err(musb->controller, "Could not enable: %i\n", error);
+
+		return;
+	}
 
 	musb_pm_runtime_check_session(musb);
 
@@ -1916,6 +1924,9 @@ static void musb_irq_work(struct work_struct *data)
 		musb->xceiv_old_state = musb->xceiv->otg->state;
 		sysfs_notify(&musb->controller->kobj, NULL, "mode");
 	}
+
+	pm_runtime_mark_last_busy(musb->controller);
+	pm_runtime_put_autosuspend(musb->controller);
 }
 
 static void musb_recover_from_babble(struct musb *musb)
-- 
2.11.0


* [PATCH for v4.9 LTS 087/111] mn10300: fix build error of missing fpu_save()
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (11 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 085/111] sctp: sctp_addr_id2transport should verify the addr before looking up assoc Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 086/111] usb: musb: Fix external abort on non-linefetch for musb_irq_work() Levin, Alexander (Sasha Levin)
                   ` (24 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Randy Dunlap, Andrew Morton, Linus Torvalds, Levin,
	Alexander (Sasha Levin)

From: Randy Dunlap <rdunlap@infradead.org>

[ Upstream commit 3705ccfdd1e8b539225ce20e3925a945cc788d67 ]

When CONFIG_FPU is not enabled on arch/mn10300, <asm/switch_to.h> causes
a build error with a call to fpu_save():

  kernel/built-in.o: In function `.L410':
  core.c:(.sched.text+0x28a): undefined reference to `fpu_save'

Fix this by including <asm/fpu.h> in <asm/switch_to.h> so that an empty
static inline fpu_save() is defined.

Link: http://lkml.kernel.org/r/dc421c4f-4842-4429-1b99-92865c2f24b6@infradead.org
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: kbuild test robot <fengguang.wu@intel.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 arch/mn10300/include/asm/switch_to.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311735c8..67e333aa7629 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)						\
-- 
2.11.0


* [PATCH for v4.9 LTS 088/111] romfs: use different way to generate fsid for BLOCK or MTD
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (13 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 086/111] usb: musb: Fix external abort on non-linefetch for musb_irq_work() Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 089/111] frv: add atomic64_add_unless() Levin, Alexander (Sasha Levin)
                   ` (22 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Coly Li, Richard Weinberger, Andrew Morton, Linus Torvalds,
	Levin, Alexander (Sasha Levin)

From: Coly Li <colyli@suse.de>

[ Upstream commit f598f82e204ec0b17797caaf1b0311c52d43fb9a ]

Commit 8a59f5d25265 ("fs/romfs: return f_fsid for statfs(2)") generates
a 64bit id from sb->s_bdev->bd_dev.  This is only correct when romfs is
defined with CONFIG_ROMFS_ON_BLOCK.  If romfs is only defined with
CONFIG_ROMFS_ON_MTD, sb->s_bdev is NULL, and referencing sb->s_bdev->bd_dev
will trigger an oops.

Richard Weinberger points out that when CONFIG_ROMFS_BACKED_BY_BOTH=y,
both CONFIG_ROMFS_ON_BLOCK and CONFIG_ROMFS_ON_MTD are defined.
Therefore when calling huge_encode_dev() to generate a 64bit id, I use
the following order to choose the parameter:

- CONFIG_ROMFS_ON_BLOCK defined
  use sb->s_bdev->bd_dev
- CONFIG_ROMFS_ON_BLOCK undefined and CONFIG_ROMFS_ON_MTD defined
  use sb->s_dev
- both CONFIG_ROMFS_ON_BLOCK and CONFIG_ROMFS_ON_MTD undefined
  leave id as 0

When CONFIG_ROMFS_ON_MTD is defined and sb->s_mtd is not NULL, sb->s_dev
is set to a device ID generated by MTD_BLOCK_MAJOR and mtd index,
otherwise sb->s_dev is 0.

This is a best-effort attempt to generate a unique file system ID; if none
of the above conditions is met, f_fsid of this romfs instance will be 0.
Generally only one romfs can be built on a single MTD block device, so this
method is enough to identify multiple romfs instances in a computer.
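
A worked example under these rules (mtd index chosen arbitrarily): for a
romfs on /dev/mtdblock2, sb->s_dev is MKDEV(MTD_BLOCK_MAJOR, 2), i.e.
major 31, minor 2, and huge_encode_dev() yields the 64-bit f_fsid value
0x1f02.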

Link: http://lkml.kernel.org/r/1482928596-115155-1-git-send-email-colyli@suse.de
Signed-off-by: Coly Li <colyli@suse.de>
Reported-by: Nong Li <nongli1031@gmail.com>
Tested-by: Nong Li <nongli1031@gmail.com>
Cc: Richard Weinberger <richard.weinberger@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 fs/romfs/super.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38dfafa..0186fe6d39f3 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
-	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+	u64 id = 0;
+
+	/* When calling huge_encode_dev(),
+	 * use sb->s_bdev->bd_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK defined
+	 * use sb->s_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD defined
+	 * leave id as 0 when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD undefined
+	 */
+	if (sb->s_bdev)
+		id = huge_encode_dev(sb->s_bdev->bd_dev);
+	else if (sb->s_dev)
+		id = huge_encode_dev(sb->s_dev);
 
 	buf->f_type = ROMFS_MAGIC;
 	buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_flags |= MS_RDONLY | MS_NOATIME;
 	sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+	/* Use same dev ID from the underlying mtdblock device */
+	if (sb->s_mtd)
+		sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
 	/* read the image superblock and check it */
 	rsb = kmalloc(512, GFP_KERNEL);
 	if (!rsb)
-- 
2.11.0


* [PATCH for v4.9 LTS 089/111] frv: add atomic64_add_unless()
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (14 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 088/111] romfs: use different way to generate fsid for BLOCK or MTD Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 090/111] frv: add missing atomic64 operations Levin, Alexander (Sasha Levin)
                   ` (21 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Sudip Mukherjee, Sudip Mukherjee, David Howells, Andrew Morton,
	Linus Torvalds, Levin, Alexander (Sasha Levin)

From: Sudip Mukherjee <sudipm.mukherjee@gmail.com>

[ Upstream commit 545d58f677b21401f6de1ac12c25cc109f903ace ]

The build of frv allmodconfig was failing with the error:
lib/atomic64_test.c:209:9: error:

	implicit declaration of function 'atomic64_add_unless'

All the other atomic64 operations were defined for frv, but
atomic64_add_unless() was missing.

Implement atomic64_add_unless() as done in other arches.
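
For reference, the semantics being added (usage sketch, not part of the
patch): atomic64_add_unless(v, a, u) adds a to *v unless *v == u, and
returns non-zero when the add actually happened.

	atomic64_t refs = ATOMIC64_INIT(1);

	if (atomic64_add_unless(&refs, 1, 0))
		;	/* refs was non-zero, now bumped to 2 */
	else
		;	/* refs had already dropped to 0, do not revive */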

Link: http://lkml.kernel.org/r/1484781236-6698-1-git-send-email-sudipm.mukherjee@gmail.com
Signed-off-by: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 arch/frv/include/asm/atomic.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e264fc7..994ed3d5ca08 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -161,6 +161,22 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+	long long c, old;
+
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic64_cmpxchg(v, c, c + i);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != u;
+}
+
 #define ATOMIC_OP(op)							\
 static inline int atomic_fetch_##op(int i, atomic_t *v)			\
 {									\
-- 
2.11.0


* [PATCH for v4.9 LTS 090/111] frv: add missing atomic64 operations
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (15 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 089/111] frv: add atomic64_add_unless() Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 091/111] Documentation/filesystems/proc.txt: add VmPin Levin, Alexander (Sasha Levin)
                   ` (20 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Sudip Mukherjee, Sudip Mukherjee, David Howells, Andrew Morton,
	Linus Torvalds, Levin, Alexander (Sasha Levin)

From: Sudip Mukherjee <sudipm.mukherjee@gmail.com>

[ Upstream commit 4180c4c170a5a33b9987b314d248a9d572d89ab0 ]

Some more atomic64 operations were missing and as a result frv
allmodconfig was failing.  Add the missing operations.
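
For reference, the semantics of the two non-trivial additions (my reading
of the cmpxchg loops below): atomic64_inc_not_zero(v) increments unless
the counter is already 0, and atomic64_dec_if_positive(v) returns the
decremented value but only stores it when that value is non-negative.

	atomic64_t cnt = ATOMIC64_INIT(0);

	atomic64_dec_if_positive(&cnt);	/* returns -1, cnt stays 0 */
	atomic64_set(&cnt, 2);
	atomic64_dec_if_positive(&cnt);	/* returns 1, cnt is now 1 */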

Link: http://lkml.kernel.org/r/1485193844-12850-1-git-send-email-sudip.mukherjee@codethink.co.uk
Signed-off-by: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 arch/frv/include/asm/atomic.h | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 994ed3d5ca08..e93c9494503a 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
@@ -177,6 +177,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 	return c != u;
 }
 
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long long c, old, dec;
+
+	c = atomic64_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic64_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+		return dec;
+}
+
 #define ATOMIC_OP(op)							\
 static inline int atomic_fetch_##op(int i, atomic_t *v)			\
 {									\
-- 
2.11.0


* [PATCH for v4.9 LTS 091/111] Documentation/filesystems/proc.txt: add VmPin
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (16 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 090/111] frv: add missing atomic64 operations Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 092/111] proc: add a schedule point in proc_pid_readdir() Levin, Alexander (Sasha Levin)
                   ` (19 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Fabian Frederick, Christoph Lameter, Andrew Morton,
	Linus Torvalds, Levin, Alexander (Sasha Levin)

From: Fabian Frederick <fabf@skynet.be>

[ Upstream commit bbd88e1d53a84df9f57a2e37acc15518c3d304db ]

Commit bc3e53f682d9 ("mm: distinguish between mlocked and pinned pages")
added VmPin in /proc/<pid>/status.  Report that in
Documentation/filesystems/proc.txt

Also move Umask after Name to keep correct order.

Link: http://lkml.kernel.org/r/20170114201219.30387-1-fabf@skynet.be
Signed-off-by: Fabian Frederick <fabf@skynet.be>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 Documentation/filesystems/proc.txt | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 74329fd0add2..5600d1e293b2 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -211,10 +211,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
 			     T is traced or stopped)
@@ -225,7 +226,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and  file system UIDs
  Gid                         Real, effective, saved set, and  file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -235,6 +235,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 092/111] proc: add a schedule point in proc_pid_readdir()
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (17 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 091/111] Documentation/filesystems/proc.txt: add VmPin Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 093/111] mm/slub.c: trace free objects at KERN_INFO Levin, Alexander (Sasha Levin)
                   ` (18 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Eric Dumazet, Andrew Morton, Linus Torvalds, Levin,
	Alexander (Sasha Levin)

From: Eric Dumazet <edumazet@google.com>

[ Upstream commit 3ba4bceef23206349d4130ddf140819b365de7c8 ]

We have seen proc_pid_readdir() invocations holding cpu for more than 50
ms.  Add a cond_resched() to be gentle with other tasks.

[akpm@linux-foundation.org: coding style fix]
Link: http://lkml.kernel.org/r/1484238380.15816.42.camel@edumazet-glaptop3.roam.corp.google.com
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 fs/proc/base.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fs/proc/base.c b/fs/proc/base.c
index ca651ac00660..e67fec3c9856 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3181,6 +3181,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
 		char name[PROC_NUMBUF];
 		int len;
+
+		cond_resched();
 		if (!has_pid_permissions(ns, iter.task, 2))
 			continue;
 
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 093/111] mm/slub.c: trace free objects at KERN_INFO
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (18 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 092/111] proc: add a schedule point in proc_pid_readdir() Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 094/111] userfaultfd: fix SIGBUS resulting from false rwsem wakeups Levin, Alexander (Sasha Levin)
                   ` (17 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Daniel Thompson, Pekka Enberg, Joonsoo Kim, Andrew Morton,
	Linus Torvalds, Levin, Alexander (Sasha Levin)

From: Daniel Thompson <daniel.thompson@linaro.org>

[ Upstream commit aa2efd5ea4041754da4046c3d2e7edaac9526258 ]

Currently when trace is enabled (e.g.  slub_debug=T,kmalloc-128 ) the
trace messages are mostly output at KERN_INFO.  However the trace code
also calls print_section() to hexdump the head of a free object.  This
is hard coded to use KERN_ERR, meaning the console is deluged with trace
messages even if we've asked for quiet.

Fix this the obvious way by adding a level parameter to
print_section(), allowing calls from the trace code to use the same
trace level as other trace messages.

Link: http://lkml.kernel.org/r/20170113154850.518-1-daniel.thompson@linaro.org
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 mm/slub.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 58c7526f8de2..a94c8904dee1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 	return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 			length, 1);
 	metadata_access_disable();
 }
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	       p, p - addr, get_freepointer(s, p));
 
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+			      s->red_left_pad);
 	else if (p > addr + 16)
-		print_section("Bytes b4 ", p - 16, 16);
+		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-	print_section("Object ", p, min_t(unsigned long, s->object_size,
-				PAGE_SIZE));
+	print_section(KERN_ERR, "Object ", p,
+		      min_t(unsigned long, s->object_size, PAGE_SIZE));
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p + s->object_size,
+		print_section(KERN_ERR, "Redzone ", p + s->object_size,
 			s->inuse - s->object_size);
 
 	if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, size_from_object(s) - off);
+		print_section(KERN_ERR, "Padding ", p + off,
+			      size_from_object(s) - off);
 
 	dump_stack();
 }
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		end--;
 
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-	print_section("Padding ", end - remainder, remainder);
+	print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object,
+			print_section(KERN_INFO, "Object ", (void *)object,
 					s->object_size);
 
 		dump_stack();
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 095/111] kernel/watchdog.c: move hardlockup detector to separate file
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (20 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 094/111] userfaultfd: fix SIGBUS resulting from false rwsem wakeups Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 096/111] kernel/watchdog.c: move shared definitions to nmi.h Levin, Alexander (Sasha Levin)
                   ` (15 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Babu Moger, Ingo Molnar, Jiri Kosina, Andi Kleen, Yaowei Bai,
	Aaron Tomlin, Ulrich Obergfell, Tejun Heo, Hidehiro Kawai,
	Josh Hunt, David S. Miller, Andrew Morton, Linus Torvalds, Levin,
	Alexander (Sasha Levin)

From: Babu Moger <babu.moger@oracle.com>

[ Upstream commit 73ce0511c43686095efd2f65ef564aab952e07bc ]

Separate hardlockup code from watchdog.c and move it to watchdog_hld.c.
It is mostly straightforward.  Remove everything inside
CONFIG_HARDLOCKUP_DETECTOR; this code goes to the new file watchdog_hld.c.
Also update the Makefile accordingly.

Link: http://lkml.kernel.org/r/1478034826-43888-3-git-send-email-babu.moger@oracle.com
Signed-off-by: Babu Moger <babu.moger@oracle.com>
Acked-by: Don Zickus <dzickus@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Yaowei Bai <baiyaowei@cmss.chinamobile.com>
Cc: Aaron Tomlin <atomlin@redhat.com>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
Cc: Josh Hunt <johunt@akamai.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 kernel/Makefile       |   1 +
 kernel/watchdog.c     | 241 +++-----------------------------------------------
 kernel/watchdog_hld.c | 227 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 239 insertions(+), 230 deletions(-)
 create mode 100644 kernel/watchdog_hld.c

diff --git a/kernel/Makefile b/kernel/Makefile
index eb26e12c6c2a..314e7d62f5f0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
+obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6d1020c03d41..94aed27d4ffd 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,7 +24,6 @@
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
-#include <linux/perf_event.h>
 #include <linux/kthread.h>
 
 /*
@@ -100,50 +99,9 @@ static DEFINE_PER_CPU(bool, soft_watchdog_warn);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
 static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static DEFINE_PER_CPU(bool, hard_watchdog_warn);
-static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
-static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-#endif
 static unsigned long soft_lockup_nmi_warn;
 
-/* boot commands */
-/*
- * Should we panic when a soft-lockup or hard-lockup occurs:
- */
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-unsigned int __read_mostly hardlockup_panic =
-			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
-/*
- * We may not want to enable hard lockup detection by default in all cases,
- * for example when running the kernel as a guest on a hypervisor. In these
- * cases this function can be called to disable hard lockup detection. This
- * function should only be executed once by the boot processor before the
- * kernel command line parameters are parsed, because otherwise it is not
- * possible to override this in hardlockup_panic_setup().
- */
-void hardlockup_detector_disable(void)
-{
-	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-}
-
-static int __init hardlockup_panic_setup(char *str)
-{
-	if (!strncmp(str, "panic", 5))
-		hardlockup_panic = 1;
-	else if (!strncmp(str, "nopanic", 7))
-		hardlockup_panic = 0;
-	else if (!strncmp(str, "0", 1))
-		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-	else if (!strncmp(str, "1", 1))
-		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
-	return 1;
-}
-__setup("nmi_watchdog=", hardlockup_panic_setup);
-#endif
-
 unsigned int __read_mostly softlockup_panic =
 			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 
@@ -264,30 +222,12 @@ void touch_all_softlockup_watchdogs(void)
 	wq_watchdog_touch(-1);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void touch_nmi_watchdog(void)
-{
-	/*
-	 * Using __raw here because some code paths have
-	 * preemption enabled.  If preemption is enabled
-	 * then interrupts should be enabled too, in which
-	 * case we shouldn't have to worry about the watchdog
-	 * going off.
-	 */
-	raw_cpu_write(watchdog_nmi_touch, true);
-	touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-#endif
-
 void touch_softlockup_watchdog_sync(void)
 {
 	__this_cpu_write(softlockup_touch_sync, true);
 	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
 /* watchdog detector functions */
 static bool is_hardlockup(void)
 {
@@ -299,7 +239,6 @@ static bool is_hardlockup(void)
 	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return false;
 }
-#endif
 
 static int is_softlockup(unsigned long touch_ts)
 {
@@ -313,77 +252,22 @@ static int is_softlockup(unsigned long touch_ts)
 	return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-
-static struct perf_event_attr wd_hw_attr = {
-	.type		= PERF_TYPE_HARDWARE,
-	.config		= PERF_COUNT_HW_CPU_CYCLES,
-	.size		= sizeof(struct perf_event_attr),
-	.pinned		= 1,
-	.disabled	= 1,
-};
-
-/* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event,
-		 struct perf_sample_data *data,
-		 struct pt_regs *regs)
-{
-	/* Ensure the watchdog never gets throttled */
-	event->hw.interrupts = 0;
-
-	if (__this_cpu_read(watchdog_nmi_touch) == true) {
-		__this_cpu_write(watchdog_nmi_touch, false);
-		return;
-	}
-
-	/* check for a hardlockup
-	 * This is done by making sure our timer interrupt
-	 * is incrementing.  The timer interrupt should have
-	 * fired multiple times before we overflow'd.  If it hasn't
-	 * then this is a good indication the cpu is stuck
-	 */
-	if (is_hardlockup()) {
-		int this_cpu = smp_processor_id();
-
-		/* only print hardlockups once */
-		if (__this_cpu_read(hard_watchdog_warn) == true)
-			return;
-
-		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-		print_modules();
-		print_irqtrace_events(current);
-		if (regs)
-			show_regs(regs);
-		else
-			dump_stack();
-
-		/*
-		 * Perform all-CPU dump only once to avoid multiple hardlockups
-		 * generating interleaving traces
-		 */
-		if (sysctl_hardlockup_all_cpu_backtrace &&
-				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
-			trigger_allbutself_cpu_backtrace();
-
-		if (hardlockup_panic)
-			nmi_panic(regs, "Hard LOCKUP");
-
-		__this_cpu_write(hard_watchdog_warn, true);
-		return;
-	}
-
-	__this_cpu_write(hard_watchdog_warn, false);
-	return;
-}
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
-
 static void watchdog_interrupt_count(void)
 {
 	__this_cpu_inc(hrtimer_interrupts);
 }
 
-static int watchdog_nmi_enable(unsigned int cpu);
-static void watchdog_nmi_disable(unsigned int cpu);
+/*
+ * These two functions are mostly architecture specific
+ * defining them as weak here.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+	return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
 
 static int watchdog_enable_all_cpus(void);
 static void watchdog_disable_all_cpus(void);
@@ -576,109 +460,6 @@ static void watchdog(unsigned int cpu)
 		watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long cpu0_err;
-
-static int watchdog_nmi_enable(unsigned int cpu)
-{
-	struct perf_event_attr *wd_attr;
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	/* nothing to do if the hard lockup detector is disabled */
-	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-		goto out;
-
-	/* is it already setup and enabled? */
-	if (event && event->state > PERF_EVENT_STATE_OFF)
-		goto out;
-
-	/* it is setup but not enabled */
-	if (event != NULL)
-		goto out_enable;
-
-	wd_attr = &wd_hw_attr;
-	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-
-	/* Try to register using hardware perf events */
-	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-
-	/* save cpu0 error for future comparision */
-	if (cpu == 0 && IS_ERR(event))
-		cpu0_err = PTR_ERR(event);
-
-	if (!IS_ERR(event)) {
-		/* only print for cpu0 or different than cpu0 */
-		if (cpu == 0 || cpu0_err)
-			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
-		goto out_save;
-	}
-
-	/*
-	 * Disable the hard lockup detector if _any_ CPU fails to set up
-	 * set up the hardware perf event. The watchdog() function checks
-	 * the NMI_WATCHDOG_ENABLED bit periodically.
-	 *
-	 * The barriers are for syncing up watchdog_enabled across all the
-	 * cpus, as clear_bit() does not use barriers.
-	 */
-	smp_mb__before_atomic();
-	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
-	smp_mb__after_atomic();
-
-	/* skip displaying the same error again */
-	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
-		return PTR_ERR(event);
-
-	/* vary the KERN level based on the returned errno */
-	if (PTR_ERR(event) == -EOPNOTSUPP)
-		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
-	else if (PTR_ERR(event) == -ENOENT)
-		pr_warn("disabled (cpu%i): hardware events not enabled\n",
-			 cpu);
-	else
-		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
-			cpu, PTR_ERR(event));
-
-	pr_info("Shutting down hard lockup detector on all cpus\n");
-
-	return PTR_ERR(event);
-
-	/* success path */
-out_save:
-	per_cpu(watchdog_ev, cpu) = event;
-out_enable:
-	perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
-	return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	if (event) {
-		perf_event_disable(event);
-		per_cpu(watchdog_ev, cpu) = NULL;
-
-		/* should be in cleanup, but blocks oprofile */
-		perf_event_release_kernel(event);
-	}
-	if (cpu == 0) {
-		/* watchdog_nmi_enable() expects this to be zero initially. */
-		cpu0_err = 0;
-	}
-}
-
-#else
-static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
-static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
-
 static struct smp_hotplug_thread watchdog_threads = {
 	.store			= &softlockup_watchdog,
 	.thread_should_run	= watchdog_should_run,
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
new file mode 100644
index 000000000000..84016c8aee6b
--- /dev/null
+++ b/kernel/watchdog_hld.c
@@ -0,0 +1,227 @@
+/*
+ * Detect hard lockups on a system
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Note: Most of this code is borrowed heavily from the original softlockup
+ * detector, so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
+ * to those contributors as well.
+ */
+
+#define pr_fmt(fmt) "NMI watchdog: " fmt
+
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <asm/irq_regs.h>
+#include <linux/perf_event.h>
+
+static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+
+/* boot commands */
+/*
+ * Should we panic when a soft-lockup or hard-lockup occurs:
+ */
+unsigned int __read_mostly hardlockup_panic =
+			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+static unsigned long hardlockup_allcpu_dumped;
+/*
+ * We may not want to enable hard lockup detection by default in all cases,
+ * for example when running the kernel as a guest on a hypervisor. In these
+ * cases this function can be called to disable hard lockup detection. This
+ * function should only be executed once by the boot processor before the
+ * kernel command line parameters are parsed, because otherwise it is not
+ * possible to override this in hardlockup_panic_setup().
+ */
+void hardlockup_detector_disable(void)
+{
+	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+}
+
+static int __init hardlockup_panic_setup(char *str)
+{
+	if (!strncmp(str, "panic", 5))
+		hardlockup_panic = 1;
+	else if (!strncmp(str, "nopanic", 7))
+		hardlockup_panic = 0;
+	else if (!strncmp(str, "0", 1))
+		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+	else if (!strncmp(str, "1", 1))
+		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+	return 1;
+}
+__setup("nmi_watchdog=", hardlockup_panic_setup);
+
+void touch_nmi_watchdog(void)
+{
+	/*
+	 * Using __raw here because some code paths have
+	 * preemption enabled.  If preemption is enabled
+	 * then interrupts should be enabled too, in which
+	 * case we shouldn't have to worry about the watchdog
+	 * going off.
+	 */
+	raw_cpu_write(watchdog_nmi_touch, true);
+	touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static struct perf_event_attr wd_hw_attr = {
+	.type		= PERF_TYPE_HARDWARE,
+	.config		= PERF_COUNT_HW_CPU_CYCLES,
+	.size		= sizeof(struct perf_event_attr),
+	.pinned		= 1,
+	.disabled	= 1,
+};
+
+/* Callback function for perf event subsystem */
+static void watchdog_overflow_callback(struct perf_event *event,
+		 struct perf_sample_data *data,
+		 struct pt_regs *regs)
+{
+	/* Ensure the watchdog never gets throttled */
+	event->hw.interrupts = 0;
+
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
+		return;
+	}
+
+	/* check for a hardlockup
+	 * This is done by making sure our timer interrupt
+	 * is incrementing.  The timer interrupt should have
+	 * fired multiple times before we overflow'd.  If it hasn't
+	 * then this is a good indication the cpu is stuck
+	 */
+	if (is_hardlockup()) {
+		int this_cpu = smp_processor_id();
+
+		/* only print hardlockups once */
+		if (__this_cpu_read(hard_watchdog_warn) == true)
+			return;
+
+		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+		print_modules();
+		print_irqtrace_events(current);
+		if (regs)
+			show_regs(regs);
+		else
+			dump_stack();
+
+		/*
+		 * Perform all-CPU dump only once to avoid multiple hardlockups
+		 * generating interleaving traces
+		 */
+		if (sysctl_hardlockup_all_cpu_backtrace &&
+				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
+			trigger_allbutself_cpu_backtrace();
+
+		if (hardlockup_panic)
+			nmi_panic(regs, "Hard LOCKUP");
+
+		__this_cpu_write(hard_watchdog_warn, true);
+		return;
+	}
+
+	__this_cpu_write(hard_watchdog_warn, false);
+	return;
+}
+
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
+int watchdog_nmi_enable(unsigned int cpu)
+{
+	struct perf_event_attr *wd_attr;
+	struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+	/* nothing to do if the hard lockup detector is disabled */
+	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+		goto out;
+
+	/* is it already setup and enabled? */
+	if (event && event->state > PERF_EVENT_STATE_OFF)
+		goto out;
+
+	/* it is setup but not enabled */
+	if (event != NULL)
+		goto out_enable;
+
+	wd_attr = &wd_hw_attr;
+	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+
+	/* Try to register using hardware perf events */
+	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+	/* save cpu0 error for future comparision */
+	if (cpu == 0 && IS_ERR(event))
+		cpu0_err = PTR_ERR(event);
+
+	if (!IS_ERR(event)) {
+		/* only print for cpu0 or different than cpu0 */
+		if (cpu == 0 || cpu0_err)
+			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
+		goto out_save;
+	}
+
+	/*
+	 * Disable the hard lockup detector if _any_ CPU fails to set up
+	 * set up the hardware perf event. The watchdog() function checks
+	 * the NMI_WATCHDOG_ENABLED bit periodically.
+	 *
+	 * The barriers are for syncing up watchdog_enabled across all the
+	 * cpus, as clear_bit() does not use barriers.
+	 */
+	smp_mb__before_atomic();
+	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
+	smp_mb__after_atomic();
+
+	/* skip displaying the same error again */
+	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+		return PTR_ERR(event);
+
+	/* vary the KERN level based on the returned errno */
+	if (PTR_ERR(event) == -EOPNOTSUPP)
+		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+	else if (PTR_ERR(event) == -ENOENT)
+		pr_warn("disabled (cpu%i): hardware events not enabled\n",
+			 cpu);
+	else
+		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
+			cpu, PTR_ERR(event));
+
+	pr_info("Shutting down hard lockup detector on all cpus\n");
+
+	return PTR_ERR(event);
+
+	/* success path */
+out_save:
+	per_cpu(watchdog_ev, cpu) = event;
+out_enable:
+	perf_event_enable(per_cpu(watchdog_ev, cpu));
+out:
+	return 0;
+}
+
+void watchdog_nmi_disable(unsigned int cpu)
+{
+	struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+	if (event) {
+		perf_event_disable(event);
+		per_cpu(watchdog_ev, cpu) = NULL;
+
+		/* should be in cleanup, but blocks oprofile */
+		perf_event_release_kernel(event);
+	}
+	if (cpu == 0) {
+		/* watchdog_nmi_enable() expects this to be zero initially. */
+		cpu0_err = 0;
+	}
+}
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 094/111] userfaultfd: fix SIGBUS resulting from false rwsem wakeups
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (19 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 093/111] mm/slub.c: trace free objects at KERN_INFO Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 095/111] kernel/watchdog.c: move hardlockup detector to separate file Levin, Alexander (Sasha Levin)
                   ` (16 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Andrea Arcangeli, Michael Rapoport, Dr. David Alan Gilbert,
	Pavel Emelyanov, Andrew Morton, Linus Torvalds, Levin,
	Alexander (Sasha Levin)

From: Andrea Arcangeli <aarcange@redhat.com>

[ Upstream commit 15a77c6fe494f4b1757d30cd137fe66ab06a38c3 ]

With >=32 CPUs the userfaultfd selftest triggered a graceful but
unexpected SIGBUS because VM_FAULT_RETRY was returned by
handle_userfault() even though the UFFDIO_COPY hadn't completed.

This seems caused by rwsem waking the thread blocked in
handle_userfault() and we can't run up_read() before the wait_event
sequence is complete.

Keeping the wait_event sequence identical to the first one would require
running userfaultfd_must_wait() again to know if the loop should be
repeated, and it would also require retaking the rwsem and revalidating
the whole vma status.

It seems simpler to wait for the targeted wakeup so that if false wakeups
materialize we still wait for our specific wakeup event, unless of
course there are signals or the uffd was released.

Debug code collecting the stack trace of the wakeup showed this:

  $ ./userfaultfd 100 99999
  nr_pages: 25600, nr_pages_per_cpu: 800
  bounces: 99998, mode: racing ver poll, userfaults: 32 35 90 232 30 138 69 82 34 30 139 40 40 31 20 19 43 13 15 28 27 38 21 43 56 22 1 17 31 8 4 2
  bounces: 99997, mode: rnd ver poll, Bus error (core dumped)

    save_stack_trace+0x2b/0x50
    try_to_wake_up+0x2a6/0x580
    wake_up_q+0x32/0x70
    rwsem_wake+0xe0/0x120
    call_rwsem_wake+0x1b/0x30
    up_write+0x3b/0x40
    vm_mmap_pgoff+0x9c/0xc0
    SyS_mmap_pgoff+0x1a9/0x240
    SyS_mmap+0x22/0x30
    entry_SYSCALL_64_fastpath+0x1f/0xbd
    0xffffffffffffffff
    FAULT_FLAG_ALLOW_RETRY missing 70
  CPU: 24 PID: 1054 Comm: userfaultfd Tainted: G        W       4.8.0+ #30
  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.3-0-ge2fc41e-prebuilt.qemu-project.org 04/01/2014
  Call Trace:
    dump_stack+0xb8/0x112
    handle_userfault+0x572/0x650
    handle_mm_fault+0x12cb/0x1520
    __do_page_fault+0x175/0x500
    trace_do_page_fault+0x61/0x270
    do_async_page_fault+0x19/0x90
    async_page_fault+0x25/0x30

This always happens when the main userfault selftest thread is running
clone() while glibc runs either mprotect or mmap (both taking mmap_sem
down_write()) to allocate the thread stack of the background threads,
while locking/userfault threads already run at full throttle and are
susceptible to false wakeups that may cause handle_userfault() to return
earlier than expected (which results in a graceful SIGBUS at the next
attempt).

This was reproduced only with >=32 CPUs because with fewer CPUs the loop
that starts the background threads with clone() is too quick, while with 32 CPUs
there's already significant activity on ~32 locking and userfault
threads when the last background threads are started with clone().

This >=32 CPUs SMP race condition is likely reproducible only with the
selftest because of the much heavier userfault load it generates if
compared to real apps.

We'll have to allow "one more" VM_FAULT_RETRY for the WP support, and a
patch floating around that provides it also hid this problem, but in
reality it only succeeded at hiding the problem.

False wakeups could still happen again the second time
handle_userfault() is invoked, even if it's such a rare race condition
that getting false wakeups twice in a row is impossible to reproduce.
This full fix is needed for correctness, the only alternative would be
to allow VM_FAULT_RETRY to be returned infinitely.  With this fix the WP
support can stick to a strict "one more" VM_FAULT_RETRY logic (no need
of returning it infinite times to avoid the SIGBUS).
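
Reduced to its core, the wait added by the hunk below follows the usual
spurious-wakeup-tolerant pattern (illustrative sketch; the real code also
breaks out on ctx->released and on pending/fatal signals):

	while (!READ_ONCE(uwq.waken)) {
		/* full barrier before re-checking the flag */
		set_current_state(blocking_state);
		if (READ_ONCE(uwq.waken))
			break;
		schedule();		/* a false wakeup just loops back */
	}
	__set_current_state(TASK_RUNNING);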

Link: http://lkml.kernel.org/r/20170111005535.13832-2-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Shubham Kumar Sharma <shubham.kumar.sharma@oracle.com>
Tested-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 fs/userfaultfd.c | 37 +++++++++++++++++++++++++++++++++++--
 1 file changed, 35 insertions(+), 2 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 85959d8324df..b86054cc41db 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
 	struct uffd_msg msg;
 	wait_queue_t wq;
 	struct userfaultfd_ctx *ctx;
+	bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
 	if (len && (start > uwq->msg.arg.pagefault.address ||
 		    start + len <= uwq->msg.arg.pagefault.address))
 		goto out;
+	WRITE_ONCE(uwq->waken, true);
+	/*
+	 * The implicit smp_mb__before_spinlock in try_to_wake_up()
+	 * renders uwq->waken visible to other CPUs before the task is
+	 * waken.
+	 */
 	ret = wake_up_state(wq->private, mode);
 	if (ret)
 		/*
@@ -264,6 +271,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 	struct userfaultfd_wait_queue uwq;
 	int ret;
 	bool must_wait, return_to_userland;
+	long blocking_state;
 
 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -333,10 +341,13 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 	uwq.wq.private = current;
 	uwq.msg = userfault_msg(fe->address, fe->flags, reason);
 	uwq.ctx = ctx;
+	uwq.waken = false;
 
 	return_to_userland =
 		(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+			 TASK_KILLABLE;
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
 	/*
@@ -349,8 +360,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 	 * following the spin_unlock to happen before the list_add in
 	 * __add_wait_queue.
 	 */
-	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-			  TASK_KILLABLE);
+	set_current_state(blocking_state);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
 	must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
@@ -362,6 +372,29 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 		wake_up_poll(&ctx->fd_wqh, POLLIN);
 		schedule();
 		ret |= VM_FAULT_MAJOR;
+
+		/*
+		 * False wakeups can orginate even from rwsem before
+		 * up_read() however userfaults will wait either for a
+		 * targeted wakeup on the specific uwq waitqueue from
+		 * wake_userfault() or for signals or for uffd
+		 * release.
+		 */
+		while (!READ_ONCE(uwq.waken)) {
+			/*
+			 * This needs the full smp_store_mb()
+			 * guarantee as the state write must be
+			 * visible to other CPUs before reading
+			 * uwq.waken from other CPUs.
+			 */
+			set_current_state(blocking_state);
+			if (READ_ONCE(uwq.waken) ||
+			    READ_ONCE(ctx->released) ||
+			    (return_to_userland ? signal_pending(current) :
+			     fatal_signal_pending(current)))
+				break;
+			schedule();
+		}
 	}
 
 	__set_current_state(TASK_RUNNING);
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 096/111] kernel/watchdog.c: move shared definitions to nmi.h
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (21 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 095/111] kernel/watchdog.c: move hardlockup detector to separate file Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 097/111] kernel/watchdog: prevent false hardlockup on overloaded system Levin, Alexander (Sasha Levin)
                   ` (14 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Babu Moger, Ingo Molnar, Jiri Kosina, Andi Kleen, Yaowei Bai,
	Aaron Tomlin, Ulrich Obergfell, Tejun Heo, Hidehiro Kawai,
	Josh Hunt, David S. Miller, Andrew Morton, Linus Torvalds, Levin,
	Alexander (Sasha Levin)

From: Babu Moger <babu.moger@oracle.com>

[ Upstream commit 249e52e35580fcfe5dad53a7dcd7c1252788749c ]

Patch series "Clean up watchdog handlers", v2.

This is an attempt to cleanup watchdog handlers.  Right now,
kernel/watchdog.c implements both softlockup and hardlockup detectors.
Softlockup code is generic.  Hardlockup code is arch specific.  Some
architectures don't use hardlockup detectors.  They use their own
watchdog detectors.  To make both these combination work, we have
numerous #ifdefs in kernel/watchdog.c.

We are trying here to make these handlers independent of each other.
Also provide an interface for architectures to implement their own
handlers.  watchdog_nmi_enable and watchdog_nmi_disable will be defined
as weak such that architectures can override their definitions.
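
For illustration, a sketch of how that weak/strong split works (the arch
file name is hypothetical):

/* kernel/watchdog.c: generic no-op fallbacks, marked __weak */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
}

/* e.g. arch/foo/kernel/watchdog.c (hypothetical): the strong definition
 * is picked by the linker, with no #ifdef needed in common code.
 */
int watchdog_nmi_enable(unsigned int cpu)
{
	/* arm the arch-specific NMI source for this cpu */
	return 0;
}

void watchdog_nmi_disable(unsigned int cpu)
{
	/* quiesce the arch-specific NMI source for this cpu */
}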

Thanks to Don Zickus for his suggestions.
Here are our previous discussions
http://www.spinics.net/lists/sparclinux/msg16543.html
http://www.spinics.net/lists/sparclinux/msg16441.html

This patch (of 3):

Move shared macros and definitions to nmi.h so that watchdog.c, new file
watchdog_hld.c or any other architecture specific handler can use those
definitions.

Link: http://lkml.kernel.org/r/1478034826-43888-2-git-send-email-babu.moger@oracle.com
Signed-off-by: Babu Moger <babu.moger@oracle.com>
Acked-by: Don Zickus <dzickus@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Yaowei Bai <baiyaowei@cmss.chinamobile.com>
Cc: Aaron Tomlin <atomlin@redhat.com>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
Cc: Josh Hunt <johunt@akamai.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 include/linux/nmi.h | 24 ++++++++++++++++++++++++
 kernel/watchdog.c   | 28 ++++------------------------
 2 files changed, 28 insertions(+), 24 deletions(-)

diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a78c35cff1ae..aacca824a6ae 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,6 +7,23 @@
 #include <linux/sched.h>
 #include <asm/irq.h>
 
+/*
+ * The run state of the lockup detectors is controlled by the content of the
+ * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
+ * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
+ *
+ * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
+ * are variables that are only used as an 'interface' between the parameters
+ * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
+ * 'watchdog_thresh' variable is handled differently because its value is not
+ * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
+ * is equal zero.
+ */
+#define NMI_WATCHDOG_ENABLED_BIT   0
+#define SOFT_WATCHDOG_ENABLED_BIT  1
+#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
+#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
+
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
  * 
@@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled;
 extern int soft_watchdog_enabled;
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif
+extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
 			 void __user *, size_t *, loff_t *);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 94aed27d4ffd..d4b0fa01cae3 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -26,29 +26,12 @@
 #include <linux/kvm_para.h>
 #include <linux/kthread.h>
 
-/*
- * The run state of the lockup detectors is controlled by the content of the
- * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
- * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
- *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
- */
-#define NMI_WATCHDOG_ENABLED_BIT   0
-#define SOFT_WATCHDOG_ENABLED_BIT  1
-#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
-
 static DEFINE_MUTEX(watchdog_proc_mutex);
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
 #else
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
 #endif
 int __read_mostly nmi_watchdog_enabled;
 int __read_mostly soft_watchdog_enabled;
@@ -58,9 +41,6 @@ int __read_mostly watchdog_thresh = 10;
 #ifdef CONFIG_SMP
 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
 #endif
 static struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -229,7 +209,7 @@ void touch_softlockup_watchdog_sync(void)
 }
 
 /* watchdog detector functions */
-static bool is_hardlockup(void)
+bool is_hardlockup(void)
 {
 	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 097/111] kernel/watchdog: prevent false hardlockup on overloaded system
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (22 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 096/111] kernel/watchdog.c: move shared definitions to nmi.h Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 098/111] vhost/vsock: handle vhost_vq_init_access() error Levin, Alexander (Sasha Levin)
                   ` (13 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Don Zickus, Ulrich Obergfell, Andrew Morton, Linus Torvalds,
	Levin, Alexander (Sasha Levin)

From: Don Zickus <dzickus@redhat.com>

[ Upstream commit b94f51183b0617e7b9b4fb4137d4cf1cab7547c2 ]

On an overloaded system, it is possible that a change in the watchdog
threshold can be delayed long enough to trigger a false positive.

This can easily be achieved by having a cpu spinning indefinitely on a
task, while another cpu updates watchdog threshold.

What happens is that, while trying to park the watchdog threads, the hrtimers
on the other cpus trigger and reprogram themselves with the new slower
watchdog threshold.  Meanwhile, the nmi watchdog is still programmed
with the old faster threshold.

Because the one cpu is blocked, it prevents the thread parking on the
other cpus from completing, which is needed to shut down the nmi watchdog
and reprogram it correctly.  As a result, a false positive from the nmi
watchdog is reported.

Fix this by setting a park_in_progress flag to block all lockups until
the parking is complete.
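
In outline, the fix brackets the parking window with an atomic flag that
both detectors check first (a condensed sketch; the full hunks are below):

	/* watchdog_park_threads(): bracket the reconfiguration window */
	atomic_set(&watchdog_park_in_progress, 1);
	/* ... kthread_park() each per-cpu softlockup thread ... */
	atomic_set(&watchdog_park_in_progress, 0);

	/* watchdog_timer_fn() / watchdog_overflow_callback(): bail out early
	 * while the flag is raised, so neither detector can fire against a
	 * half-reprogrammed threshold.
	 */
	if (atomic_read(&watchdog_park_in_progress) != 0)
		return;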

Fix provided by Ulrich Obergfell.

[akpm@linux-foundation.org: s/park_in_progress/watchdog_park_in_progress/]
Link: http://lkml.kernel.org/r/1481041033-192236-1-git-send-email-dzickus@redhat.com
Signed-off-by: Don Zickus <dzickus@redhat.com>
Reviewed-by: Aaron Tomlin <atomlin@redhat.com>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 include/linux/nmi.h   | 1 +
 kernel/watchdog.c     | 9 +++++++++
 kernel/watchdog_hld.c | 3 +++
 3 files changed, 13 insertions(+)

diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca824a6ae..0a3fadc32693 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d4b0fa01cae3..63177be0159e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	int duration;
 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return HRTIMER_NORESTART;
+
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
 	int cpu, ret = 0;
 
+	atomic_set(&watchdog_park_in_progress, 1);
+
 	for_each_watchdog_cpu(cpu) {
 		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
 		if (ret)
 			break;
 	}
 
+	atomic_set(&watchdog_park_in_progress, 0);
+
 	return ret;
 }
 
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 84016c8aee6b..12b8dd640786 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return;
+
 	if (__this_cpu_read(watchdog_nmi_touch) == true) {
 		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 098/111] vhost/vsock: handle vhost_vq_init_access() error
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (23 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 097/111] kernel/watchdog: prevent false hardlockup on overloaded system Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 100/111] ARCv2: smp-boot: wake_flag polling by non-Masters needs to be uncached Levin, Alexander (Sasha Levin)
                   ` (12 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Stefan Hajnoczi, Michael S . Tsirkin, Levin, Alexander (Sasha Levin)

From: Stefan Hajnoczi <stefanha@redhat.com>

[ Upstream commit 0516ffd88fa0d006ee80389ce14a9ca5ae45e845 ]

Propagate the error when vhost_vq_init_access() fails and set
vq->private_data to NULL.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/vhost/vsock.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a504e2e003da..e3fad302b4fb 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -368,6 +368,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+	struct vhost_virtqueue *vq;
 	size_t i;
 	int ret;
 
@@ -378,19 +379,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 		goto err;
 
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 
 		if (!vhost_vq_access_ok(vq)) {
 			ret = -EFAULT;
-			mutex_unlock(&vq->mutex);
 			goto err_vq;
 		}
 
 		if (!vq->private_data) {
 			vq->private_data = vsock;
-			vhost_vq_init_access(vq);
+			ret = vhost_vq_init_access(vq);
+			if (ret)
+				goto err_vq;
 		}
 
 		mutex_unlock(&vq->mutex);
@@ -400,8 +402,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 	return 0;
 
 err_vq:
+	vq->private_data = NULL;
+	mutex_unlock(&vq->mutex);
+
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 		vq->private_data = NULL;
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 099/111] ARC: smp-boot: Decouple Non masters waiting API from jump to entry point
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (25 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 100/111] ARCv2: smp-boot: wake_flag polling by non-Masters needs to be uncached Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 101/111] tipc: ignore requests when the connection state is not CONNECTED Levin, Alexander (Sasha Levin)
                   ` (10 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Vineet Gupta, Levin, Alexander (Sasha Levin)

From: Vineet Gupta <vgupta@synopsys.com>

[ Upstream commit bf02454a741b58682a82c314a9a46bed930ed2f7 ]

For run-on-reset SMP configs, non master cores call a routine which
waits until Master gives it a "go" signal (currently using a shared
mem flag). The same routine then jumps off to the well-known entry point of
all non-Master cores, i.e. @first_lines_of_secondary.

This patch moves out the last part into one single place in early boot
code.

This is better in terms of abstraction (the wait API only waits and
returns), leaving out the "jump off to" part.

In the actual implementation this requires some restructuring of the early
boot code, as Master now jumps to BSS setup explicitly,
vs. falling thru into it before.

Technically this patch doesn't cause any functional change; it just
moves the ugly #ifdef'ry from assembly code to "C".

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 arch/arc/kernel/head.S | 14 +++++++-------
 arch/arc/kernel/smp.c  |  6 ++++--
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..8b90d25a15cc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
 	; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d972c9d..44a0d21ed342 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -98,14 +98,16 @@ static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
 	while (wake_flag != cpu)
 		;
 
 	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 100/111] ARCv2: smp-boot: wake_flag polling by non-Masters needs to be uncached
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (24 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 098/111] vhost/vsock: handle vhost_vq_init_access() error Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 099/111] ARC: smp-boot: Decouple Non masters waiting API from jump to entry point Levin, Alexander (Sasha Levin)
                   ` (11 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Vineet Gupta, Levin, Alexander (Sasha Levin)

From: Vineet Gupta <vgupta@synopsys.com>

[ Upstream commit 78f824d4312a8944f5340c6b161bba3bf2c81096 ]

This is needed on HS38 cores for setting up the IO-Coherency aperture properly.

The polling could perturb the caches and coherency fabric, which could be
wrong in the small window when the Master is setting up the IOC aperture etc.
in arc_cache_init().

We do it only for ARCv2 based builds to not affect EZChip ARCompact
based platform.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 arch/arc/kernel/smp.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 44a0d21ed342..2afbafadb6ab 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,10 +90,23 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
@@ -102,10 +115,10 @@ void arc_platform_smp_wait_to_boot(int cpu)
 	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
 		return;
 
-	while (wake_flag != cpu)
+	while (__boot_read(wake_flag) != cpu)
 		;
 
-	wake_flag = 0;
+	__boot_write(wake_flag, 0);
 }
 
 const char *arc_platform_smp_cpuinfo(void)
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 101/111] tipc: ignore requests when the connection state is not CONNECTED
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (26 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 099/111] ARC: smp-boot: Decouple Non masters waiting API from jump to entry point Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 102/111] tipc: fix connection refcount error Levin, Alexander (Sasha Levin)
                   ` (9 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Parthasarathy Bhuvaragan, David S . Miller, Levin,
	Alexander (Sasha Levin)

From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>

[ Upstream commit 4c887aa65d38633885010277f3482400681be719 ]

In tipc_conn_sendmsg(), we first queue the request to the outqueue
followed by the connection state check. If the connection is not
connected, we should not queue this message.

In this commit, we reject the messages if the connection state is
not CF_CONNECTED.

Acked-by: Ying Xue <ying.xue@windriver.com>
Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Tested-by: John Thompson <thompa.atl@gmail.com>
Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 net/tipc/server.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849ce453d..b4b742c89a26 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -458,6 +458,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
 	if (!con)
 		return -EINVAL;
 
+	if (!test_bit(CF_CONNECTED, &con->flags)) {
+		conn_put(con);
+		return 0;
+	}
+
 	e = tipc_alloc_entry(data, len);
 	if (!e) {
 		conn_put(con);
@@ -471,12 +476,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
 	list_add_tail(&e->list, &con->outqueue);
 	spin_unlock_bh(&con->outqueue_lock);
 
-	if (test_bit(CF_CONNECTED, &con->flags)) {
-		if (!queue_work(s->send_wq, &con->swork))
-			conn_put(con);
-	} else {
+	if (!queue_work(s->send_wq, &con->swork))
 		conn_put(con);
-	}
 	return 0;
 }
 
@@ -500,7 +501,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
 	int ret;
 
 	spin_lock_bh(&con->outqueue_lock);
-	while (1) {
+	while (test_bit(CF_CONNECTED, &con->flags)) {
 		e = list_entry(con->outqueue.next, struct outqueue_entry,
 			       list);
 		if ((struct list_head *) e == &con->outqueue)
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 102/111] tipc: fix connection refcount error
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (27 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 101/111] tipc: ignore requests when the connection state is not CONNECTED Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 104/111] tipc: fix nametbl_lock soft lockup at node/link events Levin, Alexander (Sasha Levin)
                   ` (8 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Parthasarathy Bhuvaragan, David S . Miller, Levin,
	Alexander (Sasha Levin)

From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>

[ Upstream commit fc0adfc8fd18b61b6f7a3f28b429e134d6f3a008 ]

Until now, the generic server framework maintains the connection ids
per subscriber in the server's conn_idr. At tipc_close_conn, we remove
the connection id from the server list, but the connection remains
valid until the refcount cleanup runs. Hence we have a window where
the server allocates the same connection to a new subscriber, leading
to an inconsistent reference count. We hit another refcount warning
when we grab the refcount in tipc_conn_lookup() for connections whose
CF_CONNECTED flag is not set. This usually occurs at shutdown, when we
stop the topology server and withdraw the TIPC_CFG_SRV publication,
thereby triggering a withdraw message to subscribers.

In this commit, we:
1. remove the connection from the server list at refcount cleanup.
2. grab the refcount for a connection only if CF_CONNECTED is set.
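
A hedged userspace analog of the two rules (hypothetical names; a
pthread mutex and a plain pointer stand in for the idr and its lock),
not the kernel code: the lookup hands out a reference only for a live
object, and the table slot is released in the final put so it cannot
be reused while references remain:

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdlib.h>

  struct obj {
      atomic_int refs;
      bool connected;
  };

  static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
  static struct obj *table_slot;           /* stands in for the conn_idr */

  static struct obj *lookup(void)
  {
      struct obj *o;

      pthread_mutex_lock(&table_lock);
      o = table_slot;
      if (o && o->connected)
          atomic_fetch_add(&o->refs, 1);   /* take a ref only when live */
      else
          o = NULL;                        /* never hand out a dying object */
      pthread_mutex_unlock(&table_lock);
      return o;
  }

  static void put(struct obj *o)
  {
      if (atomic_fetch_sub(&o->refs, 1) == 1) {
          pthread_mutex_lock(&table_lock);
          table_slot = NULL;               /* slot freed only at last put */
          pthread_mutex_unlock(&table_lock);
          free(o);
      }
  }

  int main(void)
  {
      struct obj *o = calloc(1, sizeof(*o));

      if (!o)
          return 1;
      atomic_init(&o->refs, 1);
      o->connected = true;
      table_slot = o;

      struct obj *ref = lookup();          /* takes a second reference */
      if (ref)
          put(ref);
      o->connected = false;                /* "close" the connection */
      put(o);                              /* last put: remove + free */
      return 0;
  }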

Tested-by: John Thompson <thompa.atl@gmail.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 net/tipc/server.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/net/tipc/server.c b/net/tipc/server.c
index b4b742c89a26..f89c0c2e8c16 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -91,7 +91,8 @@ static void tipc_sock_release(struct tipc_conn *con);
 static void tipc_conn_kref_release(struct kref *kref)
 {
 	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
-	struct sockaddr_tipc *saddr = con->server->saddr;
+	struct tipc_server *s = con->server;
+	struct sockaddr_tipc *saddr = s->saddr;
 	struct socket *sock = con->sock;
 	struct sock *sk;
 
@@ -106,6 +107,11 @@ static void tipc_conn_kref_release(struct kref *kref)
 		tipc_sock_release(con);
 		sock_release(sock);
 		con->sock = NULL;
+
+		spin_lock_bh(&s->idr_lock);
+		idr_remove(&s->conn_idr, con->conid);
+		s->idr_in_use--;
+		spin_unlock_bh(&s->idr_lock);
 	}
 
 	tipc_clean_outqueues(con);
@@ -128,8 +134,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
 
 	spin_lock_bh(&s->idr_lock);
 	con = idr_find(&s->conn_idr, conid);
-	if (con)
+	if (con && test_bit(CF_CONNECTED, &con->flags))
 		conn_get(con);
+	else
+		con = NULL;
 	spin_unlock_bh(&s->idr_lock);
 	return con;
 }
@@ -198,15 +206,8 @@ static void tipc_sock_release(struct tipc_conn *con)
 
 static void tipc_close_conn(struct tipc_conn *con)
 {
-	struct tipc_server *s = con->server;
-
 	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
 
-		spin_lock_bh(&s->idr_lock);
-		idr_remove(&s->conn_idr, con->conid);
-		s->idr_in_use--;
-		spin_unlock_bh(&s->idr_lock);
-
 		/* We shouldn't flush pending works as we may be in the
 		 * thread. In fact the races with pending rx/tx work structs
 		 * are harmless for us here as we have already deleted this
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 104/111] tipc: fix nametbl_lock soft lockup at node/link events
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (28 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 102/111] tipc: fix connection refcount error Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 103/111] tipc: add subscription refcount to avoid invalid delete Levin, Alexander (Sasha Levin)
                   ` (7 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Parthasarathy Bhuvaragan, David S . Miller, Levin,
	Alexander (Sasha Levin)

From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>

[ Upstream commit 93f955aad4bacee5acebad141d1a03cd51f27b4e ]

We trigger a soft lockup as we grab nametbl_lock twice if the node
has a pending node up/down or link up/down event while:
- we process an incoming named message in tipc_named_rcv() and
  perform a tipc_update_nametbl().
- we have pending backlog items in the name distributor queue
  during a nametable update using tipc_nametbl_publish() or
  tipc_nametbl_withdraw().

The following are the associated call chains:
tipc_named_rcv() Grabs nametbl_lock
   tipc_update_nametbl() (publish/withdraw)
     tipc_node_subscribe()/unsubscribe()
       tipc_node_write_unlock()
          << lockup occurs if an outstanding node/link event
             exists, as we grab nametbl_lock again >>

tipc_nametbl_withdraw() Grabs nametbl_lock
  tipc_named_process_backlog()
    tipc_update_nametbl()
      << rest as above >>

The function tipc_node_write_unlock(), in addition to releasing the
lock, processes the outstanding node/link up/down events. To do this,
we need to grab nametbl_lock again, leading to the lockup.

In this commit we fix the soft lockup by introducing a fast variant of
node_unlock(), where we just release the lock. We adapt
node_subscribe()/node_unsubscribe() to use the fast variant.
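
As a rough userspace sketch of the split (hypothetical names; pthread
mutexes stand in for the kernel locks), not the kernel code: the fast
variant only drops the node lock, while the full variant still handles
deferred events and therefore takes the name table lock:

  #include <pthread.h>
  #include <stdbool.h>

  static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t name_table_lock = PTHREAD_MUTEX_INITIALIZER;
  static bool pending_event;

  static void node_unlock_fast(void)
  {
      pthread_mutex_unlock(&node_lock);    /* just drop the lock */
  }

  static void node_unlock(void)
  {
      bool event = pending_event;

      pending_event = false;
      pthread_mutex_unlock(&node_lock);

      if (event) {
          /* the full variant re-acquires the name table lock ... */
          pthread_mutex_lock(&name_table_lock);
          /* ... and delivers the deferred node/link event here */
          pthread_mutex_unlock(&name_table_lock);
      }
  }

  int main(void)
  {
      /* A caller that already holds name_table_lock must use the fast
       * variant; the full node_unlock() would deadlock here. */
      pthread_mutex_lock(&name_table_lock);
      pthread_mutex_lock(&node_lock);
      pending_event = true;
      node_unlock_fast();
      pthread_mutex_unlock(&name_table_lock);

      /* A caller not holding name_table_lock keeps using the full
       * variant, which also delivers the deferred event. */
      pthread_mutex_lock(&node_lock);
      node_unlock();
      return 0;
  }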

Reported-and-Tested-by: John Thompson <thompa.atl@gmail.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 net/tipc/node.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9d2f4c2b08ab..27753325e06e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
 	write_lock_bh(&n->lock);
 }
 
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+	write_unlock_bh(&n->lock);
+}
+
 static void tipc_node_write_unlock(struct tipc_node *n)
 {
 	struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
 	}
 	tipc_node_write_lock(n);
 	list_add_tail(subscr, &n->publ_list);
-	tipc_node_write_unlock(n);
+	tipc_node_write_unlock_fast(n);
 	tipc_node_put(n);
 }
 
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
 	}
 	tipc_node_write_lock(n);
 	list_del_init(subscr);
-	tipc_node_write_unlock(n);
+	tipc_node_write_unlock_fast(n);
 	tipc_node_put(n);
 }
 
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 103/111] tipc: add subscription refcount to avoid invalid delete
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (29 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 104/111] tipc: fix nametbl_lock soft lockup at node/link events Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 106/111] netfilter: nft_log: restrict the log prefix length to 127 Levin, Alexander (Sasha Levin)
                   ` (6 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Parthasarathy Bhuvaragan, David S . Miller, Levin,
	Alexander (Sasha Levin)

From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>

[ Upstream commit d094c4d5f5c7e1b225e94227ca3f007be3adc4e8 ]

Until now, the subscribers keep track of their subscriptions using a
reference count at the subscriber level. At subscription cancel or
subscriber delete, we delete the subscription only if its timer is
still pending. This approach is incorrect because:
1. del_timer() is not SMP safe: if the check for a pending timer
   returns true on CPU0, CPU1 might still run the timer callback and
   delete the subscription. Thus, when CPU0 is scheduled, it deletes
   an invalid subscription.
2. We export tipc_subscrp_report_overlap(), which accesses the
   subscription pointer multiple times. Meanwhile, the subscription
   timer can expire, freeing the subscription, and we might continue
   to access the subscription pointer, leading to memory violations.

In this commit, we introduce a subscription refcount to avoid
deleting an invalid subscription.
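
As an illustration of the rule, a small userspace refcount sketch
(names invented; C11 atomics stand in for the kernel kref, and this is
not the tipc code): every user pins the subscription before touching
it, and only the final put frees it:

  #include <stdatomic.h>
  #include <stdlib.h>

  struct sub {
      atomic_int refs;
      /* ... name sequence, event template, timer, ... */
  };

  static void sub_get(struct sub *s)
  {
      atomic_fetch_add(&s->refs, 1);
  }

  static void sub_put(struct sub *s)
  {
      if (atomic_fetch_sub(&s->refs, 1) == 1)
          free(s);                 /* only the final put frees */
  }

  static void report_overlap(struct sub *s)
  {
      sub_get(s);                  /* pin the subscription */
      /* it is now safe to dereference s repeatedly here, even if a
       * concurrent timer expiry drops its own reference meanwhile */
      sub_put(s);
  }

  int main(void)
  {
      struct sub *s = calloc(1, sizeof(*s));

      if (!s)
          return 1;
      atomic_init(&s->refs, 1);
      report_overlap(s);
      sub_put(s);                  /* drop the initial reference */
      return 0;
  }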

Reported-and-Tested-by: John Thompson <thompa.atl@gmail.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 net/tipc/subscr.c | 124 ++++++++++++++++++++++++++++++------------------------
 net/tipc/subscr.h |   1 +
 2 files changed, 71 insertions(+), 54 deletions(-)

diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd02244e21d..9d94e65d0894 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@ struct tipc_subscriber {
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub);
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
 
 /**
  * htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 {
 	struct tipc_name_seq seq;
 
+	tipc_subscrp_get(sub);
 	tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
 	if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
 		return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 
 	tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
 				node);
+	tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_timeout(unsigned long data)
 {
 	struct tipc_subscription *sub = (struct tipc_subscription *)data;
-	struct tipc_subscriber *subscriber = sub->subscriber;
 
 	/* Notify subscriber of timeout */
 	tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
 				TIPC_SUBSCR_TIMEOUT, 0, 0);
 
-	spin_lock_bh(&subscriber->lock);
-	tipc_subscrp_delete(sub);
-	spin_unlock_bh(&subscriber->lock);
-
-	tipc_subscrb_put(subscriber);
+	tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrb_kref_release(struct kref *kref)
 {
-	struct tipc_subscriber *subcriber = container_of(kref,
-					    struct tipc_subscriber, kref);
-
-	kfree(subcriber);
+	kfree(container_of(kref,struct tipc_subscriber, kref));
 }
 
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
 	kref_get(&subscriber->kref);
 }
 
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+	struct tipc_subscription *sub = container_of(kref,
+						     struct tipc_subscription,
+						     kref);
+	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+	struct tipc_subscriber *subscriber = sub->subscriber;
+
+	spin_lock_bh(&subscriber->lock);
+	tipc_nametbl_unsubscribe(sub);
+	list_del(&sub->subscrp_list);
+	atomic_dec(&tn->subscription_count);
+	spin_unlock_bh(&subscriber->lock);
+	kfree(sub);
+	tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+	kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+	kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+					struct tipc_subscr *s)
+{
+	struct list_head *subscription_list = &subscriber->subscrp_list;
+	struct tipc_subscription *sub, *temp;
+
+	spin_lock_bh(&subscriber->lock);
+	list_for_each_entry_safe(sub, temp, subscription_list,  subscrp_list) {
+		if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+			continue;
+
+		tipc_subscrp_get(sub);
+		spin_unlock_bh(&subscriber->lock);
+		tipc_subscrp_delete(sub);
+		tipc_subscrp_put(sub);
+		spin_lock_bh(&subscriber->lock);
+
+		if (s)
+			break;
+	}
+	spin_unlock_bh(&subscriber->lock);
+}
+
 static struct tipc_subscriber *tipc_subscrb_create(int conid)
 {
 	struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 		pr_warn("Subscriber rejected, no memory\n");
 		return NULL;
 	}
-	kref_init(&subscriber->kref);
 	INIT_LIST_HEAD(&subscriber->subscrp_list);
+	kref_init(&subscriber->kref);
 	subscriber->conid = conid;
 	spin_lock_init(&subscriber->lock);
 
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Destroy any existing subscriptions for subscriber */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		timeout = htohl(sub->evt.s.timeout, sub->swap);
-		if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
-			tipc_subscrp_delete(sub);
-			tipc_subscrb_put(subscriber);
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
-
+	tipc_subscrb_subscrp_delete(subscriber, NULL);
 	tipc_subscrb_put(subscriber);
 }
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
-	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+	u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
 
-	tipc_nametbl_unsubscribe(sub);
-	list_del(&sub->subscrp_list);
-	kfree(sub);
-	atomic_dec(&tn->subscription_count);
+	if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+		tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_cancel(struct tipc_subscr *s,
 				struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Find first matching subscription, exit if not found */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-			timeout = htohl(sub->evt.s.timeout, sub->swap);
-			if ((timeout == TIPC_WAIT_FOREVER) ||
-			    del_timer(&sub->timer)) {
-				tipc_subscrp_delete(sub);
-				tipc_subscrb_put(subscriber);
-			}
-			break;
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
+	tipc_subscrb_subscrp_delete(subscriber, s);
 }
 
 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
 	sub->swap = swap;
 	memcpy(&sub->evt.s, s, sizeof(*s));
 	atomic_inc(&tn->subscription_count);
+	kref_init(&sub->kref);
 	return sub;
 }
 
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 
 	spin_lock_bh(&subscriber->lock);
 	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-	tipc_subscrb_get(subscriber);
 	sub->subscriber = subscriber;
 	tipc_nametbl_subscribe(sub);
+	tipc_subscrb_get(subscriber);
 	spin_unlock_bh(&subscriber->lock);
 
+	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
 	timeout = htohl(sub->evt.s.timeout, swap);
-	if (timeout == TIPC_WAIT_FOREVER)
-		return;
 
-	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-	mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+	if (timeout != TIPC_WAIT_FOREVER)
+		mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103082c9..ffdc214c117a 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@ struct tipc_subscriber;
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
+	struct kref kref;
 	struct tipc_subscriber *subscriber;
 	struct net *net;
 	struct timer_list timer;
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 105/111] netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (31 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 106/111] netfilter: nft_log: restrict the log prefix length to 127 Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 108/111] RDMA/qedr: Fix and simplify memory leak in PD alloc Levin, Alexander (Sasha Levin)
                   ` (4 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Pablo Neira Ayuso, Levin, Alexander (Sasha Levin)

From: Pablo Neira Ayuso <pablo@netfilter.org>

[ Upstream commit 35d0ac9070ef619e3bf44324375878a1c540387b ]

If the element exists and no NLM_F_EXCL is specified, do not bump
set->nelems, otherwise we leak one set element slot. This problem is
amplified if the set is full, since the abort path always decrements
the counter for the -ENFILE case too, giving one spare extra slot.

Fix this by moving set->nelems update to nft_add_set_elem() after
successful element insertion. Moreover, remove the element if the set is
full so there is no need to rely on the abort path to undo things
anymore.
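
For the counting rule itself, here is a small userspace sketch
(illustrative only; SET_SIZE and the helper name are made up) of the
atomic "add unless above the limit" pattern, which the patch now calls
only after the element was successfully inserted and undoes by
removing the element on failure:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define SET_SIZE 2               /* pretend the set holds two elements */

  static atomic_int nelems;

  /* add-unless: increment only while the counter is below the limit */
  static bool add_unless_full(void)
  {
      int cur = atomic_load(&nelems);

      while (cur < SET_SIZE) {
          if (atomic_compare_exchange_weak(&nelems, &cur, cur + 1))
              return true;
      }
      return false;                /* full: caller must undo its insertion */
  }

  int main(void)
  {
      for (int i = 0; i < 4; i++)
          printf("element %d -> %s\n", i,
                 add_unless_full() ? "counted" : "ENFILE, roll back");
      return 0;
  }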

Fixes: c016c7e45ddf ("netfilter: nf_tables: honor NLM_F_EXCL flag in set element insertion")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 net/netfilter/nf_tables_api.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index e5194f6f906c..778fcdb83225 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3637,10 +3637,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 		goto err5;
 	}
 
+	if (set->size &&
+	    !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+		err = -ENFILE;
+		goto err6;
+	}
+
 	nft_trans_elem(trans) = elem;
 	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 	return 0;
 
+err6:
+	set->ops->remove(set, &elem);
 err5:
 	kfree(trans);
 err4:
@@ -3687,15 +3695,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
 		return -EBUSY;
 
 	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
-		if (set->size &&
-		    !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
-			return -ENFILE;
-
 		err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
-		if (err < 0) {
-			atomic_dec(&set->nelems);
+		if (err < 0)
 			break;
-		}
 	}
 	return err;
 }
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 106/111] netfilter: nft_log: restrict the log prefix length to 127
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (30 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 103/111] tipc: add subscription refcount to avoid invalid delete Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 105/111] netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL Levin, Alexander (Sasha Levin)
                   ` (5 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable; +Cc: Liping Zhang, Pablo Neira Ayuso, Levin, Alexander (Sasha Levin)

From: Liping Zhang <zlpnobody@gmail.com>

[ Upstream commit 5ce6b04ce96896e8a79e6f60740ced911eaac7a4 ]

First, the log prefix will be truncated to NF_LOG_PREFIXLEN-1, i.e. 127,
at nf_log_packet(), so the extra part is useless.

Second, after adding a log rule with a very very long prefix, we will
fail to dump the nft rules after this _special_ one, but actually,
they do exist. For example:
  # name_65000=$(printf "%0.sQ" {1..65000})
  # nft add rule filter output log prefix "$name_65000"
  # nft add rule filter output counter
  # nft add rule filter output counter
  # nft list chain filter output
  table ip filter {
      chain output {
          type filter hook output priority 0; policy accept;
      }
  }

So now, restrict the log prefix length to NF_LOG_PREFIXLEN-1.
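
A hedged userspace sketch of the idea (validate_prefix() is a made-up
helper, not the netlink policy machinery in the diff below): lengths
above NF_LOG_PREFIXLEN - 1 are rejected up front rather than silently
truncated later:

  #include <stdio.h>
  #include <string.h>

  #define NF_LOG_PREFIXLEN 128     /* value from the patch */

  /* reject a prefix that would only be truncated later anyway */
  static int validate_prefix(const char *prefix)
  {
      if (strlen(prefix) > NF_LOG_PREFIXLEN - 1)
          return -1;               /* too long, refuse at validation time */
      return 0;
  }

  int main(void)
  {
      char huge[200];

      memset(huge, 'Q', sizeof(huge) - 1);
      huge[sizeof(huge) - 1] = '\0';

      printf("short prefix: %d\n", validate_prefix("nft-drop: "));
      printf("huge prefix:  %d\n", validate_prefix(huge));
      return 0;
  }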

Fixes: 96518518cc41 ("netfilter: add nftables")
Signed-off-by: Liping Zhang <zlpnobody@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 include/uapi/linux/netfilter/nf_log.h | 2 ++
 net/netfilter/nf_log.c                | 1 -
 net/netfilter/nft_log.c               | 3 ++-
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e02387d..d0b5fa91ff54 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
 #define NF_LOG_MACDECODE	0x20	/* Decode MAC header */
 #define NF_LOG_MASK		0x2f
 
+#define NF_LOG_PREFIXLEN	128
+
 #endif /* _NETFILTER_NF_LOG_H */
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90dc24ad..ffb9e8ada899 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
 /* Internal logging interface, which relies on the real
    LOG target modules */
 
-#define NF_LOG_PREFIXLEN		128
 #define NFLOGGER_NAME_LEN		64
 
 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 1b01404bb33f..c7704e9123ef 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -38,7 +38,8 @@ static void nft_log_eval(const struct nft_expr *expr,
 
 static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
 	[NFTA_LOG_GROUP]	= { .type = NLA_U16 },
-	[NFTA_LOG_PREFIX]	= { .type = NLA_STRING },
+	[NFTA_LOG_PREFIX]	= { .type = NLA_STRING,
+				    .len = NF_LOG_PREFIXLEN - 1 },
 	[NFTA_LOG_SNAPLEN]	= { .type = NLA_U32 },
 	[NFTA_LOG_QTHRESHOLD]	= { .type = NLA_U16 },
 	[NFTA_LOG_LEVEL]	= { .type = NLA_U32 },
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 108/111] RDMA/qedr: Fix and simplify memory leak in PD alloc
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (32 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 105/111] netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 107/111] RDMA/qedr: Dispatch port active event from qedr_add Levin, Alexander (Sasha Levin)
                   ` (3 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Ram Amrani, Ram Amrani, Ariel Elior, Doug Ledford, Levin,
	Alexander (Sasha Levin)

From: Ram Amrani <Ram.Amrani@Cavium.com>

[ Upstream commit 9c1e0228ab35e52d30abf4b5629c28350833fbcb ]

Free the PD if no internal resources were available. Move userspace
code under the relevant 'if'.
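
The shape of the fix is the usual goto-based unwind; as a rough,
self-contained userspace sketch (every helper here is a stub, not the
qedr API, and the real change is in the diff below):

  #include <stdlib.h>

  struct pd { int pd_id; };

  /* stand-ins for the firmware calls and the copy to user space */
  static int  hw_alloc_pd(int *id)      { *id = 1; return 0; }
  static void hw_dealloc_pd(int id)     { (void)id; }
  static int  copy_to_user_stub(int id) { (void)id; return 0; }

  static struct pd *alloc_pd(int from_user)
  {
      struct pd *pd = calloc(1, sizeof(*pd));
      int rc;

      if (!pd)
          return NULL;

      rc = hw_alloc_pd(&pd->pd_id);
      if (rc)
          goto err;                        /* nothing else to undo yet */

      if (from_user) {
          rc = copy_to_user_stub(pd->pd_id);
          if (rc) {
              hw_dealloc_pd(pd->pd_id);    /* undo the firmware side */
              goto err;
          }
      }
      return pd;

  err:
      free(pd);                            /* the PD itself is not leaked */
      return NULL;
  }

  int main(void)
  {
      free(alloc_pd(1));
      return 0;
  }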

Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/infiniband/hw/qedr/verbs.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a61514296767..b78e37ed5352 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 			    struct ib_ucontext *context, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
-	struct qedr_ucontext *uctx = NULL;
-	struct qedr_alloc_pd_uresp uresp;
 	struct qedr_pd *pd;
 	u16 pd_id;
 	int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	if (rc)
+		goto err;
 
-	uresp.pd_id = pd_id;
 	pd->pd_id = pd_id;
 
 	if (udata && context) {
+		struct qedr_alloc_pd_uresp uresp;
+
+		uresp.pd_id = pd_id;
+
 		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-		if (rc)
+		if (rc) {
 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-		uctx = get_qedr_ucontext(context);
-		uctx->pd = pd;
-		pd->uctx = uctx;
+			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+			goto err;
+		}
+
+		pd->uctx = get_qedr_ucontext(context);
+		pd->uctx->pd = pd;
 	}
 
 	return &pd->ibpd;
+
+err:
+	kfree(pd);
+	return ERR_PTR(rc);
 }
 
 int qedr_dealloc_pd(struct ib_pd *ibpd)
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 107/111] RDMA/qedr: Dispatch port active event from qedr_add
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (33 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 108/111] RDMA/qedr: Fix and simplify memory leak in PD alloc Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 109/111] RDMA/qedr: Don't reset QP when queues aren't flushed Levin, Alexander (Sasha Levin)
                   ` (2 subsequent siblings)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Ram Amrani, Ram Amrani, Ariel Elior, Doug Ledford, Levin,
	Alexander (Sasha Levin)

From: Ram Amrani <Ram.Amrani@Cavium.com>

[ Upstream commit f449c7a2d822c2d81b5bcb2c50eec80796766726 ]

Relying on qede to trigger qedr on startup is problematic. When
probing both drivers, if qedr loads slowly, qede can assume qedr is
missing and not trigger it. This patch adds triggering from qedr
itself and protects against a race via an atomic bit.
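
As an illustration of the race protection (a C11 atomic_flag stands in
for test_and_set_bit(); the strings are just labels, not driver code),
whichever path runs first dispatches the event exactly once:

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_flag port_up = ATOMIC_FLAG_INIT;

  static void try_dispatch_active(const char *who)
  {
      if (!atomic_flag_test_and_set(&port_up))
          printf("%s: dispatch PORT_ACTIVE\n", who);
      else
          printf("%s: already active, nothing to do\n", who);
  }

  static void close_port(void)
  {
      atomic_flag_clear(&port_up);         /* a later UP dispatches again */
  }

  int main(void)
  {
      try_dispatch_active("qedr_add");     /* first caller wins */
      try_dispatch_active("qede UP");      /* no duplicate event */
      close_port();
      try_dispatch_active("qede UP");      /* dispatches again after DOWN */
      return 0;
  }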

Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/infiniband/hw/qedr/main.c | 20 ++++++++++++++------
 drivers/infiniband/hw/qedr/qedr.h |  5 +++++
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09a8217..58e92bce6825 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -792,6 +792,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
 		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
 			goto sysfs_err;
 
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
 	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
 	return dev;
 
@@ -824,11 +827,10 @@ static void qedr_remove(struct qedr_dev *dev)
 	ib_dealloc_device(&dev->ibdev);
 }
 
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-	return 0;
+	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }
 
 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +839,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
 	qedr_remove(dev);
 }
 
+static void qedr_open(struct qedr_dev *dev)
+{
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
 	union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +871,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 
 	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
 
 	if (rc)
 		DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +885,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
 	switch (event) {
 	case QEDE_UP:
-		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+		qedr_open(dev);
 		break;
 	case QEDE_DOWN:
 		qedr_close(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd7d4fb..f669d0bb697e 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
 	struct qed_rdma_events events;
 };
 
+#define QEDR_ENET_STATE_BIT	(0)
+
 struct qedr_dev {
 	struct ib_device	ibdev;
 	struct qed_dev		*cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
 	struct qedr_cq		*gsi_sqcq;
 	struct qedr_cq		*gsi_rqcq;
 	struct qedr_qp		*gsi_qp;
+
+	unsigned long enet_state;
 };
 
 #define QEDR_MAX_SQ_PBL			(0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)
 
 #define QEDR_MAX_PORT			(1)
+#define QEDR_PORT			(1)
 
 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 109/111] RDMA/qedr: Don't reset QP when queues aren't flushed
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (34 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 107/111] RDMA/qedr: Dispatch port active event from qedr_add Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 110/111] RDMA/qedr: Don't spam dmesg if QP is in error state Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 111/111] RDMA/qedr: Return max inline data in QP query result Levin, Alexander (Sasha Levin)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Ram Amrani, Ram Amrani, Michal Kalderon, Doug Ledford, Levin,
	Alexander (Sasha Levin)

From: Ram Amrani <Ram.Amrani@Cavium.com>

[ Upstream commit 933e6dcaa0f65eb2f624ad760274020874a1f35e ]

Fail QP state transition from error to reset if SQ/RQ are not empty
and still in the process of flushing out the queued work entries.
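
A minimal sketch of the emptiness check, assuming a simplified queue
with just producer/consumer indices (not the qedr structures):

  #include <stdio.h>

  /* a queue is drained only when producer and consumer indices match */
  struct queue { unsigned int prod, cons; };

  static int error_to_reset(const struct queue *sq, const struct queue *rq)
  {
      if (sq->prod != sq->cons || rq->prod != rq->cons)
          return -1;                       /* still flushing: refuse */
      return 0;
  }

  int main(void)
  {
      struct queue sq = { .prod = 3, .cons = 1 };   /* entries pending */
      struct queue rq = { .prod = 0, .cons = 0 };

      printf("reset: %d\n", error_to_reset(&sq, &rq));  /* refused */
      sq.cons = 3;                                      /* flush done */
      printf("reset: %d\n", error_to_reset(&sq, &rq));  /* allowed */
      return 0;
  }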

Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/infiniband/hw/qedr/verbs.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b78e37ed5352..4e3e157009b6 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1729,6 +1729,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 		/* ERR->XXX */
 		switch (new_state) {
 		case QED_ROCE_QP_STATE_RESET:
+			if ((qp->rq.prod != qp->rq.cons) ||
+			    (qp->sq.prod != qp->sq.cons)) {
+				DP_NOTICE(dev,
+					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
+					  qp->sq.cons);
+				status = -EINVAL;
+			}
 			break;
 		default:
 			status = -EINVAL;
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 110/111] RDMA/qedr: Don't spam dmesg if QP is in error state
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (35 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 109/111] RDMA/qedr: Don't reset QP when queues aren't flushed Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 111/111] RDMA/qedr: Return max inline data in QP query result Levin, Alexander (Sasha Levin)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Ram Amrani, Ram Amrani, Michal Kalderon, Doug Ledford, Levin,
	Alexander (Sasha Levin)

From: Ram Amrani <Ram.Amrani@Cavium.com>

[ Upstream commit c78c31496111f497b4a03f955c100091185da8b6 ]

It is normal to flush CQEs if the QP is in error state. Hence there's no
use in printing a message per CQE to dmesg.
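
The pattern is simply conditional logging; a tiny userspace sketch
(enum and names invented for illustration, not the driver code):

  #include <stdio.h>

  enum qp_state { QP_OK, QP_ERR };

  static void handle_flushed_cqe(enum qp_state state, int cqe)
  {
      if (state != QP_ERR)
          fprintf(stderr, "unexpected flushed CQE %d\n", cqe);
      /* complete the work request with a flush status either way */
  }

  int main(void)
  {
      for (int i = 0; i < 1000; i++)
          handle_flushed_cqe(QP_ERR, i);   /* expected flush: stay silent */
      handle_flushed_cqe(QP_OK, 0);        /* one genuine warning */
      return 0;
  }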

Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/infiniband/hw/qedr/verbs.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 4e3e157009b6..960e4bd8ffe9 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3238,9 +3238,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
 				  IB_WC_SUCCESS, 0);
 		break;
 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-		DP_ERR(dev,
-		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-		       cq->icid, qp->icid);
+		if (qp->state != QED_ROCE_QP_STATE_ERR)
+			DP_ERR(dev,
+			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+			       cq->icid, qp->icid);
 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
 				  IB_WC_WR_FLUSH_ERR, 0);
 		break;
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH for v4.9 LTS 111/111] RDMA/qedr: Return max inline data in QP query result
  2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
                   ` (36 preceding siblings ...)
  2017-06-04  8:15 ` [PATCH for v4.9 LTS 110/111] RDMA/qedr: Don't spam dmesg if QP is in error state Levin, Alexander (Sasha Levin)
@ 2017-06-04  8:15 ` Levin, Alexander (Sasha Levin)
  37 siblings, 0 replies; 39+ messages in thread
From: Levin, Alexander (Sasha Levin) @ 2017-06-04  8:15 UTC (permalink / raw)
  To: stable
  Cc: Ram Amrani, Ram Amrani, Michal Kalderon, Doug Ledford, Levin,
	Alexander (Sasha Levin)

From: Ram Amrani <Ram.Amrani@Cavium.com>

[ Upstream commit 59e8970b3798e4cbe575ed9cf4d53098760a2a86 ]

Return the maximum supported amount of inline data, not the qp's current
configured inline data size, when filling out the results of a query
qp call.

Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
---
 drivers/infiniband/hw/qedr/verbs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 960e4bd8ffe9..4ba019e3dc56 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -2032,7 +2032,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
+	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
 	qp_init_attr->cap = qp_attr->cap;
 
 	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 39+ messages in thread

end of thread, other threads:[~2017-06-04  8:16 UTC | newest]

Thread overview: 39+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-06-04  8:15 [PATCH for v4.9 LTS 073/111] nvmet-rdma: Fix missing dma sync to nvme data structures Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 074/111] r8152: avoid start_xmit to call napi_schedule during autosuspend Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 075/111] r8152: check rx after napi is enabled Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 077/111] r8152: fix rtl8152_post_reset function Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 078/111] r8152: avoid start_xmit to schedule napi when napi is disabled Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 076/111] r8152: re-schedule napi for tx Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 079/111] net-next: ethernet: mediatek: change the compatible string Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 081/111] bnxt_en: Enhance autoneg support Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 080/111] bnxt_en: Fix bnxt_reset() in the slow path task Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 082/111] bnxt_en: Fix RTNL lock usage on bnxt_update_link() Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 083/111] bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status() Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 084/111] sctp: sctp gso should set feature with NETIF_F_SG when calling skb_segment Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 085/111] sctp: sctp_addr_id2transport should verify the addr before looking up assoc Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 087/111] mn10300: fix build error of missing fpu_save() Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 086/111] usb: musb: Fix external abort on non-linefetch for musb_irq_work() Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 088/111] romfs: use different way to generate fsid for BLOCK or MTD Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 089/111] frv: add atomic64_add_unless() Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 090/111] frv: add missing atomic64 operations Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 091/111] Documentation/filesystems/proc.txt: add VmPin Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 092/111] proc: add a schedule point in proc_pid_readdir() Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 093/111] mm/slub.c: trace free objects at KERN_INFO Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 094/111] userfaultfd: fix SIGBUS resulting from false rwsem wakeups Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 095/111] kernel/watchdog.c: move hardlockup detector to separate file Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 096/111] kernel/watchdog.c: move shared definitions to nmi.h Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 097/111] kernel/watchdog: prevent false hardlockup on overloaded system Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 098/111] vhost/vsock: handle vhost_vq_init_access() error Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 100/111] ARCv2: smp-boot: wake_flag polling by non-Masters needs to be uncached Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 099/111] ARC: smp-boot: Decouple Non masters waiting API from jump to entry point Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 101/111] tipc: ignore requests when the connection state is not CONNECTED Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 102/111] tipc: fix connection refcount error Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 104/111] tipc: fix nametbl_lock soft lockup at node/link events Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 103/111] tipc: add subscription refcount to avoid invalid delete Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 106/111] netfilter: nft_log: restrict the log prefix length to 127 Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 105/111] netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 108/111] RDMA/qedr: Fix and simplify memory leak in PD alloc Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 107/111] RDMA/qedr: Dispatch port active event from qedr_add Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 109/111] RDMA/qedr: Don't reset QP when queues aren't flushed Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 110/111] RDMA/qedr: Don't spam dmesg if QP is in error state Levin, Alexander (Sasha Levin)
2017-06-04  8:15 ` [PATCH for v4.9 LTS 111/111] RDMA/qedr: Return max inline data in QP query result Levin, Alexander (Sasha Levin)
