dev.dpdk.org archive mirror
 help / color / mirror / Atom feed
From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
To: <dev@dpdk.org>
Cc: <linuxarm@huawei.com>, <xavier_huwei@163.com>,
	<liudongdong3@huawei.com>,  <forest.zhouchang@huawei.com>
Subject: [dpdk-dev] [PATCH 20/22] net/hns3: add reset related process for hns3 PMD driver
Date: Fri, 23 Aug 2019 21:47:09 +0800	[thread overview]
Message-ID: <1566568031-45991-21-git-send-email-xavier.huwei@huawei.com> (raw)
In-Reply-To: <1566568031-45991-1-git-send-email-xavier.huwei@huawei.com>

This patch adds the reset-related process for the hns3 PMD driver.
The following three scenarios will trigger the reset process,
and the driver settings will be restored after the reset
succeeds:
1. Receive a reset interrupt
2. PF receives a hardware error interrupt
3. VF is notified by PF to reset

Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
 drivers/net/hns3/hns3_cmd.c       |  31 ++
 drivers/net/hns3/hns3_ethdev.c    | 625 +++++++++++++++++++++++++++++++++++++-
 drivers/net/hns3/hns3_ethdev.h    |  13 +
 drivers/net/hns3/hns3_ethdev_vf.c | 426 +++++++++++++++++++++++++-
 drivers/net/hns3/hns3_intr.c      | 510 +++++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_intr.h      |  11 +
 drivers/net/hns3/hns3_mbx.c       |  14 +
 7 files changed, 1602 insertions(+), 28 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 8c0bf8d..82eedf1 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -33,6 +33,7 @@
 #include "hns3_stats.h"
 #include "hns3_ethdev.h"
 #include "hns3_regs.h"
+#include "hns3_intr.h"
 #include "hns3_logs.h"
 
 #define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)
@@ -231,6 +232,22 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
 		hns3_err(hw, "wrong cmd head (%d, %d-%d)", head,
 			    csq->next_to_use, csq->next_to_clean);
 		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		if (hns->is_vf) {
+			global = hns3_read_dev(hw, HNS3_VF_RST_ING);
+			fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+			hns3_err(hw, "Delayed VF reset global: %x fun_rst: %x",
+				 global, fun_rst);
+			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
+		} else {
+			global = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
+			fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+			hns3_err(hw, "Delayed IMP reset global: %x fun_rst: %x",
+				 global, fun_rst);
+			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+		}
+
+		hns3_schedule_delayed_reset(hns);
+
 		return -EIO;
 	}
 
@@ -327,6 +344,11 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 			return -EBUSY;
 		}
 
+		if (is_reset_pending(hns)) {
+			hns3_err(hw, "Don't wait for reply because of reset pending");
+			return -EIO;
+		}
+
 		rte_delay_us(1);
 		timeout++;
 	} while (timeout < hw->cmq.tx_timeout);
@@ -482,6 +504,15 @@ hns3_cmd_init(struct hns3_hw *hw)
 	rte_spinlock_unlock(&hw->cmq.crq.lock);
 	rte_spinlock_unlock(&hw->cmq.csq.lock);
 
+	/*
+	 * Check if there is a new pending reset, because a higher level
+	 * reset may occur while a lower level reset is being processed.
+	 */
+	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
+		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
+		ret = -EBUSY;
+		goto err_cmd_init;
+	}
 	rte_atomic16_clear(&hw->reset.disable_cmd);
 
 	ret = hns3_cmd_query_firmware_version(hw, &hw->fw_version);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 22d7e61..9a4c560 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -61,6 +61,17 @@
 #define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
 					| HNS3_FILTER_FE_ROCE_INGRESS_B)
 
+/* Reset related Registers */
+#define HNS3_GLOBAL_RESET_BIT		0
+#define HNS3_CORE_RESET_BIT		1
+#define HNS3_IMP_RESET_BIT		2
+#define HNS3_FUN_RST_ING_B		0
+
+#define HNS3_VECTOR0_IMP_RESET_INT_B	1
+
+#define HNS3_RESET_WAIT_MS	100
+#define HNS3_RESET_WAIT_CNT	200
+
 int hns3_logtype_init;
 int hns3_logtype_driver;
 
@@ -71,6 +82,8 @@ enum hns3_evt_cause {
 	HNS3_VECTOR0_EVENT_OTHER,
 };
 
+static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
+						 uint64_t *levels);
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
 				    int on);
@@ -108,12 +121,34 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 	 * from H/W just for the mailbox.
 	 */
 	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
+		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+		val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
+		if (clearval) {
+			hw->reset.stats.imp_cnt++;
+			hns3_warn(hw, "IMP reset detected, clear reset status");
+		} else {
+			hns3_schedule_delayed_reset(hns);
+			hns3_warn(hw, "IMP reset detected, don't clear reset status");
+		}
+
 		ret = HNS3_VECTOR0_EVENT_RST;
 		goto out;
 	}
 
 	/* Global reset */
 	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
+		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
+		val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
+		if (clearval) {
+			hw->reset.stats.global_cnt++;
+			hns3_warn(hw, "Global reset detected, clear reset status");
+		} else {
+			hns3_schedule_delayed_reset(hns);
+			hns3_warn(hw, "Global reset detected, don't clear reset status");
+		}
+
 		ret = HNS3_VECTOR0_EVENT_RST;
 		goto out;
 	}
@@ -187,6 +222,15 @@ hns3_interrupt_handler(void *param)
 
 	event_cause = hns3_check_event_cause(hns, &clearval);
 
+	/* vector 0 interrupt is shared with reset and mailbox source events. */
+	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
+		hns3_handle_msix_error(hns, &hw->reset.request);
+		hns3_schedule_reset(hns);
+	} else if (event_cause == HNS3_VECTOR0_EVENT_RST)
+		hns3_schedule_reset(hns);
+	else
+		hns3_err(hw, "Received unknown event");
+
 	hns3_clear_event_cause(hw, event_cause, clearval);
 	/* Enable interrupt if it is not cause by reset */
 	hns3_pf_enable_irq0(hw);
@@ -261,6 +305,32 @@ hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
 }
 
 static int
+hns3_restore_vlan_table(struct hns3_adapter *hns)
+{
+	struct hns3_user_vlan_table *vlan_entry;
+	struct hns3_pf *pf = &hns->pf;
+	uint16_t vlan_id;
+	int ret = 0;
+
+	if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) {
+		ret = hns3_vlan_pvid_configure(hns, pf->port_base_vlan_cfg.pvid,
+					       1);
+		return ret;
+	}
+
+	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+		if (vlan_entry->hd_tbl_status) {
+			vlan_id = vlan_entry->vlan_id;
+			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+static int
 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
 {
 	struct hns3_pf *pf = &hns->pf;
@@ -837,7 +907,15 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	init_port_base_vlan_info(hw);
+	/*
+	 * This function can be called in the initialization and reset process,
+	 * when called in the reset process, it means that the hardware has been
+	 * reset successfully and we need to restore the hardware configuration to
+	 * ensure that the hardware configuration remains unchanged before and
+	 * after reset.
+	 */
+	if (rte_atomic16_read(&hw->reset.resetting) == 0)
+		init_port_base_vlan_info(hw);
 
 	ret = hns3_enable_vlan_filter(hns, true);
 	if (ret) {
@@ -852,22 +930,85 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0);
+	/*
+	 * When in the reinit dev stage of the reset process, the following
+	 * vlan-related configurations may differ from those at initialization,
+	 * we will restore configurations to hardware in hns3_restore_vlan_table
+	 * and hns3_restore_vlan_conf later.
+	 */
+	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+		ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0);
+		if (ret) {
+			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
+			return ret;
+		}
+
+		ret = hns3_en_hw_strip_rxvtag(hns, false);
+		if (ret) {
+			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
+				 ret);
+			return ret;
+		}
+	}
+
+	return hns3_default_vlan_config(hns);
+}
+
+static int
+hns3_restore_vlan_conf(struct hns3_adapter *hns)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
 	if (ret) {
-		hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
+		hns3_err(hw, "hns3 restore vlan rx conf fail, ret =%d", ret);
 		return ret;
 	}
 
-	ret = hns3_en_hw_strip_rxvtag(hns, false);
+	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
+	if (ret)
+		hns3_err(hw, "hns3 restore vlan tx conf fail, ret =%d", ret);
+
+	return ret;
+}
+
+static int
+hns3_dev_configure_vlan(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct rte_eth_dev_data *data = dev->data;
+	struct rte_eth_txmode *txmode;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	txmode = &data->dev_conf.txmode;
+	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
+		hns3_warn(hw,
+			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
+			  "configuration is not supported! Ignore these two "
+			  "parameters: hw_vlan_reject_tagged(%d), "
+			  "hw_vlan_reject_untagged(%d)",
+			  txmode->hw_vlan_reject_tagged,
+			  txmode->hw_vlan_reject_untagged);
+
+	/* Apply vlan offload setting */
+	ret = hns3_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
 	if (ret) {
-		hns3_err(hw, "rx strip configure fail in pf, ret =%d",
-			 ret);
+		hns3_err(hw, "dev config vlan Strip failed, ret =%d", ret);
 		return ret;
 	}
 
-	return hns3_default_vlan_config(hns);
-}
+	/* Apply pvid setting */
+	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
+				 txmode->hw_vlan_insert_pvid);
+	if (ret)
+		hns3_err(hw, "dev config vlan pvid(%d) failed, ret =%d",
+			 txmode->pvid, ret);
 
+	return ret;
+}
 
 static int
 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
@@ -3480,6 +3621,19 @@ hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
 }
 
 static int
+hns3_dev_promisc_restore(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	bool en_mc_pmc;
+	bool en_uc_pmc;
+
+	en_uc_pmc = (hw->data->promiscuous == 1) ? true : false;
+	en_mc_pmc = (hw->data->all_multicast == 1) ? true : false;
+
+	return hns3_set_promisc_mode(hw, en_uc_pmc, en_mc_pmc);
+}
+
+static int
 hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
 {
 	struct hns3_sfp_speed_cmd *resp;
@@ -3871,6 +4025,8 @@ hns3_dev_start(struct rte_eth_dev *eth_dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
+	if (rte_atomic16_read(&hw->reset.resetting))
+		return -EBUSY;
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
 
@@ -3902,8 +4058,11 @@ hns3_do_stop(struct hns3_adapter *hns)
 		return ret;
 	hw->mac.link_status = ETH_LINK_DOWN;
 
-	hns3_configure_all_mac_addr(hns, true);
-	reset_queue = true;
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+		hns3_configure_all_mac_addr(hns, true);
+		reset_queue = true;
+	} else
+		reset_queue = false;
 	hw->mac.default_addr_setted = false;
 	return hns3_stop_queues(hns, reset_queue);
 }
@@ -3921,9 +4080,11 @@ hns3_dev_stop(struct rte_eth_dev *eth_dev)
 	rte_wmb();
 
 	rte_spinlock_lock(&hw->lock);
-	hns3_do_stop(hns);
-	hns3_dev_release_mbufs(hns);
-	hw->adapter_state = HNS3_NIC_CONFIGURED;
+	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+		hns3_do_stop(hns);
+		hns3_dev_release_mbufs(hns);
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	}
 	rte_spinlock_unlock(&hw->lock);
 }
 
@@ -3937,6 +4098,8 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
 		hns3_dev_stop(eth_dev);
 
 	hw->adapter_state = HNS3_NIC_CLOSING;
+	hns3_reset_abort(hns);
+	hw->adapter_state = HNS3_NIC_CLOSED;
 	rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 
 	hns3_configure_all_mc_mac_addr(hns, true);
@@ -3944,7 +4107,8 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
 	hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
 	hns3_uninit_pf(eth_dev);
 	hns3_free_all_queues(eth_dev);
-	hw->adapter_state = HNS3_NIC_CLOSED;
+	rte_free(hw->reset.wait_data);
+	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
 }
 
 static int
@@ -4133,6 +4297,414 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	return 0;
 }
 
+static int
+hns3_reinit_dev(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_cmd_init(hw);
+	if (ret) {
+		hns3_err(hw, "Failed to init cmd: %d", ret);
+		return ret;
+	}
+
+	ret = hns3_reset_all_queues(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to reset all queues: %d", ret);
+		goto err_init;
+	}
+
+	ret = hns3_init_hardware(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to init hardware: %d", ret);
+		goto err_init;
+	}
+
+	ret = hns3_enable_hw_error_intr(hns, true);
+	if (ret) {
+		hns3_err(hw, "fail to enable hw error interrupts: %d",
+			     ret);
+		goto err_mac_init;
+	}
+	hns3_info(hw, "Reset done, driver initialization finished.");
+
+	return 0;
+
+err_mac_init:
+	hns3_uninit_umv_space(hw);
+err_init:
+	hns3_cmd_uninit(hw);
+
+	return ret;
+}
+
+static bool
+is_pf_reset_done(struct hns3_hw *hw)
+{
+	uint32_t val, reg, reg_bit;
+
+	switch (hw->reset.level) {
+	case HNS3_IMP_RESET:
+		reg = HNS3_GLOBAL_RESET_REG;
+		reg_bit = HNS3_IMP_RESET_BIT;
+		break;
+	case HNS3_GLOBAL_RESET:
+		reg = HNS3_GLOBAL_RESET_REG;
+		reg_bit = HNS3_GLOBAL_RESET_BIT;
+		break;
+	case HNS3_FUNC_RESET:
+		reg = HNS3_FUN_RST_ING;
+		reg_bit = HNS3_FUN_RST_ING_B;
+		break;
+	case HNS3_FLR_RESET:
+	default:
+		hns3_err(hw, "Wait for unsupported reset level: %d",
+			 hw->reset.level);
+		return true;
+	}
+	val = hns3_read_dev(hw, reg);
+	if (hns3_get_bit(val, reg_bit))
+		return false;
+	else
+		return true;
+}
+
+bool
+hns3_is_reset_pending(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset;
+
+	hns3_check_event_cause(hns, NULL);
+	reset = hns3_get_reset_level(hns, &hw->reset.pending);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is pending", reset);
+		return true;
+	}
+	reset = hns3_get_reset_level(hns, &hw->reset.request);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is request", reset);
+		return true;
+	}
+	return false;
+}
+
+static int
+hns3_wait_hardware_ready(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_wait_data *wait_data = hw->reset.wait_data;
+	struct timeval tv;
+
+	if (wait_data->result == HNS3_WAIT_SUCCESS)
+		return 0;
+	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return -ETIME;
+	} else if (wait_data->result == HNS3_WAIT_REQUEST)
+		return -EAGAIN;
+
+	wait_data->hns = hns;
+	wait_data->check_completion = is_pf_reset_done;
+	wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
+				      HNS3_RESET_WAIT_MS + get_timeofday_ms();
+	wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
+	wait_data->count = HNS3_RESET_WAIT_CNT;
+	wait_data->result = HNS3_WAIT_REQUEST;
+	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
+	return -EAGAIN;
+}
+
+static int
+hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
+{
+	struct hns3_cmd_desc desc;
+	struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
+	req->fun_reset_vfid = func_id;
+
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+static int
+hns3_imp_reset_cmd(struct hns3_hw *hw)
+{
+	struct hns3_cmd_desc desc;
+
+	hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
+	desc.data[0] = 0xeedd;
+
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+static void
+hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct timeval tv;
+	uint32_t val;
+
+	gettimeofday(&tv, NULL);
+	if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
+	    hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
+		hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return;
+	}
+
+	switch (reset_level) {
+	case HNS3_IMP_RESET:
+		hns3_imp_reset_cmd(hw);
+		hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		break;
+	case HNS3_GLOBAL_RESET:
+		val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
+		hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
+		hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
+		hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		break;
+	case HNS3_FUNC_RESET:
+		hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		/* schedule again to check later */
+		hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
+		hns3_schedule_reset(hns);
+		break;
+	default:
+		hns3_warn(hw, "Unsupported reset level: %d", reset_level);
+		return;
+	}
+	hns3_atomic_clear_bit(reset_level, &hw->reset.request);
+}
+
+static enum hns3_reset_level
+hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level = HNS3_NONE_RESET;
+
+	/* Return the highest priority reset level amongst all */
+	if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
+		reset_level = HNS3_IMP_RESET;
+	else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
+		reset_level = HNS3_GLOBAL_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
+		reset_level = HNS3_FUNC_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+		reset_level = HNS3_FLR_RESET;
+
+	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
+		return HNS3_NONE_RESET;
+
+	return reset_level;
+}
+
+static int
+hns3_prepare_reset(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	uint32_t reg_val;
+	int ret;
+
+	switch (hw->reset.level) {
+	case HNS3_FUNC_RESET:
+		ret = hns3_func_reset_cmd(hw, 0);
+		if (ret)
+			return ret;
+
+		/*
+		 * After performing PF reset, it is not necessary to do the
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hns3_cmd_init is called.
+		 */
+		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		hw->reset.stats.request_cnt++;
+		break;
+	case HNS3_IMP_RESET:
+		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
+			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int
+hns3_set_rst_done(struct hns3_hw *hw)
+{
+	struct hns3_pf_rst_done_cmd *req;
+	struct hns3_cmd_desc desc;
+
+	req = (struct hns3_pf_rst_done_cmd *)desc.data;
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
+	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+static int
+hns3_stop_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+	hw->mac.link_status = ETH_LINK_DOWN;
+
+	hns3_set_rxtx_function(eth_dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(eth_dev);
+	rte_delay_ms(hw->tqps_num);
+
+	rte_spinlock_lock(&hw->lock);
+	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
+	    hw->adapter_state == HNS3_NIC_STOPPING) {
+		hns3_do_stop(hns);
+		hw->reset.mbuf_deferred_free = true;
+	} else
+		hw->reset.mbuf_deferred_free = false;
+
+	/*
+	 * It is cumbersome for hardware to pick-and-choose entries for deletion
+	 * from the table space. Hence, for function reset, software intervention
+	 * is required to delete the entries.
+	 */
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+		hns3_configure_all_mc_mac_addr(hns, true);
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+static int
+hns3_start_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	if (hw->reset.level == HNS3_IMP_RESET ||
+	    hw->reset.level == HNS3_GLOBAL_RESET)
+		hns3_set_rst_done(hw);
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	hns3_set_rxtx_function(eth_dev);
+	hns3_mp_req_start_rxtx(eth_dev);
+	hns3_service_handler(eth_dev);
+	return 0;
+}
+
+static int
+hns3_restore_conf(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_configure_all_mac_addr(hns, false);
+	if (ret)
+		return ret;
+
+	ret = hns3_configure_all_mc_mac_addr(hns, false);
+	if (ret)
+		goto err_mc_mac;
+
+	ret = hns3_dev_promisc_restore(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_vlan_table(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_vlan_conf(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_all_fdir_filter(hns);
+	if (ret)
+		goto err_promisc;
+
+	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
+		ret = hns3_do_start(hns, false);
+		if (ret)
+			goto err_promisc;
+		hns3_info(hw, "hns3 dev restart successful!");
+	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	return 0;
+
+err_promisc:
+	hns3_configure_all_mc_mac_addr(hns, true);
+err_mc_mac:
+	hns3_configure_all_mac_addr(hns, true);
+	return ret;
+}
+
+static void
+hns3_reset_service(void *param)
+{
+	struct hns3_adapter *hns = (struct hns3_adapter *)param;
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level;
+	struct timeval tv_delta;
+	struct timeval tv_start;
+	struct timeval tv;
+	uint64_t msec;
+	int ret;
+
+	/*
+	 * The interrupt is not triggered within the delay time.
+	 * The interrupt may have been lost. It is necessary to handle
+	 * the interrupt to recover from the error.
+	 */
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
+		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+		hns3_err(hw, "Handling interrupts in delayed tasks");
+		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+	}
+	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+
+	/*
+	 * Check if there is any ongoing reset in the hardware. This status can
+	 * be checked from reset_pending. If there is then, we need to wait for
+	 * hardware to complete reset.
+	 *    a. If we are able to figure out in reasonable time that hardware
+	 *       has been fully reset, then we can proceed with the driver and
+	 *       client reset.
+	 *    b. else, we can come back later to check this status so re-sched
+	 *       now.
+	 */
+	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+	if (reset_level != HNS3_NONE_RESET) {
+		gettimeofday(&tv_start, NULL);
+		ret = hns3_reset_process(hns, reset_level);
+		gettimeofday(&tv, NULL);
+		timersub(&tv, &tv_start, &tv_delta);
+		msec = tv_delta.tv_sec * MSEC_PER_SEC +
+		       tv_delta.tv_usec / USEC_PER_MSEC;
+		if (msec > HNS3_RESET_PROCESS_MS)
+			hns3_err(hw, "%d handle long time delta %ld ms time=%ld.%.6ld",
+				 hw->reset.level, msec,
+				 tv.tv_sec, tv.tv_usec);
+		if (ret == -EAGAIN)
+			return;
+	}
+
+	/* Check if we got any *new* reset requests to be honored */
+	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
+	if (reset_level != HNS3_NONE_RESET)
+		hns3_msix_process(hns, reset_level);
+}
+
 static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.dev_start          = hns3_dev_start,
 	.dev_stop           = hns3_dev_stop,
@@ -4178,6 +4750,16 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
 };
 
+static const struct hns3_reset_ops hns3_reset_ops = {
+	.reset_service       = hns3_reset_service,
+	.stop_service        = hns3_stop_service,
+	.prepare_reset       = hns3_prepare_reset,
+	.wait_hardware_ready = hns3_wait_hardware_ready,
+	.reinit_dev          = hns3_reinit_dev,
+	.restore_conf	     = hns3_restore_conf,
+	.start_service       = hns3_start_service,
+};
+
 static int
 hns3_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -4221,6 +4803,11 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 	 */
 	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
 
+	ret = hns3_reset_init(hw);
+	if (ret)
+		goto err_init_reset;
+	hw->reset.ops = &hns3_reset_ops;
+
 	ret = hns3_init_pf(eth_dev);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
@@ -4244,6 +4831,14 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 			    &eth_dev->data->mac_addrs[0]);
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+		hns3_err(hw, "Reschedule reset service after dev_init");
+		hns3_schedule_reset(hns);
+	} else {
+		/* IMP will wait ready flag before reset */
+		hns3_notify_reset_ready(hw, false);
+	}
+
 	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
 	hns3_info(hw, "hns3 dev initialization successful!");
 
@@ -4253,6 +4848,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 	hns3_uninit_pf(eth_dev);
 
 err_init_pf:
+	rte_free(hw->reset.wait_data);
+err_init_reset:
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 986314c..97f9637 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -620,5 +620,18 @@ hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
 
 int hns3_buffer_alloc(struct hns3_hw *hw);
 int hns3_config_gro(struct hns3_hw *hw, bool en);
+bool hns3_is_reset_pending(struct hns3_adapter *hns);
+bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
+
+static inline bool
+is_reset_pending(struct hns3_adapter *hns)
+{
+	bool ret;
+	if (hns->is_vf)
+		ret = hns3vf_is_reset_pending(hns);
+	else
+		ret = hns3_is_reset_pending(hns);
+	return ret;
+}
 
 #endif /* _HNS3_ETHDEV_H_ */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index d941969..45360c4 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -47,12 +47,20 @@
 #define HNS3VF_RESET_WAIT_MS	20
 #define HNS3VF_RESET_WAIT_CNT	2000
 
+/* Reset related Registers */
+#define HNS3_GLOBAL_RESET_BIT		0
+#define HNS3_CORE_RESET_BIT		1
+#define HNS3_IMP_RESET_BIT		2
+#define HNS3_FUN_RST_ING_B		0
+
 enum hns3vf_evt_cause {
 	HNS3VF_VECTOR0_EVENT_RST,
 	HNS3VF_VECTOR0_EVENT_MBX,
 	HNS3VF_VECTOR0_EVENT_OTHER,
 };
 
+static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
+						    uint64_t *levels);
 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
 
@@ -442,6 +450,11 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EBUSY;
 	}
 
+	if (rte_atomic16_read(&hw->reset.resetting)) {
+		hns3_err(hw, "Failed to set mtu during resetting");
+		return -EIO;
+	}
+
 	rte_spinlock_lock(&hw->lock);
 	ret = hns3vf_config_mtu(hw, mtu);
 	if (ret) {
@@ -545,6 +558,26 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 	/* Fetch the events from their corresponding regs */
 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
 
+	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
+		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
+		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
+		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
+		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
+		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
+		if (clearval) {
+			hw->reset.stats.global_cnt++;
+			hns3_warn(hw, "Global reset detected, clear reset status");
+		} else {
+			hns3_schedule_delayed_reset(hns);
+			hns3_warn(hw, "Global reset detected, don't clear reset status");
+		}
+
+		ret = HNS3VF_VECTOR0_EVENT_RST;
+		goto out;
+	}
+
 	/* Check for vector0 mailbox(=CMDQ RX) event source */
 	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
@@ -579,6 +612,9 @@ hns3vf_interrupt_handler(void *param)
 	event_cause = hns3vf_check_event_cause(hns, &clearval);
 
 	switch (event_cause) {
+	case HNS3VF_VECTOR0_EVENT_RST:
+		hns3_schedule_reset(hns);
+		break;
 	case HNS3VF_VECTOR0_EVENT_MBX:
 		hns3_dev_handle_mbx_msg(hw);
 		break;
@@ -802,6 +838,67 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 }
 
 static int
+hns3vf_restore_vlan_table(struct hns3_adapter *hns)
+{
+	struct rte_vlan_filter_conf *vfc;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t vlan_id;
+	uint64_t vbit;
+	uint64_t ids;
+	int ret = 0;
+	uint32_t i;
+
+	vfc = &hw->data->vlan_filter_conf;
+	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
+		if (vfc->ids[i] == 0)
+			continue;
+		ids = vfc->ids[i];
+		while (ids) {
+			/*
+			 * 64 is the number of bits in ids; each bit
+			 * corresponds to one VLAN ID
+			 */
+			vlan_id = 64 * i;
+			/* count trailing zeroes */
+			vbit = ~ids & (ids - 1);
+			/* clear least significant bit set */
+			ids ^= (ids ^ (ids - 1)) ^ vbit;
+			for (; vbit;) {
+				vbit >>= 1;
+				vlan_id++;
+			}
+			ret = hns3vf_vlan_filter_configure(hns, vlan_id, 1);
+			if (ret) {
+				hns3_err(hw,
+					 "VF restore vlan table failed, ret =%d",
+					 ret);
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int
+hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_conf *dev_conf;
+	bool en;
+	int ret;
+
+	dev_conf = &hw->data->dev_conf;
+	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+								   : false;
+	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
+	if (ret)
+		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
+			 ret);
+	return ret;
+}
+
+static int
 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
@@ -843,14 +940,11 @@ hns3vf_keep_alive_handler(void *param)
 	uint8_t respmsg;
 	int ret;
 
-	if (!hns3vf_is_reset_pending(hns)) {
-		ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
-					false, &respmsg, sizeof(uint8_t));
-		if (ret)
-			hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
-				 ret);
-	} else
-		hns3_warn(hw, "Cancel keeping alive when reset is pending");
+	ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
+				false, &respmsg, sizeof(uint8_t));
+	if (ret)
+		hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
+			 ret);
 
 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
 			  eth_dev);
@@ -1028,8 +1122,11 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 
 	hw->mac.link_status = ETH_LINK_DOWN;
 
-	hns3vf_configure_mac_addr(hns, true);
-	reset_queue = true;
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+		hns3vf_configure_mac_addr(hns, true);
+		reset_queue = true;
+	} else
+		reset_queue = false;
 	return hns3_stop_queues(hns, reset_queue);
 }
 
@@ -1050,9 +1147,11 @@ hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
 	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
-	hns3vf_do_stop(hns);
-	hns3_dev_release_mbufs(hns);
-	hw->adapter_state = HNS3_NIC_CONFIGURED;
+	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+		hns3vf_do_stop(hns);
+		hns3_dev_release_mbufs(hns);
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	}
 	rte_spinlock_unlock(&hw->lock);
 }
 
@@ -1066,12 +1165,16 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
 		hns3vf_dev_stop(eth_dev);
 
 	hw->adapter_state = HNS3_NIC_CLOSING;
+	hns3_reset_abort(hns);
+	hw->adapter_state = HNS3_NIC_CLOSED;
 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
 	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+
 	hns3vf_configure_all_mc_mac_addr(hns, true);
 	hns3vf_uninit_vf(eth_dev);
 	hns3_free_all_queues(eth_dev);
-	hw->adapter_state = HNS3_NIC_CLOSED;
+	rte_free(hw->reset.wait_data);
+	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
 }
 
 static int
@@ -1135,6 +1238,8 @@ hns3vf_dev_start(struct rte_eth_dev *eth_dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
+	if (rte_atomic16_read(&hw->reset.resetting))
+		return -EBUSY;
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
 	ret = hns3vf_do_start(hns, true);
@@ -1149,6 +1254,274 @@ hns3vf_dev_start(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static bool
+is_vf_reset_done(struct hns3_hw *hw)
+{
+#define HNS3_FUN_RST_ING_BITS \
+	(BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
+	 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
+	 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
+	 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
+
+	uint32_t val;
+
+	if (hw->reset.level == HNS3_VF_RESET) {
+		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
+		if (val & HNS3_VF_RST_ING_BIT)
+			return false;
+	} else {
+		val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+		if (val & HNS3_FUN_RST_ING_BITS)
+			return false;
+	}
+	return true;
+}
+
+bool
+hns3vf_is_reset_pending(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset;
+
+	hns3vf_check_event_cause(hns, NULL);
+	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is pending", reset);
+		return true;
+	}
+	return false;
+}
+
+static int
+hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_wait_data *wait_data = hw->reset.wait_data;
+	struct timeval tv;
+
+	if (wait_data->result == HNS3_WAIT_SUCCESS)
+		return 0;
+	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return -ETIME;
+	} else if (wait_data->result == HNS3_WAIT_REQUEST)
+		return -EAGAIN;
+
+	wait_data->hns = hns;
+	wait_data->check_completion = is_vf_reset_done;
+	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
+				      HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
+	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
+	wait_data->count = HNS3VF_RESET_WAIT_CNT;
+	wait_data->result = HNS3_WAIT_REQUEST;
+	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
+	return -EAGAIN;
+}
+
+static int
+hns3vf_prepare_reset(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
+	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
+		ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
+					0, true, NULL, 0);
+	}
+	rte_atomic16_set(&hw->reset.disable_cmd, 1);
+
+	return ret;
+}
+
+static int
+hns3vf_stop_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+	hw->mac.link_status = ETH_LINK_DOWN;
+
+	hns3_set_rxtx_function(eth_dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(eth_dev);
+	rte_delay_ms(hw->tqps_num);
+
+	rte_spinlock_lock(&hw->lock);
+	if (hw->adapter_state == HNS3_NIC_STARTED ||
+	    hw->adapter_state == HNS3_NIC_STOPPING) {
+		hns3vf_do_stop(hns);
+		hw->reset.mbuf_deferred_free = true;
+	} else
+		hw->reset.mbuf_deferred_free = false;
+
+	/*
+	 * It is cumbersome for hardware to pick-and-choose entries for deletion
+	 * from table space. Hence, for function reset software intervention is
+	 * required to delete the entries.
+	 */
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+		hns3vf_configure_all_mc_mac_addr(hns, true);
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+static int
+hns3vf_start_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	hns3_set_rxtx_function(eth_dev);
+	hns3_mp_req_start_rxtx(eth_dev);
+
+	hns3vf_service_handler(eth_dev);
+	return 0;
+}
+
+static int
+hns3vf_restore_conf(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3vf_configure_mac_addr(hns, false);
+	if (ret)
+		return ret;
+
+	ret = hns3vf_configure_all_mc_mac_addr(hns, false);
+	if (ret)
+		goto err_mc_mac;
+
+	ret = hns3vf_restore_vlan_table(hns);
+	if (ret)
+		goto err_vlan_table;
+
+	ret = hns3vf_restore_vlan_conf(hns);
+	if (ret)
+		goto err_vlan_table;
+
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
+		ret = hns3vf_do_start(hns, false);
+		if (ret)
+			goto err_vlan_table;
+		hns3_info(hw, "hns3vf dev restart successful!");
+	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	return 0;
+
+err_vlan_table:
+	hns3vf_configure_all_mc_mac_addr(hns, true);
+err_mc_mac:
+	hns3vf_configure_mac_addr(hns, true);
+	return ret;
+}
+
+static enum hns3_reset_level
+hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+{
+	enum hns3_reset_level reset_level;
+
+	/* return the highest priority reset level amongst all */
+	if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
+		reset_level = HNS3_VF_RESET;
+	else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
+		reset_level = HNS3_VF_FULL_RESET;
+	else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
+		reset_level = HNS3_VF_PF_FUNC_RESET;
+	else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
+		reset_level = HNS3_VF_FUNC_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+		reset_level = HNS3_FLR_RESET;
+	else
+		reset_level = HNS3_NONE_RESET;
+
+	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
+		return HNS3_NONE_RESET;
+
+	return reset_level;
+}
+
+static void
+hns3vf_reset_service(void *param)
+{
+	struct hns3_adapter *hns = (struct hns3_adapter *)param;
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level;
+	struct timeval tv_delta;
+	struct timeval tv_start;
+	struct timeval tv;
+	uint64_t msec;
+
+	/*
+	 * The interrupt is not triggered within the delay time.
+	 * The interrupt may have been lost. It is necessary to handle
+	 * the interrupt to recover from the error.
+	 */
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
+		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+		hns3_err(hw, "Handling interrupts in delayed tasks");
+		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+	}
+	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+
+	/*
+	 * Hardware reset has been notified, we now have to poll & check if
+	 * hardware has actually completed the reset sequence.
+	 */
+	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
+	if (reset_level != HNS3_NONE_RESET) {
+		gettimeofday(&tv_start, NULL);
+		hns3_reset_process(hns, reset_level);
+		gettimeofday(&tv, NULL);
+		timersub(&tv, &tv_start, &tv_delta);
+		msec = tv_delta.tv_sec * MSEC_PER_SEC +
+		       tv_delta.tv_usec / USEC_PER_MSEC;
+		if (msec > HNS3_RESET_PROCESS_MS)
+			hns3_err(hw, "%d handle long time delta %ld ms time=%ld.%.6ld",
+				 hw->reset.level, msec,
+				 tv.tv_sec, tv.tv_usec);
+	}
+}
+
+static int
+hns3vf_reinit_dev(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	/* Firmware command initialize */
+	ret = hns3_cmd_init(hw);
+	if (ret) {
+		hns3_err(hw, "Failed to init cmd: %d", ret);
+		return ret;
+	}
+
+	ret = hns3_reset_all_queues(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to reset all queues: %d", ret);
+		goto err_init;
+	}
+
+	ret = hns3vf_init_hardware(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to init hardware: %d", ret);
+		goto err_init;
+	}
+
+	return 0;
+
+err_init:
+	hns3_cmd_uninit(hw);
+	return ret;
+}
+
 static const struct eth_dev_ops hns3vf_eth_dev_ops = {
 	.dev_start          = hns3vf_dev_start,
 	.dev_stop           = hns3vf_dev_stop,
@@ -1183,6 +1556,16 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
 };
 
+static const struct hns3_reset_ops hns3vf_reset_ops = {
+	.reset_service       = hns3vf_reset_service,
+	.stop_service        = hns3vf_stop_service,
+	.prepare_reset       = hns3vf_prepare_reset,
+	.wait_hardware_ready = hns3vf_wait_hardware_ready,
+	.reinit_dev          = hns3vf_reinit_dev,
+	.restore_conf        = hns3vf_restore_conf,
+	.start_service       = hns3vf_start_service,
+};
+
 static int
 hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -1216,6 +1599,11 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 	hns->is_vf = true;
 	hw->data = eth_dev->data;
 
+	ret = hns3_reset_init(hw);
+	if (ret)
+		goto err_init_reset;
+	hw->reset.ops = &hns3vf_reset_ops;
+
 	ret = hns3vf_init_vf(eth_dev);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
@@ -1238,6 +1626,13 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
 			    &eth_dev->data->mac_addrs[0]);
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+		hns3_err(hw, "Reschedule reset service after dev_init");
+		hns3_schedule_reset(hns);
+	} else {
+		/* IMP will wait ready flag before reset */
+		hns3_notify_reset_ready(hw, false);
+	}
 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
 			  eth_dev);
 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
@@ -1248,6 +1643,9 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 	hns3vf_uninit_vf(eth_dev);
 
 err_init_vf:
+	rte_free(hw->reset.wait_data);
+
+err_init_reset:
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 2d2051e..1728311 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -24,6 +24,8 @@
 #include "hns3_regs.h"
 #include "hns3_rxtx.h"
 
+#define SWITCH_CONTEXT_US	10
+
 /* offset in MSIX bd */
 #define MAC_ERROR_OFFSET	1
 #define PPP_PF_ERROR_OFFSET	2
@@ -37,6 +39,11 @@
 			hw->reset.stats.merge_cnt++;	\
 	} while (0)
 
+static const char *reset_string[HNS3_MAX_RESET] = {
+	"none",	"vf_func", "vf_pf_func", "vf_full", "flr",
+	"vf_global", "pf_func", "global", "IMP",
+};
+
 const struct hns3_hw_error mac_afifo_tnl_int[] = {
 	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
 	  .reset_level = HNS3_NONE_RESET },
@@ -656,3 +663,506 @@ hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
 out:
 	rte_free(desc);
 }
+
+int
+hns3_reset_init(struct hns3_hw *hw)
+{
+	rte_spinlock_init(&hw->lock);
+	hw->reset.level = HNS3_NONE_RESET;
+	hw->reset.stage = RESET_STAGE_NONE;
+	hw->reset.request = 0;
+	hw->reset.pending = 0;
+	rte_atomic16_init(&hw->reset.resetting);
+	rte_atomic16_init(&hw->reset.disable_cmd);
+	hw->reset.wait_data = rte_zmalloc("wait_data",
+					  sizeof(struct hns3_wait_data), 0);
+	if (!hw->reset.wait_data) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for wait_data");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void
+hns3_schedule_reset(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+
+	/* Reschedule the reset process after successful initialization */
+	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
+		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_PENDING);
+		return;
+	}
+
+	if (hw->adapter_state >= HNS3_NIC_CLOSED)
+		return;
+
+	/* Schedule restart alarm if it is not scheduled yet */
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_REQUESTED)
+		return;
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED)
+		rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
+	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+
+	rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
+}
+
+void
+hns3_schedule_delayed_reset(struct hns3_adapter *hns)
+{
+#define DEFERRED_SCHED_US (3 * MSEC_PER_SEC * USEC_PER_MSEC)
+	struct hns3_hw *hw = &hns->hw;
+
+	/* Do nothing if it is uninited or closed */
+	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED ||
+	    hw->adapter_state >= HNS3_NIC_CLOSED) {
+		return;
+	}
+
+	if (rte_atomic16_read(&hns->hw.reset.schedule) != SCHEDULE_NONE)
+		return;
+	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_DEFERRED);
+	rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
+}
+
+void
+hns3_wait_callback(void *param)
+{
+	struct hns3_wait_data *data = (struct hns3_wait_data *)param;
+	struct hns3_adapter *hns = data->hns;
+	struct hns3_hw *hw = &hns->hw;
+	uint64_t msec;
+	bool done;
+
+	data->count--;
+	if (data->check_completion) {
+		/*
+		 * Check if the current time exceeds the deadline
+		 * or a pending reset coming, or reset during close.
+		 */
+		msec = get_timeofday_ms();
+		if (msec > data->end_ms || is_reset_pending(hns) ||
+		    hw->adapter_state == HNS3_NIC_CLOSING) {
+			done = false;
+			data->count = 0;
+		} else
+			done = data->check_completion(hw);
+	} else
+		done = true;
+
+	if (!done && data->count > 0) {
+		rte_eal_alarm_set(data->interval, hns3_wait_callback, data);
+		return;
+	}
+	if (done)
+		data->result = HNS3_WAIT_SUCCESS;
+	else {
+		hns3_err(hw, "%s wait timeout at stage %d",
+			 reset_string[hw->reset.level], hw->reset.stage);
+		data->result = HNS3_WAIT_TIMEOUT;
+	}
+	hns3_schedule_reset(hns);
+}
+
+void
+hns3_notify_reset_ready(struct hns3_hw *hw, bool enable)
+{
+	uint32_t reg_val;
+
+	reg_val = hns3_read_dev(hw, HNS3_CMDQ_TX_DEPTH_REG);
+	if (enable)
+		reg_val |= HNS3_NIC_SW_RST_RDY;
+	else
+		reg_val &= ~HNS3_NIC_SW_RST_RDY;
+
+	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, reg_val);
+}
+
+int
+hns3_reset_req_hw_reset(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+
+	if (hw->reset.wait_data->result == HNS3_WAIT_UNKNOWN) {
+		hw->reset.wait_data->hns = hns;
+		hw->reset.wait_data->check_completion = NULL;
+		hw->reset.wait_data->interval = HNS3_RESET_SYNC_US;
+		hw->reset.wait_data->count = 1;
+		hw->reset.wait_data->result = HNS3_WAIT_REQUEST;
+		rte_eal_alarm_set(hw->reset.wait_data->interval,
+				  hns3_wait_callback, hw->reset.wait_data);
+		return -EAGAIN;
+	} else if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST)
+		return -EAGAIN;
+
+	/* inform hardware that preparatory work is done */
+	hns3_notify_reset_ready(hw, true);
+	return 0;
+}
+
+static void
+hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+{
+	uint64_t merge_cnt = hw->reset.stats.merge_cnt;
+	int64_t tmp;
+
+	switch (hw->reset.level) {
+	case HNS3_IMP_RESET:
+		hns3_atomic_clear_bit(HNS3_IMP_RESET, levels);
+		tmp = hns3_test_and_clear_bit(HNS3_GLOBAL_RESET, levels);
+		HNS3_CHECK_MERGE_CNT(tmp);
+		tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels);
+		HNS3_CHECK_MERGE_CNT(tmp);
+		break;
+	case HNS3_GLOBAL_RESET:
+		hns3_atomic_clear_bit(HNS3_GLOBAL_RESET, levels);
+		tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels);
+		HNS3_CHECK_MERGE_CNT(tmp);
+		break;
+	case HNS3_FUNC_RESET:
+		hns3_atomic_clear_bit(HNS3_FUNC_RESET, levels);
+		break;
+	case HNS3_VF_RESET:
+		hns3_atomic_clear_bit(HNS3_VF_RESET, levels);
+		tmp = hns3_test_and_clear_bit(HNS3_VF_PF_FUNC_RESET, levels);
+		HNS3_CHECK_MERGE_CNT(tmp);
+		tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
+		HNS3_CHECK_MERGE_CNT(tmp);
+		break;
+	case HNS3_VF_FULL_RESET:
+		hns3_atomic_clear_bit(HNS3_VF_FULL_RESET, levels);
+		tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
+		HNS3_CHECK_MERGE_CNT(tmp);
+		break;
+	case HNS3_VF_PF_FUNC_RESET:
+		hns3_atomic_clear_bit(HNS3_VF_PF_FUNC_RESET, levels);
+		tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
+		HNS3_CHECK_MERGE_CNT(tmp);
+		break;
+	case HNS3_VF_FUNC_RESET:
+		hns3_atomic_clear_bit(HNS3_VF_FUNC_RESET, levels);
+		break;
+	case HNS3_FLR_RESET:
+		hns3_atomic_clear_bit(HNS3_FLR_RESET, levels);
+		break;
+	case HNS3_NONE_RESET:
+	default:
+		return;
+	};
+	if (merge_cnt != hw->reset.stats.merge_cnt)
+		hns3_warn(hw, "No need to do low-level reset after %s reset. "
+			      "merge cnt: %ld total merge_cnt: %ld",
+			  reset_string[hw->reset.level],
+			  hw->reset.stats.merge_cnt - merge_cnt,
+			  hw->reset.stats.merge_cnt);
+}
+
+static bool
+hns3_reset_err_handle(struct hns3_adapter *hns)
+{
+#define MAX_RESET_FAIL_CNT 5
+
+	struct hns3_hw *hw = &hns->hw;
+
+	if (hw->adapter_state == HNS3_NIC_CLOSING)
+		goto reset_fail;
+
+	if (is_reset_pending(hns)) {
+		hw->reset.attempts = 0;
+		hw->reset.stats.fail_cnt++;
+		hns3_warn(hw, "%s reset fail because new Reset is pending attempts:%lu",
+			  reset_string[hw->reset.level],
+			  hw->reset.stats.fail_cnt);
+		hw->reset.level = HNS3_NONE_RESET;
+		return true;
+	}
+
+	hw->reset.attempts++;
+	if (hw->reset.attempts < MAX_RESET_FAIL_CNT) {
+		hns3_atomic_set_bit(hw->reset.level, &hw->reset.pending);
+		hns3_warn(hw, "%s retry to reset attempts: %d",
+			  reset_string[hw->reset.level],
+			  hw->reset.attempts);
+		return true;
+	}
+
+	if (rte_atomic16_read(&hw->reset.disable_cmd))
+		hns3_cmd_init(hw);
+reset_fail:
+	hw->reset.attempts = 0;
+	hw->reset.stats.fail_cnt++;
+	hns3_warn(hw, "%s reset fail fail_cnt:%lu success_cnt:%lu "
+		  "global_cnt:%lu imp_cnt:%lu request_cnt:%lu exec_cnt:%lu "
+		  "merge_cnt:%lu",
+		  reset_string[hw->reset.level], hw->reset.stats.fail_cnt,
+		  hw->reset.stats.success_cnt, hw->reset.stats.global_cnt,
+		  hw->reset.stats.imp_cnt, hw->reset.stats.request_cnt,
+		  hw->reset.stats.exec_cnt, hw->reset.stats.merge_cnt);
+
+	/* IMP no longer waiting the ready flag */
+	hns3_notify_reset_ready(hw, true);
+	return false;
+}
+
+static int
+hns3_reset_pre(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct timeval tv;
+	int ret;
+
+	if (hw->reset.stage == RESET_STAGE_NONE) {
+		rte_atomic16_set(&hns->hw.reset.resetting, 1);
+		hw->reset.stage = RESET_STAGE_DOWN;
+		ret = hw->reset.ops->stop_service(hns);
+		gettimeofday(&tv, NULL);
+		if (ret) {
+			hns3_warn(hw, "Reset step1 down fail=%d time=%ld.%.6ld",
+				  ret, tv.tv_sec, tv.tv_usec);
+			return ret;
+		}
+		hns3_warn(hw, "Reset step1 down success time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.stage = RESET_STAGE_PREWAIT;
+	}
+	if (hw->reset.stage == RESET_STAGE_PREWAIT) {
+		ret = hw->reset.ops->prepare_reset(hns);
+		gettimeofday(&tv, NULL);
+		if (ret) {
+			hns3_warn(hw,
+				  "Reset step2 prepare wait fail=%d time=%ld.%.6ld",
+				  ret, tv.tv_sec, tv.tv_usec);
+			return ret;
+		}
+		hns3_warn(hw, "Reset step2 prepare wait success time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.stage = RESET_STAGE_REQ_HW_RESET;
+		hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN;
+	}
+	return 0;
+}
+
+static int
+hns3_reset_post(struct hns3_adapter *hns)
+{
+#define TIMEOUT_RETRIES_CNT	5
+	struct hns3_hw *hw = &hns->hw;
+	struct timeval tv_delta;
+	struct timeval tv;
+	int ret = 0;
+
+	if (hw->adapter_state == HNS3_NIC_CLOSING) {
+		hns3_warn(hw, "Don't do reset_post during closing, just uninit cmd");
+		hns3_cmd_uninit(hw);
+		return -EPERM;
+	}
+
+	if (hw->reset.stage == RESET_STAGE_DEV_INIT) {
+		rte_spinlock_lock(&hw->lock);
+		if (hw->reset.mbuf_deferred_free) {
+			hns3_dev_release_mbufs(hns);
+			hw->reset.mbuf_deferred_free = false;
+		}
+		ret = hw->reset.ops->reinit_dev(hns);
+		rte_spinlock_unlock(&hw->lock);
+		gettimeofday(&tv, NULL);
+		if (ret) {
+			hns3_warn(hw, "Reset step5 devinit fail=%d retries=%d",
+				  ret, hw->reset.retries);
+			goto err;
+		}
+		hns3_warn(hw, "Reset step5 devinit success time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.retries = 0;
+		hw->reset.stage = RESET_STAGE_RESTORE;
+		rte_eal_alarm_set(SWITCH_CONTEXT_US,
+				  hw->reset.ops->reset_service, hns);
+		return -EAGAIN;
+	}
+	if (hw->reset.stage == RESET_STAGE_RESTORE) {
+		rte_spinlock_lock(&hw->lock);
+		ret = hw->reset.ops->restore_conf(hns);
+		rte_spinlock_unlock(&hw->lock);
+		gettimeofday(&tv, NULL);
+		if (ret) {
+			hns3_warn(hw,
+				  "Reset step6 restore fail=%d retries=%d",
+				  ret, hw->reset.retries);
+			goto err;
+		}
+		hns3_warn(hw, "Reset step6 restore success time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.retries = 0;
+		hw->reset.stage = RESET_STAGE_DONE;
+	}
+	if (hw->reset.stage == RESET_STAGE_DONE) {
+		/* IMP will wait ready flag before reset */
+		hns3_notify_reset_ready(hw, false);
+		hns3_clear_reset_level(hw, &hw->reset.pending);
+		rte_atomic16_clear(&hns->hw.reset.resetting);
+		hw->reset.attempts = 0;
+		hw->reset.stats.success_cnt++;
+		hw->reset.stage = RESET_STAGE_NONE;
+		hw->reset.ops->start_service(hns);
+		gettimeofday(&tv, NULL);
+		timersub(&tv, &hw->reset.start_time, &tv_delta);
+		hns3_warn(hw, "%s reset done fail_cnt:%lu success_cnt:%lu "
+			  "global_cnt:%lu imp_cnt:%lu request_cnt:%lu exec_cnt:%lu "
+			  "merge_cnt:%lu",
+			  reset_string[hw->reset.level],
+			  hw->reset.stats.fail_cnt, hw->reset.stats.success_cnt,
+			  hw->reset.stats.global_cnt, hw->reset.stats.imp_cnt,
+			  hw->reset.stats.request_cnt, hw->reset.stats.exec_cnt,
+			  hw->reset.stats.merge_cnt);
+		hns3_warn(hw,
+			  "%s reset done delta %ld ms time=%ld.%.6ld",
+			  reset_string[hw->reset.level],
+			  tv_delta.tv_sec * MSEC_PER_SEC +
+			  tv_delta.tv_usec / USEC_PER_MSEC,
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.level = HNS3_NONE_RESET;
+	}
+	return 0;
+
+err:
+	if (ret == -ETIME) {
+		hw->reset.retries++;
+		if (hw->reset.retries < TIMEOUT_RETRIES_CNT) {
+			rte_eal_alarm_set(HNS3_RESET_SYNC_US,
+					  hw->reset.ops->reset_service, hns);
+			return -EAGAIN;
+		}
+	}
+	hw->reset.retries = 0;
+	return -EIO;
+}
+
+/*
+ * There are three scenarios as follows:
+ * When the reset is not in progress, the reset process starts.
+ * During the reset process, if the reset level has not changed,
+ * the reset process continues; otherwise, the reset process is aborted.
+ *	hw->reset.level   new_level          action
+ *	HNS3_NONE_RESET	 HNS3_XXXX_RESET    start reset
+ *	HNS3_XXXX_RESET  HNS3_XXXX_RESET    continue reset
+ *	HNS3_LOW_RESET   HNS3_HIGH_RESET    abort
+ */
+int
+hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct timeval tv_delta;
+	struct timeval tv;
+	int ret;
+
+	if (hw->reset.level == HNS3_NONE_RESET) {
+		hw->reset.level = new_level;
+		hw->reset.stats.exec_cnt++;
+		gettimeofday(&hw->reset.start_time, NULL);
+		hns3_warn(hw, "Start %s reset time=%ld.%.6ld",
+			  reset_string[hw->reset.level],
+			  hw->reset.start_time.tv_sec,
+			  hw->reset.start_time.tv_usec);
+	}
+
+	if (is_reset_pending(hns)) {
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw,
+			  "%s reset is aborted by high level time=%ld.%.6ld",
+			  reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec);
+		if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST)
+			rte_eal_alarm_cancel(hns3_wait_callback,
+					     hw->reset.wait_data);
+		ret = -EBUSY;
+		goto err;
+	}
+
+	ret = hns3_reset_pre(hns);
+	if (ret)
+		goto err;
+
+	if (hw->reset.stage == RESET_STAGE_REQ_HW_RESET) {
+		ret = hns3_reset_req_hw_reset(hns);
+		if (ret == -EAGAIN)
+			return ret;
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw,
+			  "Reset step3 request IMP reset success time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.stage = RESET_STAGE_WAIT;
+		hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN;
+	}
+	if (hw->reset.stage == RESET_STAGE_WAIT) {
+		ret = hw->reset.ops->wait_hardware_ready(hns);
+		if (ret)
+			goto retry;
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw, "Reset step4 reset wait success time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.stage = RESET_STAGE_DEV_INIT;
+	}
+
+	ret = hns3_reset_post(hns);
+	if (ret)
+		goto retry;
+
+	return 0;
+retry:
+	if (ret == -EAGAIN)
+		return ret;
+err:
+	hns3_clear_reset_level(hw, &hw->reset.pending);
+	if (hns3_reset_err_handle(hns)) {
+		hw->reset.stage = RESET_STAGE_PREWAIT;
+		hns3_schedule_reset(hns);
+	} else {
+		rte_spinlock_lock(&hw->lock);
+		if (hw->reset.mbuf_deferred_free) {
+			hns3_dev_release_mbufs(hns);
+			hw->reset.mbuf_deferred_free = false;
+		}
+		rte_spinlock_unlock(&hw->lock);
+		rte_atomic16_clear(&hns->hw.reset.resetting);
+		hw->reset.stage = RESET_STAGE_NONE;
+		gettimeofday(&tv, NULL);
+		timersub(&tv, &hw->reset.start_time, &tv_delta);
+		hns3_warn(hw, "%s reset fail delta %ld ms time=%ld.%.6ld",
+			  reset_string[hw->reset.level],
+			  tv_delta.tv_sec * MSEC_PER_SEC +
+			  tv_delta.tv_usec / USEC_PER_MSEC,
+			  tv.tv_sec, tv.tv_usec);
+		hw->reset.level = HNS3_NONE_RESET;
+	}
+
+	return -EIO;
+}
+
+/*
+ * The reset process can only be terminated after handshake with IMP(step3),
+ * so that IMP can complete the reset process normally.
+ */
+void
+hns3_reset_abort(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct timeval tv;
+	int i;
+
+	for (i = 0; i < HNS3_QUIT_RESET_CNT; i++) {
+		if (hw->reset.level == HNS3_NONE_RESET)
+			break;
+		rte_delay_ms(HNS3_QUIT_RESET_DELAY_MS);
+	}
+
+	/* IMP no longer waiting the ready flag */
+	hns3_notify_reset_ready(hw, true);
+
+	rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
+	rte_eal_alarm_cancel(hns3_wait_callback, hw->reset.wait_data);
+
+	if (hw->reset.level != HNS3_NONE_RESET) {
+		gettimeofday(&tv, NULL);
+		hns3_err(hw, "Failed to terminate reset: %s time=%ld.%.6ld",
+			 reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec);
+	}
+}
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index b57b4ac..d0af16c 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -49,6 +49,8 @@
 #define HNS3_SSU_COMMON_ERR_INT_MASK		GENMASK(9, 0)
 #define HNS3_SSU_PORT_INT_MSIX_MASK		0x7BFF
 
+#define HNS3_RESET_PROCESS_MS			200
+
 struct hns3_hw_blk {
 	const char *name;
 	int (*enable_err_intr)(struct hns3_adapter *hns, bool en);
@@ -64,5 +66,14 @@ int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool state);
 void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
 void hns3_intr_unregister(const struct rte_intr_handle *hdl,
 			  rte_intr_callback_fn cb_fn, void *cb_arg);
+void hns3_notify_reset_ready(struct hns3_hw *hw, bool enable);
+int hns3_reset_init(struct hns3_hw *hw);
+void hns3_wait_callback(void *param);
+void hns3_schedule_reset(struct hns3_adapter *hns);
+void hns3_schedule_delayed_reset(struct hns3_adapter *hns);
+int hns3_reset_req_hw_reset(struct hns3_adapter *hns);
+int hns3_reset_process(struct hns3_adapter *hns,
+		       enum hns3_reset_level reset_level);
+void hns3_reset_abort(struct hns3_adapter *hns);
 
 #endif /* _HNS3_INTR_H_ */
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 44d8275..3ac78b1 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -107,6 +107,19 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
 	end = now + HNS3_MAX_RETRY_MS;
 	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
 	       (now < end)) {
+		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+			hns3_err(hw, "Don't wait for mbx respone because of "
+				 "disable_cmd");
+			return -EBUSY;
+		}
+
+		if (is_reset_pending(hns)) {
+			hw->mbx_resp.req_msg_data = 0;
+			hns3_err(hw, "Don't wait for mbx respone because of "
+				 "reset pending");
+			return -EIO;
+		}
+
 		/*
 		 * The mbox response is running on the interrupt thread.
 		 * Sending mbox in the interrupt thread cannot wait for the
@@ -235,6 +248,7 @@ hns3_mbx_handler(struct hns3_hw *hw)
 
 			hns3_warn(hw, "PF inform reset level %d", reset_level);
 			hw->reset.stats.request_cnt++;
+			hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
 			break;
 		default:
 			hns3_err(hw, "Fetched unsupported(%d) message from arq",
-- 
2.7.4


  parent reply	other threads:[~2019-08-23 13:53 UTC|newest]

Thread overview: 75+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-23 13:46 [dpdk-dev] [PATCH 00/22] add hns3 ethernet PMD driver Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 01/22] net/hns3: add hardware registers definition Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 02/22] net/hns3: add some definitions for data structure and macro Wei Hu (Xavier)
2019-08-30  8:25   ` Gavin Hu (Arm Technology China)
2019-09-05  6:01     ` Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 03/22] net/hns3: register hns3 PMD driver Wei Hu (Xavier)
2019-08-30 15:01   ` Ferruh Yigit
2019-09-06  6:20     ` Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 04/22] net/hns3: add support for cmd of " Wei Hu (Xavier)
2019-08-30 15:02   ` Ferruh Yigit
2019-09-06  6:49     ` Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 05/22] net/hns3: add the initialization " Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 06/22] net/hns3: add support for MAC address related operations Wei Hu (Xavier)
2019-08-30 15:03   ` Ferruh Yigit
2019-09-05  5:40     ` Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 07/22] net/hns3: add support for some misc operations Wei Hu (Xavier)
2019-08-30 15:04   ` Ferruh Yigit
2019-08-23 13:46 ` [dpdk-dev] [PATCH 08/22] net/hns3: add support for link update operation Wei Hu (Xavier)
2019-08-30 15:04   ` Ferruh Yigit
2019-09-06  6:56     ` Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 09/22] net/hns3: add support for flow directory of hns3 PMD driver Wei Hu (Xavier)
2019-08-30 15:06   ` Ferruh Yigit
2019-09-06  8:23     ` Wei Hu (Xavier)
2019-09-06 11:08     ` Wei Hu (Xavier)
2019-08-23 13:46 ` [dpdk-dev] [PATCH 10/22] net/hns3: add support for RSS " Wei Hu (Xavier)
2019-08-30 15:07   ` Ferruh Yigit
2019-08-31  9:16     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 11/22] net/hns3: add support for flow control " Wei Hu (Xavier)
2019-08-30 15:07   ` Ferruh Yigit
2019-08-31  8:04     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 12/22] net/hns3: add support for VLAN " Wei Hu (Xavier)
2019-08-30 15:08   ` Ferruh Yigit
2019-08-31  9:04     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 13/22] net/hns3: add support for mailbox " Wei Hu (Xavier)
2019-08-30 15:08   ` Ferruh Yigit
2019-09-06 11:25     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 14/22] net/hns3: add support for hns3 VF " Wei Hu (Xavier)
2019-08-30 15:11   ` Ferruh Yigit
2019-08-31  9:03     ` Wei Hu (Xavier)
2019-09-06 11:27     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 15/22] net/hns3: add package and queue related operation Wei Hu (Xavier)
2019-08-23 15:42   ` Aaron Conole
2019-08-30 15:13   ` Ferruh Yigit
2019-09-11 11:40     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 16/22] net/hns3: add start stop configure promiscuous ops Wei Hu (Xavier)
2019-08-30 15:14   ` Ferruh Yigit
2019-09-06 11:51     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 17/22] net/hns3: add dump register ops for hns3 PMD driver Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 18/22] net/hns3: add abnormal interrupt process " Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 19/22] net/hns3: add stats related ops " Wei Hu (Xavier)
2019-08-30 15:20   ` Ferruh Yigit
2019-08-31  8:49     ` Wei Hu (Xavier)
2019-08-23 13:47 ` Wei Hu (Xavier) [this message]
2019-08-23 13:47 ` [dpdk-dev] [PATCH 21/22] net/hns3: add multiple process support " Wei Hu (Xavier)
2019-08-30 15:14   ` Ferruh Yigit
2019-09-02 13:41     ` Wei Hu (Xavier)
2019-08-23 13:47 ` [dpdk-dev] [PATCH 22/22] net/hns3: add hns3 build files Wei Hu (Xavier)
2019-08-23 14:08   ` Jerin Jacob Kollanukkaran
2019-08-30  3:22     ` Wei Hu (Xavier)
2019-08-31  2:10       ` Wei Hu (Xavier)
2019-08-30 14:57     ` Ferruh Yigit
2019-08-30  6:16   ` Stephen Hemminger
2019-08-31  8:46     ` Wei Hu (Xavier)
2019-08-30  6:17   ` Stephen Hemminger
2019-08-31  8:44     ` Wei Hu (Xavier)
2019-09-03 15:27     ` Ye Xiaolong
2019-09-11 11:36       ` Wei Hu (Xavier)
2019-08-30 14:58   ` Ferruh Yigit
2019-09-10 11:43     ` Wei Hu (Xavier)
2019-08-30 15:00   ` Ferruh Yigit
2019-08-31  8:07     ` Wei Hu (Xavier)
2019-08-30 15:12   ` Ferruh Yigit
2019-08-31  8:07     ` Wei Hu (Xavier)
2019-08-30 15:23 ` [dpdk-dev] [PATCH 00/22] add hns3 ethernet PMD driver Ferruh Yigit
2019-08-31  8:06   ` Wei Hu (Xavier)

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1566568031-45991-21-git-send-email-xavier.huwei@huawei.com \
    --to=xavier.huwei@huawei.com \
    --cc=dev@dpdk.org \
    --cc=forest.zhouchang@huawei.com \
    --cc=linuxarm@huawei.com \
    --cc=liudongdong3@huawei.com \
    --cc=xavier_huwei@163.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).